Skip to content

Commit db6b178

Browse files
committed
docs: update extending docs
1 parent 8609c33 commit db6b178

File tree

4 files changed

+110
-146
lines changed

4 files changed

+110
-146
lines changed

docs/extending/datagrabber.rst

Lines changed: 51 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -140,29 +140,24 @@ With the variables defined above, we can create our DataGrabber and name it
140140
141141
from pathlib import Path
142142
143-
from junifer.datagrabber import PatternDataGrabber
143+
from junifer.datagrabber import PatternDataGrabber, DataType
144+
from junifer.typing import DataGrabberPatterns
144145
145146
146147
class ExampleBIDSDataGrabber(PatternDataGrabber):
147-
def __init__(self, datadir: str | Path) -> None:
148-
types = ["T1w", "BOLD"]
149-
patterns = {
150-
"T1w": {
151-
"pattern": "{subject}/{session}/anat/{subject}_{session}_T1w.nii.gz",
152-
"space": "native",
153-
},
154-
"BOLD": {
155-
"pattern": "{subject}/{session}/func/{subject}_{session}_task-rest_bold.nii.gz",
156-
"space": "MNI152NLin6Asym",
157-
},
158-
}
159-
replacements = ["subject", "session"]
160-
super().__init__(
161-
datadir=datadir,
162-
types=types,
163-
patterns=patterns,
164-
replacements=replacements,
165-
)
148+
149+
types: list[DataType] = [DataType.T1w, DataType.BOLD]
150+
patterns: DataGrabberPatterns = {
151+
"T1w": {
152+
"pattern": "{subject}/{session}/anat/{subject}_{session}_T1w.nii.gz",
153+
"space": "native",
154+
},
155+
"BOLD": {
156+
"pattern": "{subject}/{session}/func/{subject}_{session}_task-rest_bold.nii.gz",
157+
"space": "MNI152NLin6Asym",
158+
},
159+
}
160+
replacements: list[str] = ["subject", "session"]
166161
167162
Our DataGrabber is ready to be used by ``junifer``. However, it is still unknown
168163
to the library. We need to register it in the library. To do so, we need to
@@ -175,29 +170,24 @@ use the :func:`.register_datagrabber` decorator.
175170
176171
from junifer.api.decorators import register_datagrabber
177172
from junifer.datagrabber import PatternDataGrabber
173+
from junifer.typing import DataGrabberPatterns
178174
179175
180176
@register_datagrabber
181177
class ExampleBIDSDataGrabber(PatternDataGrabber):
182-
def __init__(self, datadir: str | Path) -> None:
183-
types = ["T1w", "BOLD"]
184-
patterns = {
185-
"T1w": {
186-
"pattern": "{subject}/{session}/anat/{subject}_{session}_T1w.nii.gz",
187-
"space": "native",
188-
},
189-
"BOLD": {
190-
"pattern": "{subject}/{session}/func/{subject}_{session}_task-rest_bold.nii.gz",
191-
"space": "MNI152NLin6Asym",
192-
},
193-
}
194-
replacements = ["subject", "session"]
195-
super().__init__(
196-
datadir=datadir,
197-
types=types,
198-
patterns=patterns,
199-
replacements=replacements,
200-
)
178+
179+
types: list[DataType] = [DataType.T1w, DataType.BOLD]
180+
patterns: DataGrabberPatterns = {
181+
"T1w": {
182+
"pattern": "{subject}/{session}/anat/{subject}_{session}_T1w.nii.gz",
183+
"space": "native",
184+
},
185+
"BOLD": {
186+
"pattern": "{subject}/{session}/func/{subject}_{session}_task-rest_bold.nii.gz",
187+
"space": "MNI152NLin6Asym",
188+
},
189+
}
190+
replacements: list[str] = ["subject", "session"]
201191
202192
203193
Now, we can use our DataGrabber in ``junifer``, by setting the ``datagrabber``
@@ -259,35 +249,30 @@ And we can create our DataGrabber:
259249

260250
.. code-block:: python
261251
252+
from pathlib import Path
253+
262254
from junifer.api.decorators import register_datagrabber
263255
from junifer.datagrabber import PatternDataladDataGrabber
256+
from pydantic import HttpUrl
264257
265258
266259
@register_datagrabber
267260
class ExampleBIDSDataGrabber(PatternDataladDataGrabber):
268-
def __init__(self) -> None:
269-
types = ["T1w", "BOLD"]
270-
patterns = {
271-
"T1w": {
272-
"pattern": "{subject}/{session}/anat/{subject}_{session}_T1w.nii.gz",
273-
"space": "native",
274-
},
275-
"BOLD": {
276-
"pattern": "{subject}/{session}/func/{subject}_{session}_task-rest_bold.nii.gz",
277-
"space": "MNI152NLin6Asym",
278-
},
279-
}
280-
replacements = ["subject", "session"]
281-
uri = "https://gin.g-node.org/juaml/datalad-example-bids"
282-
rootdir = "example_bids_ses"
283-
super().__init__(
284-
datadir=None,
285-
uri=uri,
286-
rootdir=rootdir,
287-
types=types,
288-
patterns=patterns,
289-
replacements=replacements,
290-
)
261+
262+
uri: HttpUrl = HttpUrl("https://gin.g-node.org/juaml/datalad-example-bids")
263+
types: list[DataType] = [DataType.T1w, DataType.BOLD]
264+
patterns: DataGrabberPatterns = {
265+
"T1w": {
266+
"pattern": "{subject}/{session}/anat/{subject}_{session}_T1w.nii.gz",
267+
"space": "native",
268+
},
269+
"BOLD": {
270+
"pattern": "{subject}/{session}/func/{subject}_{session}_task-rest_bold.nii.gz",
271+
"space": "MNI152NLin6Asym",
272+
},
273+
}
274+
replacements: list[str] = ["subject", "session"]
275+
rootdir: Path = Path("example_bids_ses")
291276
292277
This approach can be used directly from the YAML, like so:
293278

@@ -376,8 +361,8 @@ need to implement the following methods:
376361

377362
.. note::
378363

379-
The ``__init__`` method could also be implemented, but it is not mandatory.
380-
This is required if the DataGrabber requires any extra parameter.
364+
If the DataGrabber requires any extra parameters, they can be defined as
365+
class attributes.
381366

382367
We will now implement our BIDS example with this method.
383368

@@ -494,8 +479,8 @@ more information about the format of the confounds file. Thus, the
494479
``BOLD.confounds`` element is a dictionary with the following keys:
495480

496481
- ``path``: the path to the confounds file.
497-
- ``format``: the format of the confounds file. Currently, this can be either
498-
``fmriprep`` or ``adhoc``.
482+
- ``format``: the format of the confounds file. Check :enum:`.ConfoundsFormat`
483+
for options.
499484

500485
The ``fmriprep`` format corresponds to the format of the confounds files
501486
generated by `fMRIPrep`_. The ``adhoc`` format corresponds to a format that is

docs/extending/dependencies.rst

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ by having a class attribute like so:
4646
4747
_EXT_DEPENDENCIES: ClassVar[ExternalDependencies] = [
4848
{
49-
"name": "afni",
49+
"name": ExtDep.AFNI,
5050
"commands": ["3dReHo", "3dAFNItoNIFTI"],
5151
},
5252
]
@@ -55,7 +55,7 @@ The above example is taken from the class which computes regional homogeneity
5555
(ReHo) using AFNI. The general pattern is that you need to have the value of
5656
``_EXT_DEPENDENCIES`` as a list of dictionary with two keys:
5757

58-
* ``name`` (str) : lowercased name of the toolbox
58+
* ``name`` (:enum:`.ExtDep`) : name of the toolbox
5959
* ``commands`` (list of str) : actual names of the commands you need to use
6060

6161
This is simple but powerful as we will see in the following sub-sections.
@@ -81,30 +81,28 @@ that it shows the problem a bit better and how we solve it:
8181
_CONDITIONAL_DEPENDENCIES: ClassVar[ConditionalDependencies] = [
8282
{
8383
"using": "fsl",
84-
"depends_on": FSLWarper,
84+
"depends_on": [FSLWarper],
8585
},
8686
{
8787
"using": "ants",
88-
"depends_on": ANTsWarper,
88+
"depends_on": [ANTsWarper],
8989
},
9090
{
9191
"using": "auto",
9292
"depends_on": [FSLWarper, ANTsWarper],
9393
},
9494
]
9595
96-
def __init__(
97-
self, using: str, reference: str, on: Union[List[str], str]
98-
) -> None:
99-
# validation and setting up
100-
...
96+
using: str
97+
reference: str
98+
on: List[DataType]
10199
102100
103101
Here, you see a new class attribute ``_CONDITIONAL_DEPENDENCIES`` which is a
104102
list of dictionaries with two keys:
105103

106104
* ``using`` (str) : lowercased name of the toolbox
107-
* ``depends_on`` (object or list of objects) : a class or list of classes which \
105+
* ``depends_on`` (list of objects) : list of classes which \
108106
implement the particular tool's use
109107

110108
It is mandatory to have the ``using`` positional argument in the constructor in
@@ -128,7 +126,7 @@ similar. ``FSLWarper`` looks like this (only the relevant part is shown here):
128126
129127
_EXT_DEPENDENCIES: ClassVar[ExternalDependencies] = [
130128
{
131-
"name": "fsl",
129+
"name": ExtDep.FSL,
132130
"commands": ["flirt", "applywarp"],
133131
},
134132
]

docs/extending/marker.rst

Lines changed: 34 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ Most of the functionality of a ``junifer`` Marker has been taken care by the
1414
:class:`.BaseMarker` class. Thus, only a few methods and class attributes are
1515
required:
1616

17-
#. ``__init__``: The initialisation method, where the Marker is configured.
17+
#. (optional) ``validate_marker_params``: The method to perform logical validation of parameters (if required).
1818
#. ``compute``: The method that given the data, computes the Marker.
1919

2020
As an example, we will develop a ``ParcelMean`` Marker, a Marker that first
@@ -29,23 +29,23 @@ Step 1: Configure input and output
2929
This step is quite simple: we need to define the input and output of the Marker.
3030
Based on the current :ref:`data types <data_types>`, we can have ``BOLD``,
3131
``VBM_WM`` and ``VBM_GM`` as valid inputs. The output of the Marker depends on
32-
the input. For ``BOLD``, it will be ``timeseries``, while for the rest of the
33-
inputs, it will be ``vector``. Thus, we have a class attribute like so:
32+
the input. For ``BOLD``, it will be ``Timeseries``, while for the rest of the
33+
inputs, it will be ``Vector``. Thus, we have a class attribute like so:
3434

3535
.. code-block:: python
3636
3737
# NOTE: data type -> feature -> storage type
3838
# You can have multiple features for one data type,
3939
# each feature having same or different storage type
4040
_MARKER_INOUT_MAPPINGS = {
41-
"BOLD": {
42-
"parcel_mean": "timeseries",
41+
DataType.BOLD: {
42+
"parcel_mean": StorageType.Timeseries,
4343
},
44-
"VBM_WM": {
45-
"parcel_mean": "vector",
44+
DataType.VBM_WM: {
45+
"parcel_mean": StorageType.Vector,
4646
},
47-
"VBM_GM": {
48-
"parcel_mean": "vector",
47+
DataType.VBM_GM: {
48+
"parcel_mean": StorageType.Vector,
4949
},
5050
}
5151
@@ -57,13 +57,11 @@ Step 2: Initialise the Marker
5757
In this step we need to define the parameters of the Marker the user can provide
5858
to configure how the Marker will behave.
5959

60-
The parameters of the Marker are defined in the ``__init__`` method. The
61-
:class:`.BaseMarker` class requires two optional parameters:
60+
The parameters of the Marker are defined as class attributes. The
61+
:class:`.BaseMarker` class defines two optional parameters:
6262

63-
1. ``name``: the name of the Marker. This is used to identify the Marker in the
64-
configuration file.
65-
2. ``on``: a list or string with the data types that the Marker will be applied
66-
to.
63+
1. ``name``: the name of the Marker. This is used to identify the Marker in the configuration file.
64+
2. ``on``: a list of :enum:`.DataType` with the data types that the Marker will be applied to.
6765

6866
.. attention::
6967

@@ -72,18 +70,11 @@ The parameters of the Marker are defined in the ``__init__`` method. The
7270
JSON format, and JSON only supports these types.
7371

7472
In this example, the only parameter required for the computation is the name of the
75-
parcellation to use. Thus, we can define the ``__init__`` method as follows:
73+
parcellation to use. Thus, we can define it as follows:
7674

7775
.. code-block:: python
7876
79-
def __init__(
80-
self,
81-
parcellation: str,
82-
on: str | list[str] | None = None,
83-
name: str | None = None,
84-
) -> None:
85-
self.parcellation = parcellation
86-
super().__init__(on=on, name=name)
77+
parcellation: str
8778
8879
.. caution::
8980

@@ -121,7 +112,7 @@ and the values would be a dictionary of storage type specific key-value pairs.
121112

122113
To simplify the ``store`` method, define keys of the dictionary based on the
123114
corresponding store functions in the :ref:`storage types <storage_types>`.
124-
For example, if the output is a ``vector``, the keys of the dictionary should
115+
For example, if the output is a ``Vector``, the keys of the dictionary should
125116
be ``data`` and ``col_names``.
126117

127118
.. code-block:: python
@@ -142,7 +133,7 @@ and the values would be a dictionary of storage type specific key-value pairs.
142133
143134
# Get the parcellation tailored for the target
144135
t_parcellation, t_labels, _ = get_parcellation(
145-
name=self.parcellation_name,
136+
name=self.parcellation,
146137
target_data=input,
147138
extra_input=extra_input,
148139
)
@@ -195,7 +186,9 @@ Finally, we need to register the Marker using the ``@register_marker`` decorator
195186
196187
from junifer.api.decorators import register_marker
197188
from junifer.data import get_parcellation
189+
from junifer.datagrabber import DataType
198190
from junifer.markers import BaseMarker
191+
from junifer.storage import StorageType
199192
from junifer.typing import Dependencies, MarkerInOutMappings
200193
from nilearn.maskers import NiftiLabelsMasker
201194
@@ -206,25 +199,18 @@ Finally, we need to register the Marker using the ``@register_marker`` decorator
206199
_DEPENDENCIES: ClassVar[Dependencies] = {"nilearn", "numpy"}
207200
208201
_MARKER_INOUT_MAPPINGS: ClassVar[MarkerInOutMappings] = {
209-
"BOLD": {
210-
"parcel_mean": "timeseries",
202+
DataType.BOLD: {
203+
"parcel_mean": StorageType.Timeseries,
211204
},
212-
"VBM_WM": {
213-
"parcel_mean": "vector",
205+
DataType.VBM_WM: {
206+
"parcel_mean": StorageType.Vector,
214207
},
215-
"VBM_GM": {
216-
"parcel_mean": "vector",
208+
DataType.VBM_GM: {
209+
"parcel_mean": StorageType.Vector,
217210
},
218211
}
219212
220-
def __init__(
221-
self,
222-
parcellation: str,
223-
on: str | list[str] | None = None,
224-
name: str | None = None,
225-
) -> None:
226-
self.parcellation = parcellation
227-
super().__init__(on=on, name=name)
213+
parcellation: str
228214
229215
def compute(
230216
self,
@@ -236,7 +222,7 @@ Finally, we need to register the Marker using the ``@register_marker`` decorator
236222
237223
# Get the parcellation tailored for the target
238224
t_parcellation, t_labels, _ = get_parcellation(
239-
name=self.parcellation_name,
225+
name=self.parcellation,
240226
target_data=input,
241227
extra_input=extra_input,
242228
)
@@ -280,9 +266,13 @@ Template for a custom Marker
280266
# TODO: add the input-output mappings
281267
_MARKER_INOUT_MAPPINGS = {}
282268
283-
def __init__(self, on=None, name=None):
284-
# TODO: add marker-specific parameters
285-
super().__init__(on=on, name=name)
269+
# TODO: define marker-specific parameters
270+
271+
# optional
272+
def validate_marker_params(self):
273+
# TODO: add validation logic for marker parameters
274+
pass
286275
287276
def compute(self, input, extra_input):
288277
# TODO: compute the marker and create the output dictionary
278+
return {}

0 commit comments

Comments
 (0)