forked from mne-tools/mne-bids-pipeline
-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy pathconfig.py
More file actions
721 lines (586 loc) · 21.6 KB
/
config.py
File metadata and controls
721 lines (586 loc) · 21.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
"""
===========
Config file
===========
Configuration parameters for the study.
"""
import os
from collections import defaultdict
import numpy as np
# ``plot`` : boolean
# If True, the scripts will generate plots.
# If running the scripts from a notebook or spyder
# run %matplotlib qt in the command line to get the plots in extra windows
plot = False
###############################################################################
# DIRECTORIES
# -----------
#
# ``study_path`` : str
#    Set the ``study_path`` where the data is stored on your system.
#
# Example
# ~~~~~~~
# >>> study_path = '../MNE-sample-data/'
# or
# >>> study_path = '/neurospin/meg/meg_tmp/
# Dynacomp_Ciuciu_2011/2019_MEG_Pipeline/'
study_path = 'data/'
# ``subjects_dir`` : str
# The ``subjects_dir`` contains the MRI files for all subjects.
subjects_dir = os.path.join(study_path, 'subjects')
# ``meg_dir`` : str
# The ``meg_dir`` contains the MEG data in subfolders
# named my_study_path/MEG/my_subject/
meg_dir = os.path.join(study_path, 'MEG')
###############################################################################
# SUBJECTS / RUNS
# ---------------
#
# ``study_name`` : str
# This is the name of your experiment.
#
# Example
# ~~~~~~~
# >>> study_name = 'MNE-sample'
study_name = 'Localizer'
# ``subjects_list`` : list of str
# To define the list of participants, we use a list with all the anonymized
# participant names. Even if you plan on analyzing a single participant, it
# needs to be set up as a list with a single element, as in the 'example'
# subjects_list = ['SB01']
# To use all subjects use
# subjects_list = ['SB02', 'SB04', 'SB05', 'SB06', 'SB07',
# 'SB08', 'SB09', 'SB10', 'SB11', 'SB12']
# else for speed and fast test you can use:
subjects_list = ['SB02', 'SB04']
# ``exclude_subjects`` : list of str
#    Subjects to exclude from the group study. Use an empty list ``[]`` if
#    no subject should be excluded. Note that ``['']`` is NOT empty -- it
#    contains the empty string, which is not a valid subject name.
#
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# Keep track of the criteria leading you to exclude
# a participant (e.g. too many movements, missing blocks, aborted experiment,
# did not understand the instructions, etc, ...)
exclude_subjects = []
# Drop the excluded subjects and keep a deterministic (sorted) order so that
# downstream scripts always process subjects in the same sequence.
subjects_list = sorted(set(subjects_list) - set(exclude_subjects))
# ``runs`` : list of str
# Define the names of your ``runs``
#
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# The naming should be consistent across participants. List the number of runs
# you ideally expect to have per participant. The scripts will issue a warning
# if there are less runs than is expected. If there is only just one file,
# leave empty!
runs = [''] # ['run01', 'run02']
# ``ch_types`` : list of str
# The list of channel types to consider.
#
# Example
# ~~~~~~~
# >>> ch_types = ['meg', 'eeg'] # to use MEG and EEG channels
# or
# >>> ch_types = ['meg'] # to use only MEG
# or
# >>> ch_types = ['grad'] # to use only gradiometer MEG channels
ch_types = ['meg']
# ``base_fname`` : str
# This automatically generates the name for all files
# with the variables specified above.
# Normally you should not have to touch this
base_fname = '{subject}_' + study_name + '{extension}.fif'
###############################################################################
# BAD CHANNELS
# ------------
# needed for 01-import_and_maxwell_filter.py
# ``bads`` : dict of list | dict of dict
#    Bad channels are noisy sensors that *must* be listed
#    *before* maxfilter is applied. You can use the dict of list structure
#    if you have bad channels that are the same for all runs.
# Use the dict(dict) if you have many runs or if noisy sensors are changing
# across runs.
#
# Example
# ~~~~~~~
# >>> bads = defaultdict(list)
# >>> bads['sample'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
# or
# >>> def default_bads():
# >>> return dict(run01=[], run02=[])
# >>>
# >>> bads = defaultdict(default_bads)
# >>> bads['subject01'] = dict(run01=['MEG1723', 'MEG1722'], run02=['MEG1723'])
#
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# During the acquisition of your MEG / EEG data, systematically list and keep
# track of the noisy sensors. Here, put the number of runs you ideally expect
# to have per participant. Use the simple dict if you don't have runs or if
# the same sensors are noisy across all runs.
bads = defaultdict(list)
# either put the bad channels here directly
bads['SB01'] = ['MEG1723', 'MEG1722']
bads['SB02'] = ['MEG1723', 'MEG1722']
bads['SB04'] = ['MEG0543', 'MEG2333']
bads['SB06'] = ['MEG2632', 'MEG2033']
# or read bad channels from textfile in the subject's data folder, named
# bad_channels.txt
#import re
#for subject in subjects_list:
# bad_chans_file_name = os.path.join(meg_dir,subject,'bad_channels.txt')
# bad_chans_file = open(bad_chans_file_name,"r")
# bad_chans = bad_chans_file.readlines()
# bad_chans_file.close()
#
# for i in bad_chans:
# if study_name in i:
# SBbads = re.findall(r'\d+|\d+.\d+', i)
# if SBbads:
# for b, bad in enumerate(SBbads):
# SBbads[b] = 'MEG' + str(bad)
# bads[subject]=SBbads
# del SBbads
#
#del subject
###############################################################################
# DEFINE ADDITIONAL CHANNELS
# --------------------------
# needed for 01-import_and_maxwell_filter.py
# ``set_channel_types``: dict
# Here you define types of channels to pick later.
#
# Example
# ~~~~~~~
# >>> set_channel_types = {'EEG061': 'eog', 'EEG062': 'eog',
# 'EEG063': 'ecg', 'EEG064': 'misc'}
set_channel_types = None
# ``rename_channels`` : dict rename channels
# Here you name or replace extra channels that were recorded, for instance
# EOG, ECG.
#
# Example
# ~~~~~~~
# Here rename EEG061 to EOG061, EEG062 to EOG062, EEG063 to ECG063:
# >>> rename_channels = {'EEG061': 'EOG061', 'EEG062': 'EOG062',
# 'EEG063': 'ECG063'}
rename_channels = None
###############################################################################
# FREQUENCY FILTERING
# -------------------
# done in 01-import_and_maxwell_filter.py
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# It is typically better to set your filtering properties on the raw data so
# as to avoid what we call border (or edge) effects.
#
# If you use this pipeline for evoked responses, you could consider
# a low-pass filter cut-off of h_freq = 40 Hz
# and possibly a high-pass filter cut-off of l_freq = 1 Hz
# so you would preserve only the power in the 1Hz to 40 Hz band.
# Note that highpass filtering is not necessarily recommended as it can
# distort waveforms of evoked components, or simply wash out any low
# frequency that may contain brain signal. It can also act as
# a replacement for baseline correction in Epochs. See below.
#
# If you use this pipeline for time-frequency analysis, a default filtering
# could be a high-pass filter cut-off of l_freq = 1 Hz
# a low-pass filter cut-off of h_freq = 120 Hz
# so you would preserve only the power in the 1Hz to 120 Hz band.
#
# If you need more fancy analysis, you are already likely past this kind
# of tips! :)
# ``l_freq`` : float
# The low-frequency cut-off in the highpass filtering step.
# Keep it None if no highpass filtering should be applied.
l_freq = 1.
# ``h_freq`` : float
# The high-frequency cut-off in the lowpass filtering step.
# Keep it None if no lowpass filtering should be applied.
h_freq = 40.
###############################################################################
# MAXFILTER PARAMETERS
# --------------------
#
# ``use_maxwell_filter`` : bool
# Use or not maxwell filter to preprocess the data.
use_maxwell_filter = True
# There are two kinds of maxfiltering: SSS and tSSS
# [SSS = signal space separation ; tSSS = temporal signal space separation]
# (Taulu et al, 2004): http://cds.cern.ch/record/709081/files/0401166.pdf
#
# ``mf_st_duration`` : float | None
# If not None, apply spatiotemporal SSS (tSSS) with specified buffer
# duration (in seconds). MaxFilter™'s default is 10.0 seconds in v2.2.
#    Spatiotemporal SSS acts implicitly as a high-pass filter where the
# cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer
# buffers are generally better as long as your system can handle the
# higher memory usage. To ensure that each window is processed
# identically, choose a buffer length that divides evenly into your data.
# Any data at the trailing edge that doesn't fit evenly into a whole
# buffer window will be lumped into the previous buffer.
#
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# If you are interested in low frequency activity (<0.1Hz), avoid using tSSS
# and set mf_st_duration to None
#
# If you are interested in low frequency above 0.1 Hz, you can use the
# default mf_st_duration to 10 s meaning it acts like a 0.1 Hz highpass filter.
#
# Example
# ~~~~~~~
# >>> mf_st_duration = None
# or
# >>> mf_st_duration = 10. # to apply tSSS with 0.1Hz highpass filter.
mf_st_duration = None
# ``mf_head_origin`` : array-like, shape (3,) | 'auto'
# Origin of internal and external multipolar moment space in meters.
# If 'auto', it will be estimated from headshape points.
# If automatic fitting fails (e.g., due to having too few digitization
# points), consider separately calling the fitting function with different
# options or specifying the origin manually.
#
# Example
# ~~~~~~~
# >>> mf_head_origin = 'auto'
mf_head_origin = 'auto'
# ``cross talk`` : str
# Path to the cross talk file
#
#
# ``calibration`` : str
# Path to the calibration file.
#
#
# These 2 files should be downloaded and made available for running
# maxwell filtering.
#
# Example
# ~~~~~~~
# >>> cal_files_path = os.path.join(study_path, 'SSS')
# >>> mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_mgh.fif')
# >>> mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_mgh.dat')
#
# Warning
# ~~~~~~~
# These 2 files are site and machine specific files that provide information
# about the environmental noise. For practical purposes, place them in your
# study folder.
#
# At NeuroSpin: ct_sparse and sss_call are on the meg_tmp server
cal_files_path = os.path.join(study_path, 'system_calibration_files')
mf_ctc_fname = os.path.join(cal_files_path, 'ct_sparse_nsp_2017.fif')
mf_cal_fname = os.path.join(cal_files_path, 'sss_cal_nsp_2017.dat')
# Despite all possible care to avoid movements in the MEG, the participant
# will likely slowly drift down from the Dewar or slightly shift the head
# around in the course of the recording session. Hence, to take this into
# account, we are realigning all data to a single position. For this, you need
# to define a reference run (typically the one in the middle of
# the recording session).
#
# ``mf_reference_run`` : int
# Which run to take as the reference for adjusting the head position of all
# runs.
#
# Example
# ~~~~~~~
# >>> mf_reference_run = 0 # to use the first run
mf_reference_run = 0
###############################################################################
# RESAMPLING
# ----------
#
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# If you have acquired data with a very high sampling frequency (e.g. 2 kHz)
# you will likely want to downsample to lighten up the size of the files you
# are working with (pragmatics)
# If you are interested in typical analysis (up to 120 Hz) you can typically
# resample your data down to 500 Hz without preventing reliable time-frequency
# exploration of your data
#
# ``resample_sfreq`` : float
# Specifies at which sampling frequency the data should be resampled.
# If None then no resampling will be done.
#
# Example
# ~~~~~~~
# >>> resample_sfreq = None # no resampling
# or
# >>> resample_sfreq = 500 # resample to 500Hz
resample_sfreq = 500. # None
# ``decim`` : int
# Says how much to decimate data at the epochs level.
# It is typically an alternative to the `resample_sfreq` parameter that
# can be used for resampling raw data. 1 means no decimation.
#
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# Decimation requires to lowpass filtered the data to avoid aliasing.
# Note that using decimation is much faster than resampling.
#
# Example
# ~~~~~~~
# >>> decim = 1 # no decimation
# or
# >>> decim = 4  # decimate by 4, i.e. divide sampling frequency by 4
decim = 1
###############################################################################
# AUTOMATIC REJECTION OF ARTIFACTS
# --------------------------------
#
# Good Practice / Advice
# ~~~~~~~~~~~~~~~~~~~~~~
# Have a look at your raw data and train yourself to detect a blink, a heart
# beat and an eye movement.
# You can do a quick average of blink data and check what the amplitude looks
# like.
#
# ``reject`` : dict | None
# The rejection limits to make some epochs as bads.
# This allows to remove strong transient artifacts.
# If you want to reject and retrieve blinks later, e.g. with ICA,
# don't specify a value for the eog channel (see examples below).
# Make sure to include values for eeg if you have EEG data
#
# Note
# ~~~~
# These numbers tend to vary between subjects.. You might want to consider
# using the autoreject method by Jas et al. 2018.
# See https://autoreject.github.io
#
# Example
# ~~~~~~~
# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eog': 150e-6}
# >>> reject = {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 200e-6}
# >>> reject = None
reject = {'grad': 4000e-13, 'mag': 4e-12}
###############################################################################
# EPOCHING
# --------
#
# ``tmin``: float
# A float in seconds that gives the start time before event of an epoch.
#
# Example
# ~~~~~~~
# >>> tmin = -0.2 # take 200ms before event onset.
tmin = -0.6
# ``tmax``: float
#    A float in seconds that gives the end time after event of an epoch.
#
# Example
# ~~~~~~~
# >>> tmax = 0.5 # take 500ms after event onset.
tmax = 1.5
# ``trigger_time_shift`` : float | None
# If float it specifies the offset for the trigger and the stimulus
# (in seconds). You need to measure this value for your specific
# experiment/setup.
#
# Example
# ~~~~~~~
# >>> trigger_time_shift = 0 # don't apply any offset
trigger_time_shift = -0.0416
# ``baseline`` : tuple
# It specifies how to baseline the epochs; if None, no baseline is applied.
#
# Example
# ~~~~~~~
# >>> baseline = (None, 0) # baseline between tmin and 0
# There is an event 500ms prior to the time-locking event, so we want
# to take a baseline before that
baseline = (-.6, -.5) # (None, 0.)
# ``stim_channel`` : str
# The name of the stimulus channel, which contains the events.
#
# Example
# ~~~~~~~
# >>> stim_channel = 'STI 014' # or 'STI101'
stim_channel = 'STI101'
# ``min_event_duration`` : float
# The minimal duration of the events you want to extract (in seconds).
#
# Example
# ~~~~~~~
# >>> min_event_duration = 0.002 # 2 miliseconds
min_event_duration = 0.002
# ``event_id`` : dict
# Dictionary that maps events (trigger/marker values)
# to conditions.
#
# Example
# ~~~~~~~
# >>> event_id = {'auditory/left': 1, 'auditory/right': 2}`
# or
# >>> event_id = {'Onset': 4} with conditions = ['Onset']
event_id = {'incoherent/1': 33, 'incoherent/2': 35,
'coherent/down': 37, 'coherent/up': 39}
# ``conditions`` : list
# List of condition names to consider. Must match the keys
# in event_id.
#
# Example
# ~~~~~~~
# >>> conditions = ['auditory', 'visual']
# or
# >>> conditions = ['left', 'right']
conditions = ['incoherent', 'coherent']
###############################################################################
# ARTIFACT REMOVAL
# ----------------
#
# You can choose between ICA and SSP to remove eye and heart artifacts.
# SSP: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ssp.html?highlight=ssp # noqa
# ICA: https://mne-tools.github.io/stable/auto_tutorials/plot_artifacts_correction_ica.html?highlight=ica # noqa
# if you choose ICA, run scripts 5a and 6a
# if you choose SSP, run scripts 5b and 6b
#
# Currently you cannot use both.
#
# ``use_ssp`` : bool
#    If True, SSP (signal space projection) is used to remove artifacts
#    (run scripts 5b and 6b).
use_ssp = False
# ``use_ica`` : bool
#    If True, ICA (independent component analysis) is used to remove
#    artifacts (run scripts 5a and 6a).
use_ica = True
# ``ica_decim`` : int
# The decimation parameter to compute ICA. If 5 it means
# that 1 every 5 sample is used by ICA solver. The higher the faster
# it is to run but the less data you have to compute a good ICA.
ica_decim = 4
# ``default_reject_comps`` : dict
# A dictionary that specifies the indices of the ICA components to reject
# for each subject. For example you can use:
# rejcomps_man['subject01'] = dict(eeg=[12], meg=[7])
def default_reject_comps():
    """Return a fresh channel-type -> ICA-components-to-reject mapping."""
    return {'meg': [], 'eeg': []}


# Per-subject manual rejections; subjects without an explicit entry fall
# back to the empty mapping produced by ``default_reject_comps``.
rejcomps_man = defaultdict(default_reject_comps)
# ``ica_ctps_ecg_threshold``: float
# The threshold parameter passed to `find_bads_ecg` method.
ica_ctps_ecg_threshold = 0.1
# ``ica_correlation_eog_threshold``: float
# The threshold parameter passed to `find_bads_eog` method.
ica_correlation_eog_threshold = 3.
###############################################################################
# DECODING
# --------
#
# ``decoding_conditions`` : list
# List of conditions to be classified.
#
# Example
# ~~~~~~~
# >>> decoding_conditions = [] # don't do decoding
# or
# >>> decoding_conditions = [('auditory', 'visual'), ('left', 'right')]
decoding_conditions = [('incoherent', 'coherent')]
# ``decoding_metric`` : str
# The metric to use for cross-validation. It can be 'roc_auc' or 'accuracy'
# or any metric supported by scikit-learn.
decoding_metric = 'roc_auc'
# ``decoding_n_splits`` : int
# The number of folds (a.k.a. splits) to use in the cross-validation.
decoding_n_splits = 5
###############################################################################
# TIME-FREQUENCY
# --------------
#
# ``time_frequency_conditions`` : list
# The conditions to compute time-frequency decomposition on.
time_frequency_conditions = ['coherent']
###############################################################################
# SOURCE SPACE PARAMETERS
# -----------------------
#
# ``spacing`` : str
# The spacing to use. Can be ``'ico#'`` for a recursively subdivided
# icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
# ``'all'`` for all points, or an integer to use appoximate
# distance-based spacing (in mm).
spacing = 'oct6'
# ``mindist`` : float
# Exclude points closer than this distance (mm) to the bounding surface.
mindist = 5
# ``loose`` : float in [0, 1] | 'auto'
# Value that weights the source variances of the dipole components
# that are parallel (tangential) to the cortical surface. If loose
# is 0 then the solution is computed with fixed orientation,
# and fixed must be True or "auto".
# If loose is 1, it corresponds to free orientations.
# The default value ('auto') is set to 0.2 for surface-oriented source
# space and set to 1.0 for volumetric, discrete, or mixed source spaces,
# unless ``fixed is True`` in which case the value 0. is used.
loose = 0.2
# ``depth`` : None | float | dict
# If float (default 0.8), it acts as the depth weighting exponent (``exp``)
# to use (must be between 0 and 1). None is equivalent to 0, meaning no
# depth weighting is performed. Can also be a `dict` containing additional
# keyword arguments to pass to :func:`mne.forward.compute_depth_prior`
# (see docstring for details and defaults).
depth = 0.8
# method : "MNE" | "dSPM" | "sLORETA" | "eLORETA"
# Use minimum norm, dSPM (default), sLORETA, or eLORETA.
method = 'dSPM'
# smooth : int | None
# Number of iterations for the smoothing of the surface data.
# If None, smooth is automatically defined to fill the surface
# with non-zero values. The default is spacing=None.
smooth = 10
# ``base_fname_trans`` : str
# The path to the trans files obtained with coregistration.
#
# Example
# ~~~~~~~
# >>> base_fname_trans = '{subject}_' + study_name + '_raw-trans.fif'
# or
# >>> base_fname_trans = '{subject}-trans.fif'
base_fname_trans = '{subject}-trans.fif'
# XXX not needed
# fsaverage_vertices = [np.arange(10242), np.arange(10242)]
# Create the study and subjects directories when missing. ``os.makedirs``
# with ``exist_ok=True`` avoids the check-then-create race of
# ``isdir`` + ``mkdir`` and also creates any missing parent directories.
os.makedirs(study_path, exist_ok=True)
os.makedirs(subjects_dir, exist_ok=True)
###############################################################################
# ADVANCED
# --------
#
# ``l_trans_bandwidth`` : float | 'auto'
# A float that specifies the transition bandwidth of the
# highpass filter. By default it's `'auto'` and uses default mne
# parameters.
l_trans_bandwidth = 'auto'
# ``h_trans_bandwidth`` : float | 'auto'
# A float that specifies the transition bandwidth of the
# lowpass filter. By default it's `'auto'` and uses default mne
# parameters.
h_trans_bandwidth = 'auto'
# ``N_JOBS`` : int
# An integer that specifies how many subjects you want to run in parallel.
N_JOBS = 1
# ``random_state`` : None | int | np.random.RandomState
# To specify the random generator state. This allows to have
# the results more reproducible between machines and systems.
# Some methods like ICA need random values for initialisation.
random_state = 42
# ``shortest_event`` : int
# Minimum number of samples an event must last. If the
# duration is less than this an exception will be raised.
shortest_event = 1
# ``allow_maxshield`` : bool
# To import data that was recorded with Maxshield on before running
# maxfilter set this to True.
allow_maxshield = True
###############################################################################
# CHECKS
# --------
#
# --- --- You should not touch the next lines --- ---
# Maxwell filtering is only meaningful when at least one MEG channel type
# (``meg``, ``grad`` or ``mag``) was requested.
if use_maxwell_filter and set(ch_types).isdisjoint({'meg', 'grad', 'mag'}):
    raise ValueError('Cannot use maxwell filter without MEG channels.')
# The two artifact-removal approaches are mutually exclusive.
if use_ssp and use_ica:
    raise ValueError('Cannot use both SSP and ICA.')