|
12 | 12 |
|
13 | 13 | """ |
14 | 14 |
|
| 15 | +###################################################################### |
15 | 16 | # .. image:: https://colab.research.google.com/assets/colab-badge.svg |
16 | 17 | # :target: https://colab.research.google.com/github/eeg2025/startkit/blob/main/challenge_1.ipynb |
17 | 18 | # :alt: Open In Colab |
18 | 19 |
|
| 20 | +###################################################################### |
19 | 21 | # Preliminary notes |
20 | 22 | # ----------------- |
21 | 23 | # Before we begin, I just want to make a deal with you, ok? |
|
28 | 30 | # The entire decoding community will only go further when we stop |
29 | 31 | # solving the same problems over and over again, and start working together.
30 | 32 |
|
| 33 | +###################################################################### |
31 | 34 | # How can we use the knowledge from one EEG Decoding task into another? |
32 | 35 | # --------------------------------------------------------------------- |
33 | 36 | # Transfer learning is a widespread technique used in deep learning. It |
|
58 | 61 | # and fine-tuned on data from another condition, evaluating its capacity to |
59 | 62 | # generalize with task-specific fine-tuning. |
60 | 63 |
|
| 64 | +###################################################################### |
61 | 65 | # __________ |
62 | 66 | # |
63 | 67 | # Note: For simplicity purposes, we will only show how to do the decoding |
64 | 68 | # directly in our target task, and it is up to the teams to think about |
65 | 69 | # how to use the passive task to perform the pre-training. |
66 | 70 | # |
| 71 | +###################################################################### |
67 | 72 | # Install dependencies |
68 | 73 | # -------------------- |
69 | 74 | # For the challenge, we will need two significant dependencies: |
|
75 | 80 | # have braindecode as a dependency. |
76 | 81 | # You can just run ``pip install eegdash``.
77 | 82 |
|
| 83 | +###################################################################### |
78 | 84 | # Imports and setup |
79 | 85 | # ----------------- |
80 | 86 | from pathlib import Path |
|
96 | 102 | import copy |
97 | 103 | from joblib import Parallel, delayed |
98 | 104 |
|
| 105 | +###################################################################### |
99 | 106 | # Check GPU availability |
100 | 107 | # ---------------------- |
101 | 108 | # |
|
116 | 123 | ) |
117 | 124 | print(msg) |
118 | 125 |
|
| 126 | +###################################################################### |
119 | 127 | # What are we decoding? |
120 | 128 | # --------------------- |
121 | 129 | # |
122 | 130 | # To start to talk about what we want to analyse, the important thing |
123 | 131 | # is to understand some basic concepts. |
124 | 132 | # |
| 133 | +###################################################################### |
125 | 134 | # The brain decodes the problem |
126 | 135 | # ============================= |
127 | 136 | # |
|
146 | 155 | # is the temporal window length/epoch size over the interval of interest. |
147 | 156 | # Here, :math:`\theta` denotes the parameters learned by the neural network. |
148 | 157 | # |
| 158 | +# ------------------------------------------------------------------------------ |
149 | 159 | # Input/Output definition |
150 | 160 | # --------------------------- |
151 | 161 | # For the competition, the HBN-EEG (Healthy Brain Network EEG Datasets) |
|
159 | 169 | # You can use any model you want, as long as it follows the input/output |
160 | 170 | # definitions above. |
161 | 171 |
|
| 172 | +###################################################################### |
162 | 173 | # Understand the task: Contrast Change Detection (CCD) |
163 | 174 | # -------------------------------------------------------- |
164 | 175 | # If you are interested in gaining more neuroscience insight, we recommend these two references, `HBN-EEG <https://www.biorxiv.org/content/10.1101/2024.10.03.615261v2.full.pdf>`__ and `Langer, N et al. (2017) <https://www.nature.com/articles/sdata201740#Sec2>`__.
|
183 | 194 | # * The **ramp onset**, the **button press**, and the **feedback** are **time-locked events** that yield ERP-like components. |
184 | 195 | # |
185 | 196 | # Your task (**label**) is to predict the response time for the subject during these windows.
186 | | -# |
| 197 | +###################################################################### |
187 | 198 | # In the figure below, we have the timeline representation of the cognitive task: |
188 | | -# |
189 | 199 | # .. image:: https://eeg2025.github.io/assets/img/image-2.jpg |
190 | | -# |
191 | 200 |
|
| 201 | +###################################################################### |
192 | 202 | # Stimulus demonstration |
193 | 203 | # ---------------------- |
194 | | -# |
195 | 204 | # .. raw:: html |
196 | 205 | # |
197 | 206 | # <div class="video-wrapper"> |
|
201 | 210 | # allowfullscreen></iframe> |
202 | 211 | # </div> |
203 | 212 | # |
| 213 | +###################################################################### |
204 | 214 | # PyTorch Dataset for the competition |
205 | 215 | # ----------------------------------- |
206 | 216 | # Now, we have a PyTorch Dataset object that contains the set of recordings for the task
|
238 | 248 | # And to download all the data directly, you can do:
239 | 249 | raws = Parallel(n_jobs=-1)(delayed(lambda d: d.raw)(d) for d in dataset_ccd.datasets) |
240 | 250 |
|
| 251 | +###################################################################### |
241 | 252 | # Alternatives for Downloading the data |
242 | 253 | # ------------------------------------- |
243 | 254 | # |
|
254 | 265 | # aws s3 sync s3://nmdatasets/NeurIPS25/R1_L100_bdf data/R1_L100_bdf --no-sign-request |
255 | 266 |
|
256 | 267 |
|
| 268 | +###################################################################### |
257 | 269 | # Create windows of interest |
258 | 270 | # ----------------------------- |
259 | 271 | # So we epoch after the stimulus moment with an onset shift of 500 ms.
|
307 | 319 | "response_type", |
308 | 320 | ), |
309 | 321 | ) |
| 322 | +###################################################################### |
310 | 323 | # Inspect the label distribution |
311 | 324 | # ------------------------------- |
312 | 325 | import numpy as np |
|
326 | 339 | plt.show() |
327 | 340 |
|
328 | 341 |
|
| 342 | +###################################################################### |
329 | 343 | # Split the data |
330 | 344 | # --------------- |
331 | 345 | # Extract meta information |
|
377 | 391 | print(f"Valid:\t{len(valid_set)}") |
378 | 392 | print(f"Test:\t{len(test_set)}") |
379 | 393 |
|
| 394 | +###################################################################### |
380 | 395 | # Create dataloaders |
381 | 396 | # ------------------- |
382 | 397 | batch_size = 128 |
|
393 | 408 | test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers |
394 | 409 | ) |
395 | 410 |
|
| 411 | +###################################################################### |
396 | 412 | # Build the model |
397 | 413 | # ----------------- |
398 | 414 | # For neural network models, **to start**, we suggest using `braindecode models <https://braindecode.org/1.2/models/models_table.html>`__ zoo. |
|
412 | 428 | model.to(device) |
413 | 429 |
|
414 | 430 |
|
| 431 | +###################################################################### |
415 | 432 | # Define training and validation functions |
416 | 433 | # ------------------------------------------- |
417 | 434 | # The rest is our classic PyTorch/PyTorch Lightning/skorch training pipeline,
@@ -519,6 +536,7 @@ def valid_model( |
519 | 536 | return avg_loss, rmse |
520 | 537 |
|
521 | 538 |
|
| 539 | +###################################################################### |
522 | 540 | # Train the model |
523 | 541 | # ------------------ |
524 | 542 | lr = 1e-3 |
@@ -569,6 +587,7 @@ def valid_model( |
569 | 587 | if best_state is not None: |
570 | 588 | model.load_state_dict(best_state) |
571 | 589 |
|
| 590 | +###################################################################### |
572 | 591 | # Save the model |
573 | 592 | # ----------------- |
574 | 593 | torch.save(model.state_dict(), "weights_challenge_1.pt") |
|
0 commit comments