Merged
19 commits
dac2b94  feat: TinyMyo pretraining and finetuning (MatteoFasulo, Nov 25, 2025)
a9f67ff  feat: add EMG regression head and update TinyMyo model for regression… (MatteoFasulo, Dec 7, 2025)
01d5084  feat: enhance dataset classes and update TinyMyo model documentation … (MatteoFasulo, Dec 8, 2025)
8e40940  fix: update safetensor file path handling in checkpoint conversion sc… (MatteoFasulo, Dec 10, 2025)
0657117  Update TinyMyo model configurations and enhance data handling for pre… (MatteoFasulo, Dec 11, 2025)
cfd6363  Refactor configuration and model files for EMG dataset and TinyMyo model (MatteoFasulo, Dec 11, 2025)
e8106e5  Delete .gitignore (Thoriri, Dec 19, 2025)
9bc3b89  fix: correct parameter settings and addressed suggested changes (MatteoFasulo, Dec 19, 2025)
b604d99  Update training script handling environment variables default values … (MatteoFasulo, Dec 22, 2025)
fe100cb  Add TinyMyo logo image to documentation (MatteoFasulo, Jan 5, 2026)
c35fbb1  Rename TinyMyo logo file (MatteoFasulo, Jan 5, 2026)
964d968  Changed TinyMyo logo file (MatteoFasulo, Jan 5, 2026)
d656af7  Add files via upload (MatteoFasulo, Jan 5, 2026)
2aed0ec  Added TinyMyo arxiv and HuggingFace badge to Readme (MatteoFasulo, Jan 5, 2026)
32e7006  Added TinyMyo arxiv and HuggingFace badge, why TinyMyo section with c… (MatteoFasulo, Jan 5, 2026)
15f674a  Update README and TinyMyo documentation with additional model details… (MatteoFasulo, Jan 7, 2026)
7ee5c65  Add link to scripts for downloading and preprocessing TinyMyo dataset… (MatteoFasulo, Jan 16, 2026)
b1c4e3b  Update README with new HF repo (MatteoFasulo, Jan 21, 2026)
61e60d2  Update README and pyproject.toml with author details, uv requirements… (MatteoFasulo, Jan 28, 2026)
161 changes: 161 additions & 0 deletions .gitignore
@@ -0,0 +1,161 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Hydra outputs
outputs/
37 changes: 37 additions & 0 deletions config/data_module/emg_finetune_data_module.yaml
@@ -0,0 +1,37 @@
# @package _global_
#*----------------------------------------------------------------------------*
#* Copyright (C) 2025 ETH Zurich, Switzerland *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Author: Matteo Fasulo *
#*----------------------------------------------------------------------------*
data_module:
  _target_: data_module.finetune_data_module.FinetuneDataModule
  name: "emg"
  cfg:
    num_workers: ${num_workers}
    batch_size: ${batch_size}
    train:
      _target_: 'datasets.emg_finetune_dataset.EMGDataset'
      hdf5_file: ${env:DATA_PATH}/UCI_EMG/EMG_data_for_gestures-master/h5/train.h5
      finetune: true
    val:
      _target_: 'datasets.emg_finetune_dataset.EMGDataset'
      hdf5_file: ${env:DATA_PATH}/UCI_EMG/EMG_data_for_gestures-master/h5/val.h5
      finetune: true
    test:
      _target_: 'datasets.emg_finetune_dataset.EMGDataset'
      hdf5_file: ${env:DATA_PATH}/UCI_EMG/EMG_data_for_gestures-master/h5/test.h5
      finetune: true
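
Assuming this is a standard Hydra config (the `_target_` keys point at the repository's `FinetuneDataModule` and `EMGDataset` classes), a data module defined this way would typically be built with `hydra.utils.instantiate`. A minimal sketch follows; the root config name, the `env` resolver shim, and the Lightning-style `setup`/`train_dataloader` calls are assumptions, not the repository's documented API:

```python
import os

from hydra import compose, initialize
from hydra.utils import instantiate
from omegaconf import OmegaConf

# The configs interpolate ${env:DATA_PATH}; OmegaConf only ships oc.env,
# so the project presumably registers an "env" resolver like this shim.
OmegaConf.register_new_resolver("env", os.getenv, replace=True)

with initialize(config_path="config", version_base=None):
    cfg = compose(
        config_name="config",  # assumed root config name
        overrides=["data_module=emg_finetune_data_module"],
    )

# Recursively builds FinetuneDataModule, which in turn constructs the
# train/val/test EMGDataset instances from the resolved HDF5 paths.
data_module = instantiate(cfg.data_module)
data_module.setup("fit")
train_loader = data_module.train_dataloader()
```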
40 changes: 40 additions & 0 deletions config/data_module/emg_pretrain_data_module.yaml
@@ -0,0 +1,40 @@
# @package _global_
#*----------------------------------------------------------------------------*
#* Copyright (C) 2025 ETH Zurich, Switzerland *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Author: Matteo Fasulo *
#*----------------------------------------------------------------------------*
data_module:
  _target_: 'data_module.pretrain_data_module.PretrainDataModule'
  name: "emg"
  cfg:
    num_workers: ${num_workers}
    batch_size: ${batch_size}
    test: null
    train_val_split_ratio: 0.8
    datasets:
      demo_dataset: null
      emg2pose:
        _target_: 'datasets.emg_pretrain_dataset.EMGPretrainDataset'
        data_dir: "${env:DATA_PATH}/emg2pose/h5/"
      db6:
        _target_: 'datasets.emg_pretrain_dataset.EMGPretrainDataset'
        data_dir: "${env:DATA_PATH}/ninapro/DB6/h5/"
        pad_up_to_max_chans: 16
      db7:
        _target_: 'datasets.emg_pretrain_dataset.EMGPretrainDataset'
        data_dir: "${env:DATA_PATH}/ninapro/DB7/h5/"
        pad_up_to_max_chans: 16
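
The `pad_up_to_max_chans: 16` option suggests that Ninapro recordings with fewer than 16 electrodes are zero-padded to a common channel layout so they can be batched together with the other pretraining corpora. A minimal sketch of such padding; the helper name and the (channels, time) shape convention are assumptions, not the repository's API:

```python
import numpy as np

def pad_channels(emg: np.ndarray, max_chans: int = 16) -> np.ndarray:
    """Zero-pad a (channels, time) EMG window up to max_chans channels."""
    n_chans, n_samples = emg.shape
    if n_chans >= max_chans:
        return emg[:max_chans]
    pad = np.zeros((max_chans - n_chans, n_samples), dtype=emg.dtype)
    return np.concatenate([emg, pad], axis=0)

# e.g. a 12-channel window becomes a 16-channel one with 4 silent channels
window = np.random.randn(12, 2000).astype(np.float32)
padded = pad_channels(window)
assert padded.shape == (16, 2000)
```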
100 changes: 100 additions & 0 deletions config/experiment/TinyMyo_finetune.yaml
@@ -0,0 +1,100 @@
# @package _global_
#*----------------------------------------------------------------------------*
#* Copyright (C) 2025 ETH Zurich, Switzerland *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Author: Matteo Fasulo *
#*----------------------------------------------------------------------------*
tag: EMG_finetune

gpus: -1
num_nodes: 1
num_workers: 8
batch_size: 32
max_epochs: 50

training: True
final_validate: True
final_test: True
finetune_pretrained: True
resume: False

layerwise_lr_decay: 0.90
scheduler_type: cosine

pretrained_checkpoint_path: null
pretrained_safetensors_path: null

finetuning:
  freeze_layers: False

io:
  base_output_path: ${env:DATA_PATH}
  checkpoint_dirpath: ${env:CHECKPOINT_DIR}/checkpoints
  version: 0

defaults:
  - override /data_module: emg_finetune_data_module
  - override /model: TinyMyo_finetune
  - override /scheduler: cosine
  - override /task: finetune_task_TinyMyo
  - override /criterion: finetune_criterion

masking:
  patch_size: [1, 20]
  masking_ratio: 0.50
  unmasked_loss_coeff: 0.1

input_normalization:
  normalize: False

model:
  num_classes: 6
  classification_type: "ml"

trainer:
  accelerator: gpu
  num_nodes: ${num_nodes}
  devices: ${gpus}
  strategy: auto
  max_epochs: ${max_epochs}

model_checkpoint:
  save_last: True
  monitor: "val_loss"
  mode: "min"
  save_top_k: 1

callbacks:
  early_stopping:
    _target_: 'pytorch_lightning.callbacks.EarlyStopping'
    monitor: "val_loss"
    patience: 7
    mode: "min"
    verbose: True

optimizer:
  optim: 'AdamW'
  lr: 5e-4
  betas: [0.9, 0.98]
  weight_decay: 0.01

scheduler:
  trainer: ${trainer}
  min_lr: 1e-5
  warmup_lr_init: 1e-5
  warmup_epochs: 5
  total_training_opt_steps: ${max_epochs}
  t_in_epochs: True
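
The `layerwise_lr_decay: 0.90` setting, combined with the AdamW parameters above, points at the common finetuning recipe where the deepest encoder block keeps the base learning rate and each earlier block is scaled down by the decay factor. A sketch under stated assumptions; the `.blocks` attribute and the dummy encoder stand in for TinyMyo's actual module layout:

```python
import torch
from torch import nn

class DummyEncoder(nn.Module):
    """Stand-in for the pretrained encoder; only the .blocks layout matters."""
    def __init__(self, depth: int = 4, dim: int = 32):
        super().__init__()
        self.blocks = nn.ModuleList(nn.Linear(dim, dim) for _ in range(depth))

def layerwise_lr_groups(encoder, base_lr=5e-4, decay=0.90):
    """Build one optimizer param group per block with depth-decayed LRs."""
    layers = list(encoder.blocks)
    groups = []
    for depth, layer in enumerate(layers):
        # Deepest block keeps base_lr; each earlier block is scaled by decay.
        scale = decay ** (len(layers) - 1 - depth)
        groups.append({"params": layer.parameters(), "lr": base_lr * scale})
    return groups

encoder = DummyEncoder()
optimizer = torch.optim.AdamW(layerwise_lr_groups(encoder),
                              betas=(0.9, 0.98), weight_decay=0.01)
```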