diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index cc41777..835d4e8 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -10,4 +10,4 @@ RUN curl -L -o /usr/local/bin/pixi -fsSL --compressed "https://github.com/prefix USER vscode WORKDIR /home/vscode -RUN echo 'eval "$(pixi completion -s bash)"' >> /home/vscode/.bashrc \ No newline at end of file +RUN echo 'eval "$(pixi completion -s bash)"' >> /home/vscode/.bashrc diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 0f5b875..870261a 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,24 +1,17 @@ { - "name": "MultiQC_SAV Dev Container", - "runArgs": ["--platform=linux/amd64" ], - "build": { - "dockerfile": "Dockerfile", - "context": ".." - }, - "customizations": { - "vscode": { - "settings": {}, - "extensions": [ - "ms-python.python", - "charliermarsh.ruff", - "GitHub.copilot" - ] - } - }, - "features": { - }, - "mounts": [ - "source=${localWorkspaceFolderBasename}-pixi,target=${containerWorkspaceFolder}/.pixi,type=volume" - ], - "postCreateCommand": "sudo chown vscode .pixi && pixi install" -} \ No newline at end of file + "name": "MultiQC_SAV Dev Container", + "runArgs": ["--platform=linux/amd64"], + "build": { + "dockerfile": "Dockerfile", + "context": ".." 
+ }, + "customizations": { + "vscode": { + "settings": {}, + "extensions": ["ms-python.python", "charliermarsh.ruff", "GitHub.copilot"] + } + }, + "features": {}, + "mounts": ["source=${localWorkspaceFolderBasename}-pixi,target=${containerWorkspaceFolder}/.pixi,type=volume"], + "postCreateCommand": "sudo chown vscode .pixi && pixi install" +} diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 0000000..39f641b --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,24 @@ +name: "Lint" + +on: + push: + branches: [main, master] + pull_request: + +jobs: + pre-commit: + name: Pre-commit + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Run pre-commit + uses: pre-commit/action@v3.0.1 diff --git a/.github/workflows/linux.yaml b/.github/workflows/linux.yaml index 226e46a..5de8bd8 100644 --- a/.github/workflows/linux.yaml +++ b/.github/workflows/linux.yaml @@ -1,52 +1,84 @@ -name: "Build - Linux" -on: [push, pull_request] +name: Generate test reports + +on: + push: + branches: [main, dev] + pull_request: jobs: run_multiqc: name: Linux - Python ${{ matrix.python-version }} runs-on: ubuntu-latest strategy: + fail-fast: false matrix: - python-version: [3.11, 3.12, 3.13] + python-version: ["3.9", "3.14"] timeout-minutes: 10 steps: - # Check out MultiQC code - - uses: actions/checkout@v2 + - name: Checkout code + uses: actions/checkout@v4 - # Set up Python - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - # Update pip and install beautifulsoup4 for CI tests (CSP checking) - - name: Install dependencies for CI tests + - name: Install dependencies run: | - python -m pip install --upgrade pip setuptools beautifulsoup4 multiqc + python -m pip install 
--upgrade pip setuptools wheel + pip install multiqc - # Install MultiQC - name: Install MultiQC_SAV run: pip install . - # Run all of the tests! - - name: MiSeq - run: multiqc -m SAV test_data/MiSeq + - name: Make test output dir + run: mkdir test_output - - name: MiSeqI100 - run: multiqc --strict -v --no-version-check -m SAV test_data/MiSeqI100 + - name: Test MiSeq + run: | + multiqc --strict test_data/MiSeq -o test_output/MiSeq + test -f test_output/MiSeq/multiqc_report.html || (echo "ERROR: Report not generated" && exit 1) - - name: HiSeq3000 - run: multiqc --strict -v --no-version-check -m SAV test_data/HiSeq3000 + - name: Test MiSeqI100 + run: | + multiqc --strict test_data/MiSeqI100 -o test_output/MiSeqI100 + test -f test_output/MiSeqI100/multiqc_report.html || (echo "ERROR: Report not generated" && exit 1) - - name: NextSeq500 - run: multiqc --strict -v --no-version-check -m SAV test_data/NextSeq500 + - name: Test HiSeq3000 + run: | + multiqc --strict test_data/HiSeq3000 -o test_output/HiSeq3000 + test -f test_output/HiSeq3000/multiqc_report.html || (echo "ERROR: Report not generated" && exit 1) - - name: NextSeq2000 - run: multiqc --strict -v --no-version-check -m SAV test_data/NextSeq2000 + - name: Test NextSeq500 + run: | + multiqc --strict test_data/NextSeq500 -o test_output/NextSeq500 + test -f test_output/NextSeq500/multiqc_report.html || (echo "ERROR: Report not generated" && exit 1) - - name: NovaSeq6000 - run: multiqc --strict -v --no-version-check -m SAV test_data/NovaSeq6000 + - name: Test NextSeq2000 + run: | + multiqc --strict test_data/NextSeq2000 -o test_output/NextSeq2000 + test -f test_output/NextSeq2000/multiqc_report.html || (echo "ERROR: Report not generated" && exit 1) - - name: NovaSeqX - run: multiqc --strict -v --no-version-check -m SAV test_data/NovaSeqX \ No newline at end of file + - name: Test NovaSeq6000 + run: | + multiqc --strict test_data/NovaSeq6000 -o test_output/NovaSeq6000 + test -f 
test_output/NovaSeq6000/multiqc_report.html || (echo "ERROR: Report not generated" && exit 1) + + - name: Test NovaSeqX + run: | + multiqc --strict test_data/NovaSeqX -o test_output/NovaSeqX + test -f test_output/NovaSeqX/multiqc_report.html || (echo "ERROR: Report not generated" && exit 1) + + - name: Check that ignoring samples works + run: | + multiqc --strict test_data/ -o test_output/ignore_samples --ignore-samples "*" + test ! -f test_output/ignore_samples/multiqc_report.html || (echo "ERROR: Report should not be generated" && exit 1) + + - name: Upload test output + uses: actions/upload-artifact@v4 + if: always() + with: + name: test-output-py${{ matrix.python-version }} + path: test_output/ + retention-days: 7 diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml new file mode 100644 index 0000000..769d3e1 --- /dev/null +++ b/.github/workflows/publish.yaml @@ -0,0 +1,31 @@ +name: "Publish to PyPI" + +on: + release: + types: [published] + +jobs: + publish: + name: Publish to PyPI + runs-on: ubuntu-latest + environment: pypi + permissions: + id-token: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install build dependencies + run: pip install build + + - name: Build package + run: python -m build + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..af1f300 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,55 @@ +minimum_pre_commit_version: "2.9.2" + +repos: + # Meta hooks for verification + - repo: meta + hooks: + - id: identity + - id: check-hooks-apply + + # Standard pre-commit hooks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: "v5.0.0" + hooks: + - id: check-added-large-files + args: ["--maxkb=1000"] + - id: check-merge-conflict + - id: check-yaml + - id: check-toml + 
- id: debug-statements + - id: end-of-file-fixer + exclude: ^(test_data|docs/example)/ + - id: trailing-whitespace + exclude: ^(test_data|docs/example)/ + - id: mixed-line-ending + args: ["--fix=lf"] + exclude: ^(test_data|docs/example)/ + + # Line ending normalization + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: "v1.5.5" + hooks: + - id: remove-crlf + exclude: ^(test_data|docs/example)/ + + # Prettier for markdown, yaml, json formatting + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.1.0" + hooks: + - id: prettier + types_or: [markdown, yaml, json] + additional_dependencies: + - "prettier@3.1.0" + + # Ruff formatting + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: "v0.8.3" + hooks: + - id: ruff-format + + # Ruff linting with auto-fix + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: "v0.8.3" + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix] diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000..8cfe595 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,30 @@ +# Build artifacts +dist/ +build/ +*.egg-info/ + +# Python cache +__pycache__/ +*.pyc + +# Test data +test_data/ + +# Virtual environments +venv/ +.venv/ +env/ + +# Coverage +htmlcov/ +.coverage + +# IDE +.idea/ +.vscode/ + +# Example reports +docs/example/ + +# Lock files +*.lock diff --git a/.prettierrc.js b/.prettierrc.js new file mode 100644 index 0000000..ff9d568 --- /dev/null +++ b/.prettierrc.js @@ -0,0 +1,10 @@ +module.exports = { + printWidth: 120, + tabWidth: 2, + useTabs: false, + semi: true, + singleQuote: false, + trailingComma: "all", + bracketSpacing: true, + proseWrap: "preserve", +}; diff --git a/.vscode/settings.json b/.vscode/settings.json index ba2a6c0..a281e49 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,4 @@ { "python-envs.defaultEnvManager": "ms-python.python:system", "python-envs.pythonProjects": [] -} \ No newline at end of file +} diff --git a/CLAUDE.md 
b/CLAUDE.md new file mode 100644 index 0000000..2ae3317 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,160 @@ +# CLAUDE.md + +This file provides guidance for Claude Code (claude.ai/code) when working with the MultiQC_SAV plugin. + +## Project Overview + +MultiQC_SAV is a plugin for [MultiQC](https://multiqc.info/) that parses InterOp files from Illumina sequencers and generates tables and graphs for quality control metrics. It leverages the [Illumina InterOp Python API](https://github.com/Illumina/interop) to read binary metric files. + +## Project Structure + +``` +MultiQC_SAV/ +├── multiqc_sav/ # Main plugin package +│ ├── __init__.py # Package metadata +│ ├── multiqc_sav.py # Plugin hook (before_config) +│ └── modules/ +│ ├── __init__.py +│ └── sav/ +│ ├── __init__.py +│ └── sav.py # Main SAV module implementation +├── test_data/ # Test datasets for various sequencers +│ ├── HiSeq3000/ +│ ├── MiSeq/ +│ ├── MiSeqI100/ +│ ├── NextSeq500/ +│ ├── NextSeq2000/ +│ ├── NovaSeq6000/ +│ └── NovaSeqX/ +├── .devcontainer/ # VS Code devcontainer configuration +├── .github/workflows/ # CI/CD workflows +│ ├── linux.yaml # Build and test workflow +│ ├── lint.yaml # Linting workflow (pre-commit) +│ └── publish.yaml # PyPI publish workflow +├── pyproject.toml # Project configuration and dependencies +├── .pre-commit-config.yaml # Pre-commit hooks configuration +├── .prettierrc.js # Prettier formatter config +└── .prettierignore # Prettier exclusions +``` + +## Development Setup + +```bash +# Install in development mode with dev dependencies +pip install -e ".[dev]" + +# Install pre-commit hooks +pre-commit install + +# Or use pixi (if available) +pixi install +``` + +## Common Commands + +```bash +# Run linting +ruff check . +ruff format --check . + +# Auto-fix linting issues +ruff check --fix . +ruff format . 
+ + # Run prettier on markdown/yaml + prettier --check "**/*.{md,yaml,yml,json}" + + # Run tests with test data + multiqc --strict -v --no-version-check -m SAV test_data/MiSeq + multiqc --strict -v --no-version-check -m SAV test_data/HiSeq3000 + multiqc --strict -v --no-version-check -m SAV test_data/NextSeq500 + multiqc --strict -v --no-version-check -m SAV test_data/NextSeq2000 + multiqc --strict -v --no-version-check -m SAV test_data/NovaSeq6000 + multiqc --strict -v --no-version-check -m SAV test_data/NovaSeqX + + # Run all pre-commit hooks + pre-commit run --all-files + ``` + + ## Code Style Guidelines + + - **Line length**: 120 characters + - **Python version**: 3.9+ (CI tests 3.9 and 3.14, the oldest and newest supported) + - **Formatting**: Ruff (format + lint) + - **Lint rules**: E, F, W, I (pycodestyle, pyflakes, isort) + - **Imports**: Sorted by isort (via ruff) + + ## MultiQC Module Architecture + + ### Plugin Registration + + The plugin uses entry points in `pyproject.toml`: + + ```toml + [project.entry-points."multiqc.hooks.v1"] + before_config = "multiqc_sav.multiqc_sav:sav_execution_start" + + [project.entry-points."multiqc.modules.v1"] + SAV = "multiqc_sav.modules.sav.sav:SAVModule" + ``` + + ### Hook System + + `multiqc_sav.py` contains the `sav_execution_start()` hook which runs before config loading: + + - Logs the plugin version + - Registers the SAV module in the module order with tags (DNA, RNA, BCL, Demultiplex) + - Disables the built-in InterOp module to avoid duplicate data + - Configures search patterns: + - `SAV/RunInfo`: `RunInfo.xml` + - `SAV/RunParameters`: `RunParameters.xml` + - `SAV/InterOp`: `InterOp/*.bin` + - `bclconvert/runinfo`: shared `RunInfo.xml` + + ### Main Module + + `modules/sav/sav.py` contains the `SAVModule` class which extends `BaseMultiqcModule`: + + 1. **Initialization**: Minimal class that sets up module name and anchor + 2. **InterOp Processing**: The `add_interop_sections()` function processes InterOp data + 3. 
**Metrics Loading**: Uses InterOp API to read binary metric files +4. **Data Processing**: Parses metrics into pandas DataFrames +5. **Visualization**: Generates MultiQC plots (tables, bargraphs, heatmaps, linegraphs, scatter plots) + +### Key Functions + +- `add_interop_sections(module)`: Main entry point for adding InterOp visualizations +- `_add_summary_sections()`: Read and lane summary tables +- `_add_qscore_sections()`: Q-score heatmaps and histograms +- `_add_imaging_sections()`: Imaging metrics visualizations + +## Supported Sequencers + +- MiSeq +- MiSeq (Illumina Connected) +- HiSeq 3000/4000 +- NextSeq 500/550 +- NextSeq 1000/2000 +- NovaSeq 6000 +- NovaSeq X/X Plus + +## Key Dependencies + +- `interop>=1.7.0,<2` - Illumina InterOp Python API for reading binary metrics +- `multiqc>=1.25` - MultiQC framework +- `pandas` - Data manipulation +- `numpy` - Numerical operations + +## Testing + +Tests are run via GitHub Actions on Python 3.9 and 3.14 (the oldest and newest supported versions). Each test: + +1. Runs MultiQC with the SAV module on test data +2. Verifies that `multiqc_report.html` is generated +3. Checks that the SAV module appears in the log + +## CI/CD + +- **Build workflow** (`linux.yaml`): Tests installation and module execution with report validation +- **Lint workflow** (`lint.yaml`): Runs pre-commit hooks (ruff, prettier) +- **Publish workflow** (`publish.yaml`): Publishes to PyPI on release using trusted publishing diff --git a/multiqc_sav/modules/sav/sav.py b/multiqc_sav/modules/sav/sav.py index 6043cd2..4996ad8 100644 --- a/multiqc_sav/modules/sav/sav.py +++ b/multiqc_sav/modules/sav/sav.py @@ -5,25 +5,26 @@ and generate advanced visualizations. 
""" +import contextlib import glob import logging import os import re -from typing import Dict, List, Optional +from typing import Any, Optional import interop import numpy as np import pandas as pd from interop import py_interop_plot from multiqc import config -from multiqc.base_module import BaseMultiqcModule, ModuleNoSamplesFound +from multiqc.base_module import BaseMultiqcModule from multiqc.plots import bargraph, heatmap, linegraph, scatter, table from multiqc.utils import mqc_colour log = logging.getLogger(__name__) # Table headers for summary metrics -HEADERS: Dict[str, Dict] = { +HEADERS: dict[str, dict] = { "Error Rate": { "title": "Error Rate (%)", "description": "The calculated error rate, as determined by a PhiX spike-in", @@ -240,15 +241,17 @@ }, } + class SAVModule(BaseMultiqcModule): - def __init__(self): - super(SAVModule, self).__init__( + def __init__(self) -> None: + super().__init__( name="SAV", anchor="SAV", info=" - Illumina SAV InterOp Analysis", ) -def add_interop_sections(module) -> None: + +def add_interop_sections(module: BaseMultiqcModule) -> None: """ Add InterOp-based sections to the SAV module. 
@@ -293,7 +296,7 @@ def add_interop_sections(module) -> None: break -def _add_summary_sections(module, run_metrics) -> None: +def _add_summary_sections(module: BaseMultiqcModule, run_metrics: Any) -> None: """Add read and lane summary table sections.""" log.info("Gathering summary metrics") @@ -357,7 +360,7 @@ def _add_summary_sections(module, run_metrics) -> None: log.debug("Could not generate lane summary: %s", e) -def _add_qscore_sections(module, run_metrics) -> None: +def _add_qscore_sections(module: BaseMultiqcModule, run_metrics: Any) -> None: """Add Q-score heatmap and histogram sections.""" log.info("Generating Q-score plots") @@ -371,10 +374,8 @@ def _add_qscore_sections(module, run_metrics) -> None: data_buffer = np.zeros((rows, cols), dtype=np.float32) data = py_interop_plot.heatmap_data() - try: + with contextlib.suppress(py_interop_plot.invalid_filter_option): py_interop_plot.plot_qscore_heatmap(run_metrics, options, data, data_buffer.ravel()) - except py_interop_plot.invalid_filter_option: - pass # data_buffer shape is (rows=qscores, cols=cycles) # heatmap expects: rows match ycats, cols match xcats @@ -421,10 +422,10 @@ def _add_qscore_sections(module, run_metrics) -> None: options = py_interop_plot.filter_options(run_metrics.run_info().flowcell().naming_method()) py_interop_plot.plot_qscore_histogram(run_metrics, options, bar_data) - hist: Dict = {} - qscore: List = [] - reads: List = [] - binsize: List = [] + hist: dict = {} + qscore: list = [] + reads: list = [] + binsize: list = [] for i in range(bar_data.size()): qscore = [bar_data.at(i).at(j).x() for j in range(bar_data.at(i).size())] @@ -459,7 +460,7 @@ def _add_qscore_sections(module, run_metrics) -> None: log.debug("Could not generate Q-score histogram: %s", e) -def _add_imaging_sections(module, run_metrics) -> None: +def _add_imaging_sections(module: BaseMultiqcModule, run_metrics: Any) -> None: """Add imaging-related sections (intensity per cycle, % PF vs % occupied).""" 
log.info("Gathering imaging metrics") @@ -492,9 +493,9 @@ def _parse_read_summary( read_metrics: pd.DataFrame, non_index_metrics: pd.DataFrame, total_metrics: pd.DataFrame, -) -> Dict: +) -> dict: """Parse read summary DataFrames into dict format.""" - table_data: Dict = _parse_reads(read_metrics) + table_data: dict = _parse_reads(read_metrics) for _, data in non_index_metrics.iterrows(): table_data["Non-Indexed"] = data.to_dict() @@ -505,10 +506,10 @@ def _parse_read_summary( return table_data -def _parse_lane_summary(data: pd.DataFrame) -> Dict: +def _parse_lane_summary(data: pd.DataFrame) -> dict: """Parse lane summary DataFrame into dict format.""" lanes = data.groupby("Lane") - table_data: Dict = {} + table_data: dict = {} for lane, reads in lanes: reads_dict = _parse_reads(reads, key_prefix=f"Lane {lane}") @@ -517,9 +518,9 @@ def _parse_lane_summary(data: pd.DataFrame) -> Dict: return table_data -def _parse_reads(reads_df: pd.DataFrame, key_prefix: Optional[str] = None) -> Dict: +def _parse_reads(reads_df: pd.DataFrame, key_prefix: Optional[str] = None) -> dict: """Utility function to parse a reads DataFrame to dict.""" - reads_dict: Dict = {} + reads_dict: dict = {} reads_df = reads_df.set_index("ReadNumber") for read, data in reads_df.iterrows(): @@ -531,14 +532,14 @@ def _parse_reads(reads_df: pd.DataFrame, key_prefix: Optional[str] = None) -> Di return reads_dict -def _parse_imaging_table(data: pd.DataFrame) -> Dict: +def _parse_imaging_table(data: pd.DataFrame) -> dict: """Parse imaging table DataFrame for intensity and occupancy plots.""" cscale = mqc_colour.mqc_colour_scale() colors = cscale.get_colours("Dark2") per_lane = data.groupby("Lane") - occ_pf: Dict = {} - intensity_cycle: Dict = {} + occ_pf: dict = {} + intensity_cycle: dict = {} for lane, lane_data in per_lane: lane_int = None @@ -591,10 +592,10 @@ def _parse_imaging_table(data: pd.DataFrame) -> Dict: return {"intensity_cycle": intensity_cycle, "occ_vs_pf": occ_pf} -def 
_clusters_lane_plot(data: Dict): +def _clusters_lane_plot(data: dict) -> Any: """Generate clusters/reads per lane bar plot.""" - cluster_data: Dict = {} - read_data: Dict = {} + cluster_data: dict = {} + read_data: dict = {} for value in data.values(): lane = int(value["Lane"]) @@ -632,9 +633,9 @@ def _clusters_lane_plot(data: Dict): ) -def _intensity_cycle_plot(data: Dict): +def _intensity_cycle_plot(data: dict) -> Any: """Generate intensity per cycle line plot.""" - key_color_dict: Dict = {} + key_color_dict: dict = {} for key in data: if re.match(r"\w+/red", key, re.IGNORECASE): @@ -662,7 +663,7 @@ def _intensity_cycle_plot(data: Dict): ) -def _occ_vs_pf_plot(data: Dict): +def _occ_vs_pf_plot(data: dict) -> Any: """Generate % PF vs % Occupied scatter plot.""" return scatter.plot( data, diff --git a/multiqc_sav/multiqc_sav.py b/multiqc_sav/multiqc_sav.py index 6c0c521..9512f68 100644 --- a/multiqc_sav/multiqc_sav.py +++ b/multiqc_sav/multiqc_sav.py @@ -6,11 +6,11 @@ log = logging.getLogger("multiqc") -def sav_execution_start(): +def sav_execution_start() -> None: # Plugin's version number defined in pyproject.toml: version = importlib_metadata.version("multiqc_sav") - log.debug("Running MultiQC SAV Plugin v{}".format(version)) + log.debug(f"Running MultiQC SAV Plugin v{version}") log.debug("SAV - Updating config") # Add module to module order @@ -30,9 +30,10 @@ def sav_execution_start(): update_dict(config.sp, {"bclconvert/runinfo": {"fn": "RunInfo.xml", "shared": True}}) # Set SAV file search patterns update_dict( - config.sp, { + config.sp, + { "SAV/RunInfo": {"fn": "RunInfo.xml", "shared": True}, "SAV/RunParameters": {"fn": "RunParameters.xml", "shared": True}, - "SAV/InterOp": {"fn_re": "InterOp/.*\\.bin"} - } + "SAV/InterOp": {"fn_re": "InterOp/.*\\.bin"}, + }, ) diff --git a/pyproject.toml b/pyproject.toml index 59a45d6..f1504a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,8 +31,8 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: 
Scientific/Engineering :: Bio-Informatics", "Topic :: Scientific/Engineering :: Visualization", -] -requires-python = ">=3.9,<=3.13" +] +requires-python = ">=3.9" dependencies = [ "multiqc>=1.25", "pandas", @@ -40,9 +40,18 @@ dependencies = [ "interop>=1.7.0,<2", ] +[project.optional-dependencies] +dev = [ + "pre-commit", + "ruff", + "mypy", + "pytest", +] + [project.urls] Homepage = "https://github.com/MultiQC/MultiQC_SAV" Repository = "https://github.com/MultiQC/MultiQC_SAV" +Issues = "https://github.com/MultiQC/MultiQC_SAV/issues" [project.entry-points."multiqc.modules.v1"] SAV = "multiqc_sav.modules.sav.sav:SAVModule" @@ -57,9 +66,12 @@ include = ["multiqc_sav*"] line-length = 120 [tool.ruff.lint] -select = ["E", "F", "W", "I"] +select = ["E", "F", "W", "I", "UP", "SIM"] ignore = ["E501"] +[tool.mypy] +ignore_missing_imports = true + [tool.pixi.workspace] channels = ["conda-forge","bioconda"] platforms = ["linux-64"]