diff --git a/README.md b/README.md index 6628674bb..493ddd396 100644 --- a/README.md +++ b/README.md @@ -33,179 +33,17 @@ This repository includes external repositories as Git submodules, so, unless you git submodule update --init --recursive ``` -### Create a Conda Environment +GTSfM supports two installation methods. Choose the one that best fits your workflow: -To run GTSfM, first, we need to create a conda environment with the required dependencies. +### Option 1: Conda Setup (Recommended for most users) -[Install MiniConda](https://www.anaconda.com/docs/getting-started/miniconda/install) if needed, then: +For detailed Conda installation instructions, see **[conda-setup.md](docs/setup/conda-setup.md)** -On **Linux**, with CUDA support, run: -```bash -conda env create -f environment_linux.yml -conda activate gtsfm-v1 # you may need "source activate gtsfm-v1" depending upon your bash and conda set-up -``` -Check your cuda version then install `torch_scatter` from pre-built wheels - -For example, for CUDA 12.1 → use cu121 -```bash -pip install torch-scatter -f https://data.pyg.org/whl/torch-2.5.1+cu121.html -``` - -To use PACE, use the same linux installation `environment_linux.yml` - -Then add `dask-cuda` -```bash -conda install -c rapidsai -c conda-forge dask-cuda -``` - - -On **macOS**, there is no CUDA support, so run: - -```bash -conda env create -f environment_mac.yml -conda activate gtsfm-v1 -``` - -### Install `gtsfm` as a module - -Now, install `gtsfm` as a module: - -```bash -pip install -e . -``` - -Make sure that you can run `python -c "import gtsfm; import gtsam; print('hello world')"` in python, and you are good to go! 
- - -#### Set Up GTSfM with UV - -**Basic Installation:** -Navigate to the GTSfM directory: - -```bash -cd path/to/gtsfm - -# Clean existing environment (if any) -rm -rf .venv - -# Install on Linux for CPU only and macOS -uv sync --python 3.10 - -# Install on Linux with CUDA GPU -uv sync --python 3.10 --extra complete - -# Install torch-scatter (platform-specific) -# For Linux (CUDA 12.1): -uv pip install torch-scatter --find-links https://data.pyg.org/whl/torch-2.5.1+cu121.html - -``` - -**Multi-GPU Installation (For Distributed Computing):** - -If you have multiple GPUs on the same machine and want to use Dask for distributed GPU computing: - -```bash -# Multiple GPUs per node (e.g., 4x or 8x A100) -uv sync --python 3.10 --extra complete --extra multi-gpu -``` - -This adds `dask-cuda` for GPU-aware distributed scheduling. - -**When do you need `--extra multi-gpu`?** -- You have multiple GPUs on the same machine -- You want to use Dask to distribute work across GPUs -- You're running on a GPU cluster node - -**When you DON'T need it:** -- Single GPU workstation -- Laptop with one GPU -- CPU-only machines -- Multiple machines (handled differently) +### Option 2: UV Setup (Fast alternative package manager) -**Add System Level Package** -```bash -# Linux -sudo apt-get install nodejs npm -sudo apt-get install graphviz -# macOS -brew install node -brew install graphviz -``` - -**Verify installation** -```bash -uv run python -c "import gtsfm; import pydegensac; import torch; import torch_scatter; print('✅ Success!')" -``` - -#### Go to test using UV -```bash -uv run ./run --dataset_dir tests/data/set1_lund_door \ ---config_name unified_binary.yaml \ ---loader olsson \ ---num_workers 2 graph_partitioner.max_depth=1 -``` - -```bash -uv run .github/scripts/execute_single_benchmark.sh skydio-8 lightglue 15 colmap-loader 760 true -``` - -#### Managing Packages with UV - -**Adding a new package:** -```bash -uv add -# Example: uv add numpy -``` - -**Adding a development 
dependency:** -```bash -uv add --dev -# Example: uv add --dev pytest -``` - -**Removing a package:** -```bash -uv remove -# Example: uv remove numpy -``` - -**Installing a package without adding to dependencies:** -```bash -uv pip install -``` - -#### When to Use `uv lock` - -The `uv lock` command updates the lock file (`uv.lock`) without installing packages. Use it when: - -- **After manually editing `pyproject.toml`:** When you directly modify dependencies in the configuration file -- **To update dependencies:** When you want to resolve and lock new versions without installing -- **In CI/CD pipelines:** To ensure reproducible builds by generating a lock file -- **Before committing changes:** To update the lock file for team members - -```bash -# Update lock file after editing pyproject.toml -uv lock - -# Update lock file and install packages -uv lock && uv sync -``` - -> **Note:** `uv add` and `uv remove` automatically update the lock file, so you typically don't need to run `uv lock` manually after these commands. - -#### Running Commands with UV - -Once set up, prefix Python commands with `uv run`: - -```bash -# Run GTSfM -uv run python -m gtsfm.runner --config_name deep_front_end.yaml - -# Or use the run script -uv run ./run --dataset_dir tests/data/set1_lund_door --config_name deep_front_end.yaml -``` +For detailed UV installation instructions, see **[uv-setup.md](docs/setup/uv-setup.md)** -Make sure that you can run `python -c "import gtsfm; import gtsam; print('hello world')"` in python, and you are good to go! +Both methods will allow you to run GTSfM successfully. 
## Try It on Google Colab @@ -320,7 +158,7 @@ Refer to the detailed guide: ### Running GTSFM on a Multi-Machine Cluster For users who want to run GTSFM on a **cluster of multiple machines**, follow the setup instructions here: -📄 [CLUSTER.md](https://github.com/borglab/gtsfm/tree/master/CLUSTER.md) +📄 [CLUSTER.md](https://github.com/borglab/gtsfm/tree/master/docs/deployment/CLUSTER.md) ### Where Are the Results Stored? @@ -422,7 +260,7 @@ GTSfM is designed in a modular way. Each module can be swapped out with a new on ## Contributing -Contributions are always welcome! Please be aware of our [contribution guidelines for this project](CONTRIBUTING.md). +Contributions are always welcome! Please be aware of our [contribution guidelines for this project](docs/CONTRIBUTING.md). ## Citing this work diff --git a/CONTRIBUTING.md b/docs/CONTRIBUTING.md similarity index 100% rename from CONTRIBUTING.md rename to docs/CONTRIBUTING.md diff --git a/CLUSTER.md b/docs/deployment/CLUSTER.md similarity index 100% rename from CLUSTER.md rename to docs/deployment/CLUSTER.md diff --git a/docs/setup/conda-setup.md b/docs/setup/conda-setup.md new file mode 100644 index 000000000..abce2b651 --- /dev/null +++ b/docs/setup/conda-setup.md @@ -0,0 +1,51 @@ +# Conda Setup Guide + +This guide covers installation of GTSfM using Conda package manager. + +## Prerequisites + +- [MiniConda](https://www.anaconda.com/docs/getting-started/miniconda/install) installed on your system + +## Create a Conda Environment + +To run GTSfM, first, we need to create a conda environment with the required dependencies. 
+ +### Linux (with CUDA support) + +```bash +conda env create -f environment_linux.yml +conda activate gtsfm-v1 # you may need "source activate gtsfm-v1" depending upon your bash and conda set-up +``` + +Check your cuda version then install `torch_scatter` from pre-built wheels + +For example, for CUDA 12.1, use cu121 +```bash +pip install torch-scatter -f https://data.pyg.org/whl/torch-2.5.1+cu121.html +``` + +For CUDA 12.8 or newer (e.g. a driver that reports CUDA 13), use cu128 +```bash +pip install torch-scatter --find-links https://data.pyg.org/whl/torch-2.7.0+cu128.html +``` + +### macOS (no CUDA support) + +```bash +conda env create -f environment_mac.yml +conda activate gtsfm-v1 +``` + +## Install `gtsfm` as a module + +Now, install `gtsfm` as a module: + +```bash +pip install -e . +``` + +## Verify Installation + + +Make sure that you can run `python -c "import gtsfm; import gtsam; print('hello world')"` in python, and you are good to go! + diff --git a/docs/setup/uv-setup.md b/docs/setup/uv-setup.md new file mode 100644 index 000000000..cfb5ca7e0 --- /dev/null +++ b/docs/setup/uv-setup.md @@ -0,0 +1,176 @@ +# UV Setup Guide + +This guide covers installation of GTSfM using UV, a fast Python package manager. + +## Install UV + +```bash +curl -LsSf https://astral.sh/uv/install.sh | sh +``` + +## Install System Packages + +Before setting up the Python environment, install required system packages: + +### Linux +```bash +sudo apt-get install nodejs npm graphviz +``` + +### macOS +```bash +brew install node graphviz +``` + +## Basic Installation + +Navigate to the GTSfM directory: + +```bash +cd path/to/gtsfm + +# Clean existing environment (if any) +rm -rf .venv + +# Install on Linux for CPU only and macOS +uv sync --python 3.10 + +# Install on Linux with CUDA GPU +uv sync --python 3.10 --extra complete +``` +## Install torch-scatter (platform-specific) +For NVIDIA drivers 550+ (which support CUDA 12.8), +use cu128 because that's what PyTorch 2.7.0 was compiled with. 
+PyTorch brings its own CUDA runtime, system CUDA version doesn't matter. + +```bash +uv pip install torch-scatter --find-links https://data.pyg.org/whl/torch-2.7.0+cu128.html + +``` + +## Multi-GPU Installation (For Distributed Computing) + +If you have multiple GPUs on the same machine and want to use Dask for distributed GPU computing: + +```bash +# Multiple GPUs per node (e.g., 4x or 8x A100) +uv sync --python 3.10 --extra complete --extra multi-gpu +``` + +This adds `dask-cuda` for GPU-aware distributed scheduling. + +### When do you need `--extra multi-gpu`? +- You have multiple GPUs on the same machine +- You want to use Dask to distribute work across GPUs +- You're running on a GPU cluster node + +### When you DON'T need it: +- Single GPU workstation +- Laptop with one GPU +- CPU-only machines +- Multiple machines (handled differently) + +## Verify Installation + +```bash +uv run python -c "import gtsfm; import pydegensac; import torch; import torch_scatter; print('✅ Success')" +``` + +## Quick Test + +Test the installation with a sample dataset: + +```bash +uv run ./run --dataset_dir tests/data/set1_lund_door \ +--config_name unified_binary.yaml \ +--loader olsson \ +--num_workers 2 graph_partitioner.max_depth=1 +``` + +Or run a benchmark: + +```bash +uv run .github/scripts/execute_single_benchmark.sh skydio-8 lightglue 15 colmap-loader 760 true +``` + +## Managing Packages with UV + +### Adding a new package: +```bash +uv add <package> +# Example: uv add numpy +``` + +### Adding a development dependency: +```bash +uv add --dev <package> +# Example: uv add --dev pytest +``` + +### Removing a package: +```bash +uv remove <package> +# Example: uv remove numpy +``` + +### Installing a package without adding to dependencies: +```bash +uv pip install <package> +``` + +## When to Use `uv lock` + +The `uv lock` command updates the lock file (`uv.lock`) without installing packages. 
Use it when: + +- **After manually editing `pyproject.toml`:** When you directly modify dependencies in the configuration file +- **To update dependencies:** When you want to resolve and lock new versions without installing +- **In CI/CD pipelines:** To ensure reproducible builds by generating a lock file +- **Before committing changes:** To update the lock file for team members + +```bash +# Update lock file after editing pyproject.toml +uv lock + +# Update lock file and install packages +uv lock && uv sync +``` + +> **Note:** `uv add` and `uv remove` automatically update the lock file, so you typically don't need to run `uv lock` manually after these commands. + +## Running Commands with UV + +You have two options for running Python commands with UV: + +### Option 1: Activate the Virtual Environment + +Activate the virtual environment once per shell session: + +**Linux/macOS:** +```bash +source .venv/bin/activate +``` + +After activation, you can run commands directly without the `uv run` prefix: + +```bash +# Verify installation +python -c "import gtsfm; import gtsam; print('hello world')" + +``` + +To deactivate the environment: +```bash +deactivate +``` + +### Option 2: Use `uv run` Prefix + +Prefix Python commands with `uv run` (no activation needed): + +```bash +# Verify installation +uv run python -c "import gtsfm; import gtsam; print('hello world')" + +``` + + diff --git a/pyproject.toml b/pyproject.toml index f99839e8c..227f3ec8e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,8 +68,8 @@ dependencies = [ "wget", # PyTorch (sources configured below for GPU/CPU selection) - "torch==2.5.1", - "torchvision==0.20.1", + "torch>=2.5.0,<2.8.0", + "torchvision>=0.20.0,<0.23.0", # Computer vision "opencv-python>=4.5.4.60", @@ -112,7 +112,7 @@ dependencies = [ # ============================================================================ [project.optional-dependencies] complete = [ - "xformers==0.0.29 ; sys_platform == 'linux'", + "xformers==0.0.30 ; 
sys_platform == 'linux'", ] # Multi-GPU support (only needed for distributed computing with multiple GPUs per node) @@ -123,7 +123,7 @@ multi-gpu = [ # All optional features combined (for power users and clusters) all = [ - "xformers==0.0.29 ; sys_platform == 'linux'", + "xformers==0.0.30 ; sys_platform == 'linux'", "dask-cuda>=24.8.0 ; sys_platform == 'linux'", ] @@ -149,13 +149,13 @@ dev = [ # Dependency Sources # ============================================================================ [tool.uv.sources] -# PyTorch: Use CUDA 12.1 on Linux, CPU on macOS +# PyTorch: Use CUDA 12.8 on Linux (for Blackwell support), CPU on macOS torch = [ - { index = "pytorch-cu121", marker = "sys_platform == 'linux'" }, + { index = "pytorch-cu128", marker = "sys_platform == 'linux'" }, { index = "pytorch-cpu", marker = "sys_platform == 'darwin'" }, ] torchvision = [ - { index = "pytorch-cu121", marker = "sys_platform == 'linux'" }, + { index = "pytorch-cu128", marker = "sys_platform == 'linux'" }, { index = "pytorch-cpu", marker = "sys_platform == 'darwin'" }, ] @@ -172,6 +172,11 @@ name = "pytorch-cu121" url = "https://download.pytorch.org/whl/cu121" explicit = true +[[tool.uv.index]] +name = "pytorch-cu128" +url = "https://download.pytorch.org/whl/cu128" +explicit = true + # ============================================================================ # Build System # ============================================================================