diff --git a/label_studio_ml/examples/segment_anything_2_image/README.md b/label_studio_ml/examples/segment_anything_2_image/README.md index 97e97b3d..9548668b 100644 --- a/label_studio_ml/examples/segment_anything_2_image/README.md +++ b/label_studio_ml/examples/segment_anything_2_image/README.md @@ -133,16 +133,27 @@ cd label_studio_ml/examples/segment_anything_2_image pip install -r requirements.txt ``` -2. Download [`segment-anything-2` repo](https://github.com/facebookresearch/segment-anything-2) into the root directory. Install SegmentAnything model and download checkpoints using [the official Meta documentation](https://github.com/facebookresearch/segment-anything-2?tab=readme-ov-file#installation) - +2. Download [`segment-anything-2` repo](https://github.com/facebookresearch/sam2) into the root directory. Install SegmentAnything model and download checkpoints using [the official Meta documentation](https://github.com/facebookresearch/sam2?tab=readme-ov-file#installation) +You should now have the following folder structure: + + | root directory + | label-studio-ml-backend + | label_studio_ml + | examples + | segment_anything_2_image + | sam2 + | sam2 + | checkpoints 3. Then you can start the ML backend on the default port `9090`: ```bash -cd ../ -label-studio-ml start ./segment_anything_2_image +cd ~/sam2 +label-studio-ml start ../label-studio-ml-backend/label_studio_ml/examples/segment_anything_2_image ``` +Due to breaking changes from Meta [HERE](https://github.com/facebookresearch/sam2/blob/c2ec8e14a185632b0a5d8b161928ceb50197eddc/sam2/build_sam.py#L20), it is CRUCIAL that you run this command from the sam2 directory at your root directory. + 4. Connect running ML backend server to Label Studio: go to your project `Settings -> Machine Learning -> Add Model` and specify `http://localhost:9090` as a URL. Read more in the official [Label Studio documentation](https://labelstud.io/guide/ml#Connect-the-model-to-Label-Studio). 
## Running with Docker (coming soon) diff --git a/label_studio_ml/examples/segment_anything_2_image/model.py b/label_studio_ml/examples/segment_anything_2_image/model.py index 9d29c9f1..86118e4b 100644 --- a/label_studio_ml/examples/segment_anything_2_image/model.py +++ b/label_studio_ml/examples/segment_anything_2_image/model.py @@ -1,6 +1,7 @@ import torch import numpy as np import os +import sys import pathlib from typing import List, Dict, Optional from uuid import uuid4 @@ -9,14 +10,16 @@ from label_studio_sdk.converter import brush from label_studio_sdk._extensions.label_studio_tools.core.utils.io import get_local_path from PIL import Image + +ROOT_DIR = os.getcwd() +sys.path.insert(0, ROOT_DIR) from sam2.build_sam import build_sam2 from sam2.sam2_image_predictor import SAM2ImagePredictor DEVICE = os.getenv('DEVICE', 'cuda') -SEGMENT_ANYTHING_2_REPO_PATH = os.getenv('SEGMENT_ANYTHING_2_REPO_PATH', 'segment-anything-2') -MODEL_CONFIG = os.getenv('MODEL_CONFIG', 'sam2_hiera_l.yaml') -MODEL_CHECKPOINT = os.getenv('MODEL_CHECKPOINT', 'sam2_hiera_large.pt') +MODEL_CONFIG = os.getenv('MODEL_CONFIG', 'configs/sam2.1/sam2.1_hiera_l.yaml') +MODEL_CHECKPOINT = os.getenv('MODEL_CHECKPOINT', 'sam2.1_hiera_large.pt') if DEVICE == 'cuda': # use bfloat16 for the entire notebook @@ -29,7 +32,7 @@ # build path to the model checkpoint -sam2_checkpoint = str(pathlib.Path(__file__).parent / SEGMENT_ANYTHING_2_REPO_PATH / "checkpoints" / MODEL_CHECKPOINT) +sam2_checkpoint = str(os.path.join(ROOT_DIR, "checkpoints", MODEL_CHECKPOINT)) sam2_model = build_sam2(MODEL_CONFIG, sam2_checkpoint, device=DEVICE)