Skip to content

Commit f464693

Browse files
bump PT 2.8 & install with --upgrade-strategy eager (#2107)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent fe3ec59 commit f464693

20 files changed

+62
-53
lines changed

.azure/gpu-test.yml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,13 @@ jobs:
2929
HF_HOME: "/var/tmp/hf/home"
3030
HF_HUB_CACHE: "/var/tmp/hf/hub"
3131
SKIP_WITH_CI: "1"
32+
SKIP_WITH_GPT_CI: "1"
3233
NCCL_DEBUG: "INFO"
34+
TORCHDYNAMO_VERBOSE: "1"
35+
TORCHDYNAMO_INLINE_INBUILT_NN_MODULES: "0"
3336
PYTHON_VERSION: "3.10"
3437
CUDA_VERSION: "12.6.3"
35-
TORCH_VERSION: "2.7.1"
38+
TORCH_VERSION: "2.8.0"
3639
CUDNN_FRONTEND_VERSION: "1.10.0"
3740
CUBLAS_WORKSPACE_CONFIG: ":4096:8"
3841
container:
@@ -64,7 +67,7 @@ jobs:
6467
6568
- script: |
6669
pip install --upgrade pip
67-
pip install '.[extra,test]' "torch==${TORCH_VERSION}" cffi -U
70+
pip install '.[extra,test]' "torch==${TORCH_VERSION}" cffi -U --upgrade-strategy eager
6871
displayName: "Install package & dependencies"
6972
7073
- script: |
@@ -95,7 +98,6 @@ jobs:
9598
PL_RUN_STANDALONE_TESTS: "1"
9699
# NUM_PARALLEL_TESTS: "10"
97100
NCCL_IGNORE_DISABLED_P2P: "1"
98-
NCCL_DEBUG: "INFO"
99101
timeoutInMinutes: "10"
100102
101103
- bash: |
@@ -110,6 +112,4 @@ jobs:
110112
RUN_ONLY_CUDA_TESTS=0 pytest tests/ext_thunder/test_thunder_networks.py -v --durations=50
111113
displayName: "Extra tests for Thunder [main branch]"
112114
condition: eq(variables['dependency'], 'compiler')
113-
env:
114-
TORCHDYNAMO_VERBOSE: "1"
115115
timeoutInMinutes: "10"

.github/workflows/cpu-tests.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ jobs:
119119
python -m lightning_utilities.cli requirements set-oldest --req_files=pyproject.toml
120120
- name: Install dependencies
121121
run: |
122-
pip install '.[extra,compiler,test]' -U
122+
pip install '.[extra,compiler,test]' -U --upgrade-strategy eager
123123
pip list
124124
125125
- name: Run tests

litgpt/data/alpaca.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,12 @@
77
from typing import Optional, Union
88

99
import torch
10-
from lightning_utilities.core.imports import RequirementCache
1110
from torch.utils.data import DataLoader, random_split
1211

1312
from litgpt.data.base import DataModule, SFTDataset, get_sft_collate_fn
1413
from litgpt.prompts import PromptStyle
1514
from litgpt.tokenizer import Tokenizer
15+
from litgpt.utils import _REQUESTS_AVAILABLE
1616

1717
_URL = "https://raw.githubusercontent.com/tloen/alpaca-lora/main/alpaca_data_cleaned_archive.json"
1818

@@ -115,9 +115,8 @@ def download_if_missing(file_path: Path, file_url: str, mode: str = "w", stream:
115115
"""Downloads the raw json data file and saves it in the given destination."""
116116
if file_path.exists() and file_path.stat().st_size > 0:
117117
return
118-
requests_available = RequirementCache("requests")
119-
if not requests_available:
120-
raise ModuleNotFoundError(str(requests_available))
118+
if not _REQUESTS_AVAILABLE:
119+
raise ModuleNotFoundError(str(_REQUESTS_AVAILABLE))
121120
import requests
122121

123122
response = requests.get(file_url, stream=stream)

litgpt/data/prepare_starcoder.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,9 @@
55
import traceback
66
from pathlib import Path
77

8-
from lightning_utilities.core.imports import RequirementCache
9-
108
from litgpt.tokenizer import Tokenizer
11-
from litgpt.utils import CLI, extend_checkpoint_dir
9+
from litgpt.utils import _LITDATA_AVAILABLE, CLI, extend_checkpoint_dir
1210

13-
_LITDATA_AVAILABLE = RequirementCache("litdata")
1411
if _LITDATA_AVAILABLE:
1512
from litdata.processing.data_processor import DataChunkRecipe
1613
else:

litgpt/deploy/serve.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,10 @@
66
from typing import Any, Dict, Literal, Optional
77

88
import torch
9-
from lightning_utilities.core.imports import RequirementCache
109

1110
from litgpt.api import LLM
12-
from litgpt.utils import auto_download_checkpoint
11+
from litgpt.utils import _JINJA2_AVAILABLE, _LITSERVE_AVAILABLE, auto_download_checkpoint
1312

14-
_LITSERVE_AVAILABLE = RequirementCache("litserve")
15-
_JINJA2_AVAILABLE = RequirementCache("jinja2")
1613
if _LITSERVE_AVAILABLE:
1714
from litserve import LitAPI, LitServer
1815
from litserve.specs.openai import ChatCompletionRequest, OpenAISpec

litgpt/finetune/adapter.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
from lightning.fabric.plugins import BitsandbytesPrecision
1414
from lightning.fabric.strategies import FSDPStrategy
1515
from lightning.fabric.utilities import ThroughputMonitor
16-
from lightning_utilities.core.imports import RequirementCache
1716
from torch.utils.data import ConcatDataset, DataLoader
1817
from torchmetrics import RunningMean
1918

@@ -24,6 +23,7 @@
2423
from litgpt.prompts import save_prompt_style
2524
from litgpt.tokenizer import Tokenizer
2625
from litgpt.utils import (
26+
_BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0,
2727
CycleIterator,
2828
auto_download_checkpoint,
2929
check_nvlink_connectivity,
@@ -108,7 +108,7 @@ def setup(
108108
if quantize is not None and quantize.startswith("bnb."):
109109
if "mixed" in precision:
110110
raise ValueError("Quantization and mixed precision is not supported.")
111-
if RequirementCache("bitsandbytes != 0.42.0"):
111+
if _BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0:
112112
warnings.warn(
113113
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
114114
)

litgpt/finetune/adapter_v2.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
from lightning.fabric.plugins import BitsandbytesPrecision
1414
from lightning.fabric.strategies import FSDPStrategy
1515
from lightning.fabric.utilities import ThroughputMonitor
16-
from lightning_utilities.core.imports import RequirementCache
1716
from torch.utils.data import ConcatDataset, DataLoader
1817
from torchmetrics import RunningMean
1918

@@ -24,6 +23,7 @@
2423
from litgpt.prompts import save_prompt_style
2524
from litgpt.tokenizer import Tokenizer
2625
from litgpt.utils import (
26+
_BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0,
2727
CycleIterator,
2828
auto_download_checkpoint,
2929
check_nvlink_connectivity,
@@ -110,7 +110,7 @@ def setup(
110110
if quantize is not None and quantize.startswith("bnb."):
111111
if "mixed" in precision:
112112
raise ValueError("Quantization and mixed precision is not supported.")
113-
if RequirementCache("bitsandbytes != 0.42.0"):
113+
if _BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0:
114114
warnings.warn(
115115
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
116116
)

litgpt/finetune/lora.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
from lightning.fabric.plugins import BitsandbytesPrecision
1414
from lightning.fabric.strategies import ModelParallelStrategy
1515
from lightning.fabric.utilities import ThroughputMonitor
16-
from lightning_utilities.core.imports import RequirementCache
1716
from torch.utils.data import ConcatDataset, DataLoader
1817
from torchmetrics import RunningMean
1918

@@ -25,6 +24,7 @@
2524
from litgpt.scripts.merge_lora import merge_lora
2625
from litgpt.tokenizer import Tokenizer
2726
from litgpt.utils import (
27+
_BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0,
2828
CycleIterator,
2929
auto_download_checkpoint,
3030
check_nvlink_connectivity,
@@ -140,7 +140,7 @@ def setup(
140140
if quantize is not None and quantize.startswith("bnb."):
141141
if "mixed" in precision:
142142
raise ValueError("Quantization and mixed precision is not supported.")
143-
if RequirementCache("bitsandbytes != 0.42.0"):
143+
if _BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0:
144144
warnings.warn(
145145
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
146146
)

litgpt/finetune/lora_legacy.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
from lightning.fabric.plugins import BitsandbytesPrecision
1414
from lightning.fabric.strategies import FSDPStrategy
1515
from lightning.fabric.utilities import ThroughputMonitor
16-
from lightning_utilities.core.imports import RequirementCache
1716
from torch.utils.data import ConcatDataset, DataLoader
1817
from torchmetrics import RunningMean
1918

@@ -25,6 +24,7 @@
2524
from litgpt.scripts.merge_lora import merge_lora
2625
from litgpt.tokenizer import Tokenizer
2726
from litgpt.utils import (
27+
_BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0,
2828
CycleIterator,
2929
auto_download_checkpoint,
3030
check_nvlink_connectivity,
@@ -139,7 +139,7 @@ def setup(
139139
if quantize is not None and quantize.startswith("bnb."):
140140
if "mixed" in precision:
141141
raise ValueError("Quantization and mixed precision is not supported.")
142-
if RequirementCache("bitsandbytes != 0.42.0"):
142+
if _BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0:
143143
warnings.warn(
144144
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
145145
)

litgpt/generate/adapter.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,13 @@
1010
import lightning as L
1111
import torch
1212
from lightning.fabric.plugins import BitsandbytesPrecision
13-
from lightning_utilities.core.imports import RequirementCache
1413

1514
from litgpt import PromptStyle, Tokenizer
1615
from litgpt.adapter import GPT, Config
1716
from litgpt.generate.base import generate
1817
from litgpt.prompts import has_prompt_style, load_prompt_style
1918
from litgpt.utils import (
19+
_BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0,
2020
check_file_size_on_cpu_and_warn,
2121
check_valid_checkpoint_dir,
2222
extend_checkpoint_dir,
@@ -83,7 +83,7 @@ def main(
8383
if quantize is not None and quantize.startswith("bnb."):
8484
if "mixed" in precision:
8585
raise ValueError("Quantization and mixed precision is not supported.")
86-
if RequirementCache("bitsandbytes != 0.42.0"):
86+
if _BITANDBYTES_AVAILABLE_NOT_EQUAL_0_42_0:
8787
warnings.warn(
8888
"LitGPT only supports bitsandbytes v0.42.0. This may result in errors when using quantization."
8989
)

0 commit comments

Comments (0)