Skip to content

Commit 0ce00ad

Browse files
author
George Ohashi
committed
merge main
1 parent 5edf461 commit 0ce00ad

File tree

9 files changed

+9
-16
lines changed

9 files changed

+9
-16
lines changed

src/llmcompressor/__init__.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,6 @@
3838
active_session,
3939
callbacks,
4040
create_session,
41-
finalize,
42-
initialize,
4341
reset_session,
4442
)
45-
from llmcompressor.entrypoints import Oneshot, oneshot
43+
from llmcompressor.entrypoints import Oneshot, oneshot, train

src/llmcompressor/core/__init__.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,6 @@
1313
active_session,
1414
callbacks,
1515
create_session,
16-
finalize,
17-
initialize,
1816
reset_session,
1917
)
2018
from llmcompressor.core.state import Data, Hardware, ModifiedState, State
@@ -35,8 +33,6 @@
3533
"create_session",
3634
"active_session",
3735
"reset_session",
38-
"initialize",
39-
"finalize",
4036
"apply",
4137
"callbacks",
4238
"LifecycleCallbacks",

src/llmcompressor/transformers/finetune/session_mixin.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from transformers.trainer_callback import TrainerState
1212
from transformers.trainer_utils import get_last_checkpoint
1313

14-
from llmcompressor.core import active_session, callbacks, create_session, finalize
14+
from llmcompressor.core import active_session, callbacks, create_session
1515
from llmcompressor.metrics import LoggerManager
1616
from llmcompressor.modifiers.distillation.utils.pytorch.model_wrapper import (
1717
KDModelWrapper,
@@ -182,7 +182,7 @@ def finalize_session(self):
182182

183183
with summon_full_params_context(self.model, offload_to_cpu=True):
184184
# in order to update each layer we need to gathers all its parameters
185-
finalize()
185+
active_session().finalize()
186186
logger.info("Finalized LLM Compressor session")
187187
model = get_session_model()
188188
self.model = model

src/llmcompressor/transformers/finetune/text_generation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ def oneshot(**kwargs) -> None:
4949

5050
@deprecated(
5151
message=(
52-
"`from llmcompressor.transformers import train` is deprecated, "
52+
"`from llmcompressor.transformers import train` is deprecated, "
5353
"please use `from llmcompressor import train`."
5454
)
5555
)

tests/llmcompressor/transformers/finetune/test_finetune_no_recipe_custom_dataset.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818

1919
class TestFinetuneNoRecipeCustomDataset(unittest.TestCase):
2020
def _test_finetune_wout_recipe_custom_dataset(self):
21-
from llmcompressor.transformers import train
21+
from llmcompressor import train
2222

2323
dataset_path = Path(tempfile.mkdtemp())
2424

tests/llmcompressor/transformers/finetune/test_finetune_without_recipe.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ def setUp(self):
2020
self.output = "./finetune_output"
2121

2222
def test_finetune_without_recipe(self):
23-
from llmcompressor.transformers import train
23+
from llmcompressor import train
2424

2525
recipe_str = None
2626
device = "cuda:0"

tests/llmcompressor/transformers/finetune/test_oneshot_then_finetune.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,9 @@
66
from transformers import AutoModelForCausalLM
77
from transformers.utils.quantization_config import CompressedTensorsConfig
88

9-
from llmcompressor import oneshot
9+
from llmcompressor import oneshot, train
1010
from llmcompressor.core import create_session
1111
from llmcompressor.modifiers.quantization import QuantizationModifier
12-
from llmcompressor.transformers import train
1312

1413

1514
@pytest.mark.unit

tests/llmcompressor/transformers/finetune/test_safetensors.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ def setUp(self):
2222
self.output = Path("./finetune_output")
2323

2424
def test_safetensors(self):
25-
from llmcompressor.transformers import train
25+
from llmcompressor import train
2626

2727
device = "cuda:0"
2828
output_dir = self.output / "output1"

tests/llmcompressor/transformers/test_clear_ml.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
except Exception:
1111
is_clearml = False
1212

13-
from llmcompressor.transformers import train
13+
from llmcompressor import train
1414

1515

1616
@pytest.mark.skipif(not is_clearml, reason="clearML not installed")

Comments (0)