
Commit 2d6ff00

Fixed errors

Signed-off-by: romit <[email protected]>
1 parent 8abb968 · commit 2d6ff00

5 files changed: +33 −9 lines changed


tests/acceleration/test_acceleration_framework.py

Lines changed: 4 additions & 5 deletions
@@ -266,7 +266,7 @@ def test_framework_raises_if_used_with_missing_package():
             copy.deepcopy(MODEL_ARGS),
             copy.deepcopy(DATA_ARGS),
             copy.deepcopy(TRAIN_ARGS),
-            PEFT_LORA_ARGS,
+            copy.deepcopy(PEFT_LORA_ARGS),
             quantized_lora_config=quantized_lora_config,
         )

@@ -322,7 +322,7 @@ def test_framework_raises_due_to_invalid_arguments(
             model_args,
             copy.deepcopy(DATA_ARGS),
             train_args,
-            peft_config,
+            copy.deepcopy(peft_config),
             quantized_lora_config=quantized_lora_config,
         )

@@ -379,7 +379,7 @@ def test_framework_initialized_properly_peft(
         train_args = copy.deepcopy(TRAIN_ARGS)
         train_args.output_dir = tempdir
         train_args.save_strategy = "no"
-        train_args.fp16 = True
+        train_args.bf16 = True
         peft_args = copy.deepcopy(PEFT_LORA_ARGS)
         peft_args.target_modules = ["q_proj", "k_proj"]

@@ -430,7 +430,7 @@ def test_framework_initialized_properly_foak():
         train_args = copy.deepcopy(TRAIN_ARGS)
         train_args.output_dir = tempdir
         train_args.save_strategy = "no"
-        train_args.fp16 = True
+        train_args.bf16 = True
         peft_args = copy.deepcopy(PEFT_LORA_ARGS)
         peft_args.target_modules = ["q_proj", "k_proj"]

@@ -693,7 +693,6 @@ def test_error_raised_with_fused_lora_enabled_without_quantized_argument():
         train_args = copy.deepcopy(TRAIN_ARGS)
         train_args.output_dir = tempdir
         train_args.save_strategy = "no"
-        train_args.fp16 = True
         peft_args = copy.deepcopy(PEFT_LORA_ARGS)
         peft_args.target_modules = ["q_proj", "k_proj"]

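For context on the deepcopy changes above: a minimal sketch (not from the repository, names simplified) of why a module-level fixture such as PEFT_LORA_ARGS is deep-copied before a test mutates it, so that one test's edits to target_modules cannot leak into later tests.

import copy
from dataclasses import dataclass, field


@dataclass
class LoraArgs:
    r: int = 8
    target_modules: list = field(default_factory=lambda: ["q_proj", "v_proj"])


PEFT_LORA_ARGS = LoraArgs()  # module-level default shared by every test in the file


def mutate_config(args):
    # simulates a test tweaking the config object it was handed
    args.target_modules = ["q_proj", "k_proj"]


mutate_config(PEFT_LORA_ARGS)                  # passed directly: the shared default is changed
print(PEFT_LORA_ARGS.target_modules)           # ['q_proj', 'k_proj'] -- leaks into later tests

PEFT_LORA_ARGS = LoraArgs()                    # reset for the safe variant
mutate_config(copy.deepcopy(PEFT_LORA_ARGS))   # deep copy: the shared default stays untouched
print(PEFT_LORA_ARGS.target_modules)           # ['q_proj', 'v_proj']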
tests/artifacts/predefined_data_configs/tokenize_and_apply_input_masking_streaming.yaml

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ datasets:
     data_handlers:
       - name: tokenize_and_apply_input_masking
         arguments:
-          # remove_columns: all
+          remove_columns: all
           batched: false
           fn_kwargs:
             input_column_name: input

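On the remove_columns: all change: a hedged sketch of the behaviour this presumably toggles, assuming the handler forwards the argument to Hugging Face datasets' Dataset.map, where removing all original columns leaves only the fields produced by tokenization.

from datasets import Dataset

ds = Dataset.from_dict({"input": ["hello"], "output": ["world"]})

# "remove_columns: all" is assumed here to resolve to every raw column name,
# so only the mapped fields (input_ids, labels) survive the handler.
tokenized = ds.map(
    lambda example: {"input_ids": [1, 2, 3], "labels": [1, 2, 3]},
    batched=False,
    remove_columns=ds.column_names,
)
print(tokenized.column_names)  # ['input_ids', 'labels']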
tests/build/test_utils.py

Lines changed: 6 additions & 3 deletions
@@ -72,8 +72,8 @@ def test_accelerate_launch_args_user_set_num_processes_ignored(
     assert os.getenv("CUDA_VISIBLE_DEVICES") == "0"


-@patch.dict(os.environ, {"SET_NUM_PROCESSES_TO_NUM_GPUS": "False"})
-def test_accelerate_launch_args_user_set_num_processes(job_config):
+def test_accelerate_launch_args_user_set_num_processes(job_config, monkeypatch):
+    monkeypatch.setenv("SET_NUM_PROCESSES_TO_NUM_GPUS", "False")
     job_config_copy = copy.deepcopy(job_config)
     job_config_copy["accelerate_launch_args"]["num_processes"] = "3"

@@ -100,7 +100,10 @@ def test_accelerate_launch_args_default_fsdp_config_multigpu(job_config):


 @patch("os.path.exists")
-def test_process_accelerate_launch_custom_config_file(patch_path_exists):
+def test_process_accelerate_launch_custom_config_file(patch_path_exists, monkeypatch):
+    monkeypatch.setenv("SET_NUM_PROCESSES_TO_NUM_GPUS", "False")
+    monkeypatch.setenv("CUDA_VISIBLE_DEVICES", "")
+
     patch_path_exists.return_value = True

     dummy_config_path = "dummy_fsdp_config.yaml"

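On the patch.dict → monkeypatch switch: a minimal sketch (hypothetical test, not from the repository) of the pattern the tests move to; pytest's monkeypatch fixture sets the variable only for the duration of the test and restores the environment automatically at teardown.

import os


def test_reads_env_flag(monkeypatch):
    # set the flag for this test only; pytest undoes the change afterwards
    monkeypatch.setenv("SET_NUM_PROCESSES_TO_NUM_GPUS", "False")
    assert os.getenv("SET_NUM_PROCESSES_TO_NUM_GPUS") == "False"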
tests/utils/test_embedding_resize.py

Lines changed: 2 additions & 0 deletions
@@ -47,6 +47,8 @@ def _inference(
 ) -> str:
     device = "cuda" if torch.cuda.is_available() else "cpu"
     tokenized_input = tokenizer(input_text, return_tensors="pt").to(device)
+    model = model.to(device)
+
     generated_output = model.generate(
         **tokenized_input,
         max_new_tokens=max_new_tokens,

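On the model.to(device) addition: a minimal generic sketch (tiny placeholder model, not the one used in the test) of why the inputs and the model weights must sit on the same device before generate() is called.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "sshleifer/tiny-gpt2"  # placeholder model for illustration only
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenized_input = tokenizer("hello", return_tensors="pt").to(device)
model = model.to(device)  # without this, CUDA inputs meet CPU weights and generate() raises

generated_output = model.generate(**tokenized_input, max_new_tokens=5)
print(tokenizer.decode(generated_output[0], skip_special_tokens=True))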
tox.ini

Lines changed: 20 additions & 0 deletions
@@ -61,5 +61,25 @@ deps =
     .[aim,mlflow,clearml,scanner-dev,fms-accel-all]
 setenv =
     CUDA_VISIBLE_DEVICES=0
+commands_pre =
+    pip install --no-build-isolation .[flash-attn]
+commands =
+    pytest tests/acceleration
+    pytest tests/build
+    pytest tests/data
+    pytest tests/trackers
+    pytest tests/trainercontroller
+    pytest tests/utils
+    pytest tests/test_sft_trainer.py
+
+[testenv:gpu]
+description = run all unit tests including requring GPU support
+deps =
+    pytest>=7
+    .[aim,mlflow,clearml,scanner-dev,fms-accel-all]
+setenv =
+    CUDA_VISIBLE_DEVICES=0
+commands_pre =
+    pip install --no-build-isolation .[flash-attn]
 commands =
     pytest {posargs:tests}

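With this split, the existing environment now runs the listed test suites one directory at a time, while the pre-existing pytest {posargs:tests} command falls under the new [testenv:gpu] section, which would be invoked separately (per standard tox usage) as tox -e gpu.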