
Commit 26dff88

Remove old test
1 parent 8075c48 commit 26dff88

1 file changed: 0 additions, 40 deletions


src/tests/test_finetuning.py

Lines changed: 0 additions & 40 deletions
@@ -107,46 +107,6 @@ def test_finetuning(
     assert model.return_value.to.call_count == 0


-# @patch("llama_recipes.finetuning.torch.cuda.is_available")
-# @patch("llama_recipes.finetuning.train")
-# @patch("llama_recipes.finetuning.LlamaForCausalLM.from_pretrained")
-# @patch("llama_recipes.finetuning.AutoTokenizer.from_pretrained")
-# @patch("llama_recipes.finetuning.get_preprocessed_dataset")
-# @patch("llama_recipes.finetuning.generate_peft_config")
-# @patch("llama_recipes.finetuning.get_peft_model")
-# @patch("llama_recipes.finetuning.optim.AdamW")
-# @patch("llama_recipes.finetuning.StepLR")
-# @pytest.mark.parametrize("cuda_is_available", [True, False])
-# def test_finetuning_peft_lora(
-#     step_lr,
-#     optimizer,
-#     get_peft_model,
-#     gen_peft_config,
-#     get_dataset,
-#     tokenizer,
-#     get_model,
-#     train,
-#     cuda,
-#     cuda_is_available,
-# ):
-#     kwargs = {"use_peft": True}
-
-#     get_dataset.return_value = get_fake_dataset()
-#     cuda.return_value = cuda_is_available
-
-#     get_model.return_value.get_input_embeddings.return_value.weight.shape = [0]
-
-#     main(**kwargs)
-
-#     if cuda_is_available:
-#         assert get_peft_model.return_value.to.call_count == 1
-#         assert get_peft_model.return_value.to.call_args.args[0] == "cuda"
-#     else:
-#         assert get_peft_model.return_value.to.call_count == 0
-
-
-
-
 @patch("llama_recipes.finetuning.get_peft_model")
 @patch("llama_recipes.finetuning.setup")
 @patch("llama_recipes.finetuning.train")
