@@ -107,46 +107,6 @@ def test_finetuning(
     assert model.return_value.to.call_count == 0


-# @patch("llama_recipes.finetuning.torch.cuda.is_available")
-# @patch("llama_recipes.finetuning.train")
-# @patch("llama_recipes.finetuning.LlamaForCausalLM.from_pretrained")
-# @patch("llama_recipes.finetuning.AutoTokenizer.from_pretrained")
-# @patch("llama_recipes.finetuning.get_preprocessed_dataset")
-# @patch("llama_recipes.finetuning.generate_peft_config")
-# @patch("llama_recipes.finetuning.get_peft_model")
-# @patch("llama_recipes.finetuning.optim.AdamW")
-# @patch("llama_recipes.finetuning.StepLR")
-# @pytest.mark.parametrize("cuda_is_available", [True, False])
-# def test_finetuning_peft_lora(
-#     step_lr,
-#     optimizer,
-#     get_peft_model,
-#     gen_peft_config,
-#     get_dataset,
-#     tokenizer,
-#     get_model,
-#     train,
-#     cuda,
-#     cuda_is_available,
-# ):
-#     kwargs = {"use_peft": True}
-
-#     get_dataset.return_value = get_fake_dataset()
-#     cuda.return_value = cuda_is_available
-
-#     get_model.return_value.get_input_embeddings.return_value.weight.shape = [0]
-
-#     main(**kwargs)
-
-#     if cuda_is_available:
-#         assert get_peft_model.return_value.to.call_count == 1
-#         assert get_peft_model.return_value.to.call_args.args[0] == "cuda"
-#     else:
-#         assert get_peft_model.return_value.to.call_count == 0
-
-
-
-
 @patch("llama_recipes.finetuning.get_peft_model")
 @patch("llama_recipes.finetuning.setup")
 @patch("llama_recipes.finetuning.train")
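A note on the parameter order in the removed test (and in the tests that remain below): stacked @patch decorators hand their mocks to the test function bottom-up, so the decorator closest to the function supplies the first parameter (step_lr for StepLR, then optimizer for AdamW, and so on, ending with cuda for torch.cuda.is_available). A minimal, self-contained sketch of that ordering, using hypothetical stand-in targets (os.getcwd / os.listdir) rather than anything from llama_recipes:

import os
from unittest.mock import patch

# Stacked @patch decorators are applied bottom-up, so the decorator closest
# to the function provides the first mock argument. The patched targets here
# are stand-ins chosen only to illustrate the ordering.
@patch("os.getcwd")   # outermost decorator -> last mock argument
@patch("os.listdir")  # innermost decorator -> first mock argument
def check_order(listdir_mock, getcwd_mock):
    listdir_mock.return_value = ["a.txt"]
    getcwd_mock.return_value = "/tmp"
    # The real functions are replaced by the mocks for the duration of the call.
    assert os.listdir(".") == ["a.txt"]
    assert os.getcwd() == "/tmp"

check_order()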