From 5baf347d52e1544261bbd24ff1aa1dcc212f53eb Mon Sep 17 00:00:00 2001 From: Jayesh Tanna Date: Thu, 20 Nov 2025 21:55:59 +0530 Subject: [PATCH 1/6] adding test cases --- .../tests/finetuning/test_finetuning.py | 411 ++++++++++++------ .../tests/finetuning/test_finetuning_async.py | 2 +- 2 files changed, 286 insertions(+), 127 deletions(-) diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index 0c71336b45ea..f5f2e0c25915 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -14,9 +14,16 @@ reason="Skipped because we cannot record network calls with AOAI client", ) class TestFineTuning(TestBase): + + SFT_JOB_TYPE = "sft" + DPO_JOB_TYPE = "dpo" + RFT_JOB_TYPE = "rft" + + STANDARD_TRAINING_TYPE = "Standard" + GLOBAL_STANDARD_TRAINING_TYPE = "GlobalStandard" def _create_sft_finetuning_job( - self, openai_client, train_file_id, validation_file_id, model_type="openai", training_type="Standard" + self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai" ): """Helper method to create a supervised fine-tuning job.""" return openai_client.fine_tuning.jobs.create( @@ -36,12 +43,12 @@ def _create_sft_finetuning_job( extra_body={"trainingType": training_type}, ) - def _create_dpo_finetuning_job(self, openai_client, train_file_id, validation_file_id): + def _create_dpo_finetuning_job(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): """Helper method to create a DPO fine-tuning job.""" return openai_client.fine_tuning.jobs.create( training_file=train_file_id, validation_file=validation_file_id, - model=self.test_finetuning_params["dpo"]["openai"]["model_name"], + model=self.test_finetuning_params["dpo"][model_type]["model_name"], method={ "type": "dpo", "dpo": { @@ -52,10 +59,10 @@ def _create_dpo_finetuning_job(self, openai_client, train_file_id, validation_fi } }, }, - extra_body={"trainingType": "Standard"}, + extra_body={"trainingType": training_type}, ) - def _create_rft_finetuning_job(self, openai_client, train_file_id, validation_file_id): + def _create_rft_finetuning_job(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): """Helper method to create an RFT fine-tuning job.""" grader = { "name": "Response Quality Grader", @@ -73,7 +80,7 @@ def _create_rft_finetuning_job(self, openai_client, train_file_id, validation_fi return openai_client.fine_tuning.jobs.create( training_file=train_file_id, validation_file=validation_file_id, - model=self.test_finetuning_params["rft"]["openai"]["model_name"], + model=self.test_finetuning_params["rft"][model_type]["model_name"], method={ "type": "reinforcement", "reinforcement": { @@ -88,7 +95,7 @@ def _create_rft_finetuning_job(self, openai_client, train_file_id, validation_fi }, }, }, - extra_body={"trainingType": "Standard"}, + extra_body={"trainingType": training_type}, ) def _upload_test_files(self, openai_client, job_type="sft"): @@ -103,7 +110,7 @@ def _upload_test_files(self, openai_client, job_type="sft"): assert train_processed_file is not None assert train_processed_file.id is not None TestBase.assert_equal_or_not_none(train_processed_file.status, "processed") - print(f"[test_finetuning_{job_type}] Uploaded training file: {train_processed_file.id}") + print(f"[test_finetuning] Uploaded training file: {train_processed_file.id}") with open(validation_file_path, "rb") 
as f: validation_file = openai_client.files.create(file=f, purpose="fine-tune") @@ -111,173 +118,351 @@ def _upload_test_files(self, openai_client, job_type="sft"): assert validation_processed_file is not None assert validation_processed_file.id is not None TestBase.assert_equal_or_not_none(validation_processed_file.status, "processed") - print(f"[test_finetuning_{job_type}] Uploaded validation file: {validation_processed_file.id}") + print(f"[test_finetuning] Uploaded validation file: {validation_processed_file.id}") return train_processed_file, validation_processed_file - def _cleanup_test_files(self, openai_client, train_file, validation_file, job_type): - """Helper method to clean up uploaded files after testing.""" - openai_client.files.delete(train_file.id) - print(f"[test_finetuning_{job_type}] Deleted training file: {train_file.id}") - - openai_client.files.delete(validation_file.id) - print(f"[test_finetuning_{job_type}] Deleted validation file: {validation_file.id}") - - @servicePreparer() - @recorded_by_proxy - def test_sft_finetuning_create_job(self, **kwargs): + def _cleanup_test_file(self, openai_client, file_id): + """Helper method to clean up uploaded file.""" + openai_client.files.delete(file_id) + print(f"[test_finetuning] Deleted file: {file_id}") + def _test_cancel_job_helper(self, job_type, model_type, training_type, expected_method_type, **kwargs): + """Helper method for testing canceling fine-tuning jobs across different configurations.""" + with self.create_client(**kwargs) as project_client: - with project_client.get_openai_client() as openai_client: + + train_file, validation_file = self._upload_test_files(openai_client, job_type) + + if job_type == self.SFT_JOB_TYPE: + fine_tuning_job = self._create_sft_finetuning_job( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + elif job_type == self.DPO_JOB_TYPE: + fine_tuning_job = self._create_dpo_finetuning_job( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + elif job_type == self.RFT_JOB_TYPE: + fine_tuning_job = self._create_rft_finetuning_job( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + else: + raise ValueError(f"Unsupported job type: {job_type}") + + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Created job: {fine_tuning_job.id}") + + cancelled_job = openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Cancelled job: {cancelled_job.id}") + + # Validate the cancelled job + TestBase.validate_fine_tuning_job(cancelled_job, expected_job_id=fine_tuning_job.id) + TestBase.assert_equal_or_not_none(cancelled_job.status, "cancelled") + TestBase.assert_equal_or_not_none(cancelled_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(cancelled_job.validation_file, validation_file.id) + + # Validate method type + assert cancelled_job.method is not None, f"Method should not be None for {job_type} job" + TestBase.assert_equal_or_not_none(cancelled_job.method.type, expected_method_type) + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Method validation passed - type: {cancelled_job.method.type}") + + # Verify cancellation persisted by retrieving the job + retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Verified cancellation persisted for job: {retrieved_job.id}") + 
TestBase.validate_fine_tuning_job( + retrieved_job, expected_job_id=fine_tuning_job.id, expected_status="cancelled" + ) + + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) - train_file, validation_file = self._upload_test_files(openai_client, "sft") - - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id) - print(f"[test_finetuning_sft] Created fine-tuning job: {fine_tuning_job.id}") - + def _test_sft_create_job_helper(self, model_type, training_type, **kwargs): + """Helper method for testing SFT fine-tuning job creation across different configurations.""" + + with self.create_client(**kwargs) as project_client: + with project_client.get_openai_client() as openai_client: + + train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) + + fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, training_type, model_type) + print(f"[test_finetuning_sft_{model_type}_{training_type}] Created fine-tuning job: {fine_tuning_job.id}") + TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for SFT job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "supervised") - print(f"[test_finetuning_sft] SFT method validation passed - type: {fine_tuning_job.method.type}") - + print(f"[test_finetuning_sft_{model_type}_{training_type}] SFT method validation passed - type: {fine_tuning_job.method.type}") + + # For OSS models, validate the specific model name + if model_type == "oss": + TestBase.validate_fine_tuning_job( + fine_tuning_job, expected_model=self.test_finetuning_params["sft"]["oss"]["model_name"] + ) + openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_sft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") + + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) + + def _test_dpo_create_job_helper(self, model_type, training_type, **kwargs): + """Helper method for testing DPO fine-tuning job creation across different configurations.""" + + with self.create_client(**kwargs) as project_client: + with project_client.get_openai_client() as openai_client: + + train_file, validation_file = self._upload_test_files(openai_client, self.DPO_JOB_TYPE) + + fine_tuning_job = self._create_dpo_finetuning_job(openai_client, train_file.id, validation_file.id, training_type, model_type) + print(f"[test_finetuning_dpo_{model_type}_{training_type}] Created DPO fine-tuning job: {fine_tuning_job.id}") + print(fine_tuning_job) + + TestBase.validate_fine_tuning_job(fine_tuning_job) + TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) + assert fine_tuning_job.method is not None, "Method should not be None for DPO job" + TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "dpo") + + 
print(f"[test_finetuning_dpo_{model_type}_{training_type}] DPO method validation passed - type: {fine_tuning_job.method.type}") + + openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_dpo_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") + + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) + + def _test_rft_create_job_helper(self, model_type, training_type, **kwargs): + """Helper method for testing RFT fine-tuning job creation across different configurations.""" + + with self.create_client(**kwargs) as project_client: + with project_client.get_openai_client() as openai_client: + + train_file, validation_file = self._upload_test_files(openai_client, self.RFT_JOB_TYPE) + + fine_tuning_job = self._create_rft_finetuning_job(openai_client, train_file.id, validation_file.id, training_type, model_type) + print(f"[test_finetuning_rft_{model_type}_{training_type}] Created RFT fine-tuning job: {fine_tuning_job.id}") + + TestBase.validate_fine_tuning_job(fine_tuning_job) + TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) + assert fine_tuning_job.method is not None, "Method should not be None for RFT job" + TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "reinforcement") + + print(f"[test_finetuning_rft_{model_type}_{training_type}] RFT method validation passed - type: {fine_tuning_job.method.type}") + + openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_rft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") + + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) - self._cleanup_test_files(openai_client, train_file, validation_file, "sft") + @servicePreparer() + @recorded_by_proxy + def test_sft_finetuning_create_job_openai_standard(self, **kwargs): + """Test creating SFT fine-tuning job with OpenAI model and Standard training.""" + self._test_sft_create_job_helper("openai", self.STANDARD_TRAINING_TYPE, **kwargs) @servicePreparer() @recorded_by_proxy - def test_finetuning_retrieve_job(self, **kwargs): + def test_sft_finetuning_create_job_openai_globalstandard(self, **kwargs): + """Test creating SFT fine-tuning job with OpenAI model and GlobalStandard training.""" + self._test_sft_create_job_helper("openai", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) - with self.create_client(**kwargs) as project_client: + @servicePreparer() + @recorded_by_proxy + def test_sft_finetuning_create_job_oss_globalstandard(self, **kwargs): + """Test creating SFT fine-tuning job with OSS model and GlobalStandard training.""" + self._test_sft_create_job_helper("oss", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) + @servicePreparer() + @recorded_by_proxy + def test_finetuning_retrieve_sft_job(self, **kwargs): + """Test retrieving SFT fine-tuning job.""" + with self.create_client(**kwargs) as project_client: with project_client.get_openai_client() as openai_client: - train_file, validation_file = self._upload_test_files(openai_client, "sft") + train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id) - print(f"[test_finetuning_sft] Created 
job: {fine_tuning_job.id}") + fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_retrieve_sft] Created job: {fine_tuning_job.id}") retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) - print(f"[test_finetuning_sft] Retrieved job: {retrieved_job.id}") + print(f"[test_finetuning_retrieve_sft] Retrieved job: {retrieved_job.id}") TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + assert retrieved_job.method is not None, "Method should not be None for SFT job" + TestBase.assert_equal_or_not_none(retrieved_job.method.type, "supervised") + assert self.test_finetuning_params["sft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['sft']['openai']['model_name']} not found in {retrieved_job.model}" openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_retrieve_sft] Cancelled job: {fine_tuning_job.id}") - self._cleanup_test_files(openai_client, train_file, validation_file, "sft") + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) @servicePreparer() @recorded_by_proxy - def test_finetuning_list_jobs(self, **kwargs): - + def test_finetuning_retrieve_dpo_job(self, **kwargs): + """Test retrieving DPO fine-tuning job.""" with self.create_client(**kwargs) as project_client: - with project_client.get_openai_client() as openai_client: - train_file, validation_file = self._upload_test_files(openai_client, "sft") + train_file, validation_file = self._upload_test_files(openai_client, self.DPO_JOB_TYPE) - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id) - print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") - - jobs_list = list(openai_client.fine_tuning.jobs.list()) - print(f"[test_finetuning_sft] Listed {len(jobs_list)} jobs") + fine_tuning_job = self._create_dpo_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_retrieve_dpo] Created job: {fine_tuning_job.id}") - assert len(jobs_list) > 0 + retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) + print(f"[test_finetuning_retrieve_dpo] Retrieved job: {retrieved_job.id}") - job_ids = [job.id for job in jobs_list] - assert fine_tuning_job.id in job_ids + TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) + TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + assert retrieved_job.method is not None, "Method should not be None for DPO job" + TestBase.assert_equal_or_not_none(retrieved_job.method.type, "dpo") + assert self.test_finetuning_params["dpo"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['dpo']['openai']['model_name']} not found in {retrieved_job.model}" 
openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_retrieve_dpo] Cancelled job: {fine_tuning_job.id}") - self._cleanup_test_files(openai_client, train_file, validation_file, "sft") + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) @servicePreparer() @recorded_by_proxy - def test_finetuning_cancel_job(self, **kwargs): - + def test_finetuning_retrieve_rft_job(self, **kwargs): + """Test retrieving RFT fine-tuning job.""" with self.create_client(**kwargs) as project_client: - with project_client.get_openai_client() as openai_client: - train_file, validation_file = self._upload_test_files(openai_client, "sft") + train_file, validation_file = self._upload_test_files(openai_client, self.RFT_JOB_TYPE) - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id) - print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") + fine_tuning_job = self._create_rft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_retrieve_rft] Created job: {fine_tuning_job.id}") - cancelled_job = openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft] Cancelled job: {cancelled_job.id}") + retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) + print(f"[test_finetuning_retrieve_rft] Retrieved job: {retrieved_job.id}") - TestBase.validate_fine_tuning_job(cancelled_job, expected_job_id=fine_tuning_job.id) - TestBase.assert_equal_or_not_none(cancelled_job.status, "cancelled") + TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) + TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + assert retrieved_job.method is not None, "Method should not be None for RFT job" + TestBase.assert_equal_or_not_none(retrieved_job.method.type, "reinforcement") + assert self.test_finetuning_params["rft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['rft']['openai']['model_name']} not found in {retrieved_job.model}" - retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) - print(f"[test_finetuning_sft] Verified cancellation persisted for job: {retrieved_job.id}") - TestBase.validate_fine_tuning_job( - retrieved_job, expected_job_id=fine_tuning_job.id, expected_status="cancelled" - ) + openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_retrieve_rft] Cancelled job: {fine_tuning_job.id}") - self._cleanup_test_files(openai_client, train_file, validation_file, "sft") + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) @servicePreparer() @recorded_by_proxy - def test_dpo_finetuning_create_job(self, **kwargs): + def test_finetuning_list_jobs(self, **kwargs): with self.create_client(**kwargs) as project_client: with project_client.get_openai_client() as openai_client: - train_file, validation_file = self._upload_test_files(openai_client, "dpo") + train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = 
self._create_dpo_finetuning_job(openai_client, train_file.id, validation_file.id) - print(f"[test_finetuning_dpo] Created DPO fine-tuning job: {fine_tuning_job.id}") - print(fine_tuning_job) + fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") - TestBase.validate_fine_tuning_job(fine_tuning_job) - TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) - TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) - assert fine_tuning_job.method is not None, "Method should not be None for DPO job" - TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "dpo") + jobs_list = list(openai_client.fine_tuning.jobs.list()) + print(f"[test_finetuning_sft] Listed {len(jobs_list)} jobs") + + assert len(jobs_list) > 0 - print(f"[test_finetuning_dpo] DPO method validation passed - type: {fine_tuning_job.method.type}") + job_ids = [job.id for job in jobs_list] + assert fine_tuning_job.id in job_ids openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_dpo] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_sft] Cancelled job: {fine_tuning_job.id}") + + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) - self._cleanup_test_files(openai_client, train_file, validation_file, "dpo") + @servicePreparer() @recorded_by_proxy - def test_rft_finetuning_create_job(self, **kwargs): + def test_sft_cancel_job_openai_standard(self, **kwargs): + """Test canceling SFT fine-tuning job with OpenAI model and Standard training.""" + self._test_cancel_job_helper(self.SFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "supervised", **kwargs) - with self.create_client(**kwargs) as project_client: + @servicePreparer() + @recorded_by_proxy + def test_sft_cancel_job_openai_globalstandard(self, **kwargs): + """Test canceling SFT fine-tuning job with OpenAI model and GlobalStandard training.""" + self._test_cancel_job_helper(self.SFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) - with project_client.get_openai_client() as openai_client: + @servicePreparer() + @recorded_by_proxy + def test_sft_cancel_job_oss_globalstandard(self, **kwargs): + """Test canceling SFT fine-tuning job with OSS model and GlobalStandard training.""" + self._test_cancel_job_helper(self.SFT_JOB_TYPE, "oss", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) - train_file, validation_file = self._upload_test_files(openai_client, "rft") + @servicePreparer() + @recorded_by_proxy + def test_dpo_cancel_job_openai_standard(self, **kwargs): + """Test canceling DPO fine-tuning job with OpenAI model and Standard training.""" + self._test_cancel_job_helper(self.DPO_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "dpo", **kwargs) - fine_tuning_job = self._create_rft_finetuning_job(openai_client, train_file.id, validation_file.id) - print(f"[test_finetuning_rft] Created RFT fine-tuning job: {fine_tuning_job.id}") + @servicePreparer() + @recorded_by_proxy + def test_dpo_cancel_job_openai_globalstandard(self, **kwargs): + """Test canceling DPO fine-tuning job with OpenAI model and GlobalStandard training.""" + self._test_cancel_job_helper(self.DPO_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "dpo", **kwargs) - TestBase.validate_fine_tuning_job(fine_tuning_job) - 
TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) - TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) - assert fine_tuning_job.method is not None, "Method should not be None for RFT job" - TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "reinforcement") + @servicePreparer() + @recorded_by_proxy + def test_rft_cancel_job_openai_standard(self, **kwargs): + """Test canceling RFT fine-tuning job with OpenAI model and Standard training.""" + self._test_cancel_job_helper(self.RFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) - print(f"[test_finetuning_rft] RFT method validation passed - type: {fine_tuning_job.method.type}") + @servicePreparer() + @recorded_by_proxy + def test_rft_cancel_job_openai_globalstandard(self, **kwargs): + """Test canceling RFT fine-tuning job with OpenAI model and GlobalStandard training.""" + self._test_cancel_job_helper(self.RFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) - openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_rft] Cancelled job: {fine_tuning_job.id}") + + + @servicePreparer() + @recorded_by_proxy + def test_dpo_finetuning_create_job_openai_standard(self, **kwargs): + """Test creating DPO fine-tuning job with OpenAI model and Standard training.""" + self._test_dpo_create_job_helper("openai", self.STANDARD_TRAINING_TYPE, **kwargs) + + @servicePreparer() + @recorded_by_proxy + def test_dpo_finetuning_create_job_openai_globalstandard(self, **kwargs): + """Test creating DPO fine-tuning job with OpenAI model and GlobalStandard training.""" + self._test_dpo_create_job_helper("openai", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) + + - self._cleanup_test_files(openai_client, train_file, validation_file, "rft") + @servicePreparer() + @recorded_by_proxy + def test_rft_finetuning_create_job_openai_standard(self, **kwargs): + """Test creating RFT fine-tuning job with OpenAI model and Standard training.""" + self._test_rft_create_job_helper("openai", self.STANDARD_TRAINING_TYPE, **kwargs) + + @servicePreparer() + @recorded_by_proxy + def test_rft_finetuning_create_job_openai_globalstandard(self, **kwargs): + """Test creating RFT fine-tuning job with OpenAI model and GlobalStandard training.""" + self._test_rft_create_job_helper("openai", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) @servicePreparer() @recorded_by_proxy @@ -287,9 +472,9 @@ def test_finetuning_list_events(self, **kwargs): with project_client.get_openai_client() as openai_client: - train_file, validation_file = self._upload_test_files(openai_client, "sft") + train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id) + fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") TestBase.validate_fine_tuning_job(fine_tuning_job) @@ -315,33 +500,7 @@ def test_finetuning_list_events(self, **kwargs): assert event.type is not None, "Event should have a type" print(f"[test_finetuning_sft] Successfully validated {len(events_list)} events") - self._cleanup_test_files(openai_client, train_file, validation_file, "sft") + self._cleanup_test_file(openai_client, train_file.id) + self._cleanup_test_file(openai_client, validation_file.id) - @servicePreparer() - 
@recorded_by_proxy
-    def test_sft_finetuning_create_job_oss_model(self, **kwargs):
-
-        with self.create_client(**kwargs) as project_client:
-
-            with project_client.get_openai_client() as openai_client:
-
-                train_file, validation_file = self._upload_test_files(openai_client, "sft")
-
-                fine_tuning_job = self._create_sft_finetuning_job(
-                    openai_client, train_file.id, validation_file.id, "oss", "GlobalStandard"
-                )
-                print(f"[test_finetuning_sft_oss] Created fine-tuning job: {fine_tuning_job.id}")
-
-                TestBase.validate_fine_tuning_job(
-                    fine_tuning_job, expected_model=self.test_finetuning_params["sft"]["oss"]["model_name"]
-                )
-                TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id)
-                TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id)
-                assert fine_tuning_job.method is not None, "Method should not be None for SFT job"
-                TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "supervised")
-                print(f"[test_finetuning_sft_oss] SFT method validation passed - type: {fine_tuning_job.method.type}")
-
-                openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id)
-                print(f"[test_finetuning_sft_oss] Cancelled job: {fine_tuning_job.id}")
-                self._cleanup_test_files(openai_client, train_file, validation_file, "sft_oss")
diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py
index 08dd1cb9b12e..b7c8a1f73400 100644
--- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py
+++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py
@@ -347,7 +347,7 @@ async def test_finetuning_list_events_async(self, **kwargs):
 
     @servicePreparer()
     @recorded_by_proxy_async
-    async def test_sft_finetuning_create_job_oss_model_async(self, **kwargs):
+    async def test_sft_finetuning_create_job_oss_using_model_async(self, **kwargs):
 
         project_client = self.create_async_client(**kwargs)
         openai_client = project_client.get_openai_client()
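Note on the test matrix this first patch introduces: the per-scenario tests are thin named wrappers over shared _test_*_helper methods, fanned out across three axes (job type sft/dpo/rft, model type openai/oss, training type Standard/GlobalStandard). The same fan-out could be written with pytest parametrization. A minimal sketch follows, assuming plain pytest with no azure-sdk test proxy and with the TestFineTuning class above in scope; the TestFineTuningParametrized subclass is hypothetical and for illustration only.

    import pytest

    class TestFineTuningParametrized(TestFineTuning):  # hypothetical subclass, illustration only
        # Tuples mirror the wrapper tests in the patch:
        # (job_type, model_type, training_type, expected_method_type)
        @pytest.mark.parametrize(
            "job_type, model_type, training_type, expected_method_type",
            [
                ("sft", "openai", "Standard", "supervised"),
                ("sft", "openai", "GlobalStandard", "supervised"),
                ("sft", "oss", "GlobalStandard", "supervised"),
                ("dpo", "openai", "Standard", "dpo"),
                ("dpo", "openai", "GlobalStandard", "dpo"),
                ("rft", "openai", "Standard", "reinforcement"),
                ("rft", "openai", "GlobalStandard", "reinforcement"),
            ],
        )
        def test_finetuning_cancel_job(self, job_type, model_type, training_type, expected_method_type):
            # Delegates to the shared helper defined in the patch above.
            self._test_cancel_job_helper(job_type, model_type, training_type, expected_method_type)

The explicit wrappers in the patch are presumably kept because recorded_by_proxy keys recordings to the test method name, which parametrization would not preserve; one wrapper per combination also keeps each scenario individually runnable and reportable.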
+account_name = "foundrysdk-eastus2-foundry-resou" def pause_job(openai_client, job_id): @@ -167,34 +167,7 @@ def main() -> None: AIProjectClient(endpoint=endpoint, credential=credential) as project_client, project_client.get_openai_client() as openai_client, ): - print("Uploading training file...") - with open(training_file_path, "rb") as f: - train_file = openai_client.files.create(file=f, purpose="fine-tune") - print(f"Uploaded training file with ID: {train_file.id}") - - print("Uploading validation file...") - with open(validation_file_path, "rb") as f: - validation_file = openai_client.files.create(file=f, purpose="fine-tune") - print(f"Uploaded validation file with ID: {validation_file.id}") - - print("Waits for the training and validation files to be processed...") - openai_client.files.wait_for_processing(train_file.id) - openai_client.files.wait_for_processing(validation_file.id) - - print("Creating supervised fine-tuning job") - fine_tuning_job = openai_client.fine_tuning.jobs.create( - training_file=train_file.id, - validation_file=validation_file.id, - model=model_name, - method={ - "type": "supervised", - "supervised": {"hyperparameters": {"n_epochs": 3, "batch_size": 1, "learning_rate_multiplier": 1.0}}, - }, - extra_body={ - "trainingType": "Standard" - }, # Recommended approach to set trainingType. Omitting this field may lead to unsupported behavior. - ) - print(fine_tuning_job) + # Uncomment any of the following methods to test specific functionalities: # retrieve_job(openai_client, fine_tuning_job.id) @@ -213,7 +186,7 @@ def main() -> None: # deployment_name = deploy_model(openai_client, credential, fine_tuning_job.id) - # infer(openai_client, deployment_name) + infer(openai_client, "gpt-4-1-fine-tuned") if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index f5f2e0c25915..529f33b53b24 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -368,29 +368,20 @@ def test_finetuning_retrieve_rft_job(self, **kwargs): @servicePreparer() @recorded_by_proxy def test_finetuning_list_jobs(self, **kwargs): - with self.create_client(**kwargs) as project_client: - with project_client.get_openai_client() as openai_client: - train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) - - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) - print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") - jobs_list = list(openai_client.fine_tuning.jobs.list()) - print(f"[test_finetuning_sft] Listed {len(jobs_list)} jobs") - - assert len(jobs_list) > 0 + print(f"[test_finetuning_list] Listed {len(jobs_list)} jobs") - job_ids = [job.id for job in jobs_list] - assert fine_tuning_job.id in job_ids - - openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft] Cancelled job: {fine_tuning_job.id}") - - self._cleanup_test_file(openai_client, train_file.id) - self._cleanup_test_file(openai_client, validation_file.id) + assert isinstance(jobs_list, list), "Jobs list should be a list" + + for job in jobs_list: + assert job.id is not None, "Job should have an ID" + assert job.created_at is not None, "Job should have a creation timestamp" + assert job.status is not None, "Job should have a status" + print(f"[test_finetuning_list] Validated job {job.id} with status 
{job.status}") + print(f"[test_finetuning_list] Successfully validated list functionality with {len(jobs_list)} jobs") @@ -487,10 +478,8 @@ def test_finetuning_list_events(self, **kwargs): events_list = list(openai_client.fine_tuning.jobs.list_events(fine_tuning_job.id)) print(f"[test_finetuning_sft] Listed {len(events_list)} events for job: {fine_tuning_job.id}") - # Verify that events exist (at minimum, job creation event should be present) assert len(events_list) > 0, "Fine-tuning job should have at least one event" - # Verify events have required attributes for event in events_list: assert event.id is not None, "Event should have an ID" assert event.object is not None, "Event should have an object type" From fbcca512445601d0e4cad24be3e9e74218cc5d08 Mon Sep 17 00:00:00 2001 From: Jayesh Tanna Date: Fri, 21 Nov 2025 14:33:38 +0530 Subject: [PATCH 3/6] Updating async changes --- .../tests/finetuning/test_finetuning_async.py | 441 ++++++++++++------ 1 file changed, 286 insertions(+), 155 deletions(-) diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py index b7c8a1f73400..4d5d5ca39da7 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py @@ -15,11 +15,18 @@ reason="Skipped because we cannot record network calls with AOAI client", ) class TestFineTuningAsync(TestBase): + + SFT_JOB_TYPE = "sft" + DPO_JOB_TYPE = "dpo" + RFT_JOB_TYPE = "rft" + + STANDARD_TRAINING_TYPE = "Standard" + GLOBAL_STANDARD_TRAINING_TYPE = "GlobalStandard" async def _create_sft_finetuning_job_async( - self, openai_client, train_file_id, validation_file_id, model_type="openai", training_type="Standard" + self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai" ): - """Helper method to create a supervised fine-tuning job asynchronously.""" + """Helper method to create a supervised fine-tuning job.""" return await openai_client.fine_tuning.jobs.create( training_file=train_file_id, validation_file=validation_file_id, @@ -37,12 +44,12 @@ async def _create_sft_finetuning_job_async( extra_body={"trainingType": training_type}, ) - async def _create_dpo_finetuning_job_async(self, openai_client, train_file_id, validation_file_id): - """Helper method to create a DPO fine-tuning job asynchronously.""" + async def _create_dpo_finetuning_job_async(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): + """Helper method to create a DPO fine-tuning job.""" return await openai_client.fine_tuning.jobs.create( training_file=train_file_id, validation_file=validation_file_id, - model=self.test_finetuning_params["dpo"]["openai"]["model_name"], + model=self.test_finetuning_params["dpo"][model_type]["model_name"], method={ "type": "dpo", "dpo": { @@ -53,11 +60,11 @@ async def _create_dpo_finetuning_job_async(self, openai_client, train_file_id, v } }, }, - extra_body={"trainingType": "Standard"}, + extra_body={"trainingType": training_type}, ) - async def _create_rft_finetuning_job_async(self, openai_client, train_file_id, validation_file_id): - """Helper method to create an RFT fine-tuning job asynchronously.""" + async def _create_rft_finetuning_job_async(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): + """Helper method to create an RFT fine-tuning job.""" grader = { "name": "Response Quality Grader", "type": "score_model", @@ -74,7 
+81,7 @@ async def _create_rft_finetuning_job_async(self, openai_client, train_file_id, v return await openai_client.fine_tuning.jobs.create( training_file=train_file_id, validation_file=validation_file_id, - model=self.test_finetuning_params["rft"]["openai"]["model_name"], + model=self.test_finetuning_params["rft"][model_type]["model_name"], method={ "type": "reinforcement", "reinforcement": { @@ -89,11 +96,11 @@ async def _create_rft_finetuning_job_async(self, openai_client, train_file_id, v }, }, }, - extra_body={"trainingType": "Standard"}, + extra_body={"trainingType": training_type}, ) async def _upload_test_files_async(self, openai_client, job_type="sft"): - """Helper method to upload training and validation files for fine-tuning tests asynchronously.""" + """Helper method to upload training and validation files for fine-tuning tests.""" test_data_dir = Path(__file__).parent.parent / "test_data" / "finetuning" training_file_path = test_data_dir / self.test_finetuning_params[job_type]["training_file_name"] validation_file_path = test_data_dir / self.test_finetuning_params[job_type]["validation_file_name"] @@ -104,7 +111,7 @@ async def _upload_test_files_async(self, openai_client, job_type="sft"): assert train_processed_file is not None assert train_processed_file.id is not None TestBase.assert_equal_or_not_none(train_processed_file.status, "processed") - print(f"[test_finetuning_{job_type}_async] Uploaded training file: {train_processed_file.id}") + print(f"[test_finetuning] Uploaded training file: {train_processed_file.id}") with open(validation_file_path, "rb") as f: validation_file = await openai_client.files.create(file=f, purpose="fine-tune") @@ -112,194 +119,351 @@ async def _upload_test_files_async(self, openai_client, job_type="sft"): assert validation_processed_file is not None assert validation_processed_file.id is not None TestBase.assert_equal_or_not_none(validation_processed_file.status, "processed") - print(f"[test_finetuning_{job_type}_async] Uploaded validation file: {validation_processed_file.id}") + print(f"[test_finetuning] Uploaded validation file: {validation_processed_file.id}") return train_processed_file, validation_processed_file - async def _cleanup_test_files_async(self, openai_client, train_file, validation_file, job_type): - """Helper method to clean up uploaded files after testing asynchronously.""" - await openai_client.files.delete(train_file.id) - print(f"[test_finetuning_{job_type}_async] Deleted training file: {train_file.id}") - - await openai_client.files.delete(validation_file.id) - print(f"[test_finetuning_{job_type}_async] Deleted validation file: {validation_file.id}") - - @servicePreparer() - @recorded_by_proxy_async - async def test_sft_finetuning_create_job_async(self, **kwargs): + async def _cleanup_test_file_async(self, openai_client, file_id): + """Helper method to clean up uploaded file.""" + await openai_client.files.delete(file_id) + print(f"[test_finetuning] Deleted file: {file_id}") + async def _test_cancel_job_helper_async(self, job_type, model_type, training_type, expected_method_type, **kwargs): + """Helper method for testing canceling fine-tuning jobs across different configurations.""" + project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - - train_file, validation_file = await self._upload_test_files_async(openai_client, "sft") - - fine_tuning_job = await self._create_sft_finetuning_job_async( - openai_client, train_file.id, validation_file.id + + 
train_file, validation_file = await self._upload_test_files_async(openai_client, job_type) + + if job_type == self.SFT_JOB_TYPE: + fine_tuning_job = await self._create_sft_finetuning_job_async( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + elif job_type == self.DPO_JOB_TYPE: + fine_tuning_job = await self._create_dpo_finetuning_job_async( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + elif job_type == self.RFT_JOB_TYPE: + fine_tuning_job = await self._create_rft_finetuning_job_async( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + else: + raise ValueError(f"Unsupported job type: {job_type}") + + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Created job: {fine_tuning_job.id}") + + cancelled_job = await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Cancelled job: {cancelled_job.id}") + + TestBase.validate_fine_tuning_job(cancelled_job, expected_job_id=fine_tuning_job.id) + TestBase.assert_equal_or_not_none(cancelled_job.status, "cancelled") + TestBase.assert_equal_or_not_none(cancelled_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(cancelled_job.validation_file, validation_file.id) + + assert cancelled_job.method is not None, f"Method should not be None for {job_type} job" + TestBase.assert_equal_or_not_none(cancelled_job.method.type, expected_method_type) + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Method validation passed - type: {cancelled_job.method.type}") + + retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Verified cancellation persisted for job: {retrieved_job.id}") + TestBase.validate_fine_tuning_job( + retrieved_job, expected_job_id=fine_tuning_job.id, expected_status="cancelled" ) - print(f"[test_finetuning_sft_async] Created fine-tuning job: {fine_tuning_job.id}") + + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) + async def _test_sft_create_job_helper_async(self, model_type, training_type, **kwargs): + """Helper method for testing SFT fine-tuning job creation across different configurations.""" + + project_client = self.create_async_client(**kwargs) + openai_client = project_client.get_openai_client() + + async with project_client: + + train_file, validation_file = await self._upload_test_files_async(openai_client, self.SFT_JOB_TYPE) + + fine_tuning_job = await self._create_sft_finetuning_job_async(openai_client, train_file.id, validation_file.id, training_type, model_type) + print(f"[test_finetuning_sft_{model_type}_{training_type}] Created fine-tuning job: {fine_tuning_job.id}") + TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for SFT job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "supervised") - print(f"[test_finetuning_sft_async] SFT method validation passed - type: {fine_tuning_job.method.type}") + 
print(f"[test_finetuning_sft_{model_type}_{training_type}] SFT method validation passed - type: {fine_tuning_job.method.type}") + + if model_type == "oss": + TestBase.validate_fine_tuning_job( + fine_tuning_job, expected_model=self.test_finetuning_params["sft"]["oss"]["model_name"] + ) + + await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_sft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") + + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) + + async def _test_dpo_create_job_helper_async(self, model_type, training_type, **kwargs): + """Helper method for testing DPO fine-tuning job creation across different configurations.""" + + project_client = self.create_async_client(**kwargs) + openai_client = project_client.get_openai_client() + async with project_client: + + train_file, validation_file = await self._upload_test_files_async(openai_client, self.DPO_JOB_TYPE) + + fine_tuning_job = await self._create_dpo_finetuning_job_async(openai_client, train_file.id, validation_file.id, training_type, model_type) + print(f"[test_finetuning_dpo_{model_type}_{training_type}] Created DPO fine-tuning job: {fine_tuning_job.id}") + print(fine_tuning_job) + + TestBase.validate_fine_tuning_job(fine_tuning_job) + TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) + assert fine_tuning_job.method is not None, "Method should not be None for DPO job" + TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "dpo") + + print(f"[test_finetuning_dpo_{model_type}_{training_type}] DPO method validation passed - type: {fine_tuning_job.method.type}") + + await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_dpo_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") + + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) + + async def _test_rft_create_job_helper_async(self, model_type, training_type, **kwargs): + """Helper method for testing RFT fine-tuning job creation across different configurations.""" + + project_client = self.create_async_client(**kwargs) + openai_client = project_client.get_openai_client() + + async with project_client: + + train_file, validation_file = await self._upload_test_files_async(openai_client, self.RFT_JOB_TYPE) + + fine_tuning_job = await self._create_rft_finetuning_job_async(openai_client, train_file.id, validation_file.id, training_type, model_type) + print(f"[test_finetuning_rft_{model_type}_{training_type}] Created RFT fine-tuning job: {fine_tuning_job.id}") + + TestBase.validate_fine_tuning_job(fine_tuning_job) + TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) + assert fine_tuning_job.method is not None, "Method should not be None for RFT job" + TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "reinforcement") + + print(f"[test_finetuning_rft_{model_type}_{training_type}] RFT method validation passed - type: {fine_tuning_job.method.type}") + await 
openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft_async] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_rft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") + + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "sft") + @servicePreparer() + @recorded_by_proxy_async + async def test_sft_finetuning_create_job_openai_standard_async(self, **kwargs): + """Test creating SFT fine-tuning job with OpenAI model and Standard training.""" + await self._test_sft_create_job_helper_async("openai", self.STANDARD_TRAINING_TYPE, **kwargs) @servicePreparer() @recorded_by_proxy_async - async def test_finetuning_retrieve_job_async(self, **kwargs): + async def test_sft_finetuning_create_job_openai_globalstandard_async(self, **kwargs): + """Test creating SFT fine-tuning job with OpenAI model and GlobalStandard training.""" + await self._test_sft_create_job_helper_async("openai", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) + @servicePreparer() + @recorded_by_proxy_async + async def test_sft_finetuning_create_job_oss_globalstandard_async(self, **kwargs): + """Test creating SFT fine-tuning job with OSS model and GlobalStandard training.""" + await self._test_sft_create_job_helper_async("oss", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) + + @servicePreparer() + @recorded_by_proxy_async + async def test_finetuning_retrieve_sft_job_async(self, **kwargs): + """Test retrieving SFT fine-tuning job.""" project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - train_file, validation_file = await self._upload_test_files_async(openai_client, "sft") + train_file, validation_file = await self._upload_test_files_async(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = await self._create_sft_finetuning_job_async( - openai_client, train_file.id, validation_file.id - ) - print(f"[test_finetuning_sft_async] Created job: {fine_tuning_job.id}") + fine_tuning_job = await self._create_sft_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_retrieve_sft] Created job: {fine_tuning_job.id}") retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) - print(f"[test_finetuning_sft_async] Retrieved job: {retrieved_job.id}") + print(f"[test_finetuning_retrieve_sft] Retrieved job: {retrieved_job.id}") TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + assert retrieved_job.method is not None, "Method should not be None for SFT job" + TestBase.assert_equal_or_not_none(retrieved_job.method.type, "supervised") + assert self.test_finetuning_params["sft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['sft']['openai']['model_name']} not found in {retrieved_job.model}" await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft_async] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_retrieve_sft] Cancelled job: 
{fine_tuning_job.id}") - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "sft") + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) @servicePreparer() @recorded_by_proxy_async - async def test_finetuning_list_jobs_async(self, **kwargs): - + async def test_finetuning_retrieve_dpo_job_async(self, **kwargs): + """Test retrieving DPO fine-tuning job.""" project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - train_file, validation_file = await self._upload_test_files_async(openai_client, "sft") + train_file, validation_file = await self._upload_test_files_async(openai_client, self.DPO_JOB_TYPE) - fine_tuning_job = await self._create_sft_finetuning_job_async( - openai_client, train_file.id, validation_file.id - ) - print(f"[test_finetuning_sft_async] Created job: {fine_tuning_job.id}") + fine_tuning_job = await self._create_dpo_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_retrieve_dpo] Created job: {fine_tuning_job.id}") - jobs_list_async = openai_client.fine_tuning.jobs.list() - jobs_list = [] - async for job in jobs_list_async: - jobs_list.append(job) - print(f"[test_finetuning_sft_async] Listed {len(jobs_list)} jobs") - - assert len(jobs_list) > 0 + retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) + print(f"[test_finetuning_retrieve_dpo] Retrieved job: {retrieved_job.id}") - job_ids = [job.id for job in jobs_list] - assert fine_tuning_job.id in job_ids + TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) + TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + assert retrieved_job.method is not None, "Method should not be None for DPO job" + TestBase.assert_equal_or_not_none(retrieved_job.method.type, "dpo") + assert self.test_finetuning_params["dpo"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['dpo']['openai']['model_name']} not found in {retrieved_job.model}" await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft_async] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_retrieve_dpo] Cancelled job: {fine_tuning_job.id}") - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "sft") + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) @servicePreparer() @recorded_by_proxy_async - async def test_finetuning_cancel_job_async(self, **kwargs): - + async def test_finetuning_retrieve_rft_job_async(self, **kwargs): + """Test retrieving RFT fine-tuning job.""" project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - train_file, validation_file = await self._upload_test_files_async(openai_client, "sft") + train_file, validation_file = await self._upload_test_files_async(openai_client, self.RFT_JOB_TYPE) - fine_tuning_job = await self._create_sft_finetuning_job_async( - openai_client, train_file.id, validation_file.id - ) - 
print(f"[test_finetuning_sft_async] Created job: {fine_tuning_job.id}") + fine_tuning_job = await self._create_rft_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_retrieve_rft] Created job: {fine_tuning_job.id}") - cancelled_job = await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft_async] Cancelled job: {cancelled_job.id}") + retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) + print(f"[test_finetuning_retrieve_rft] Retrieved job: {retrieved_job.id}") - TestBase.validate_fine_tuning_job(cancelled_job, expected_job_id=fine_tuning_job.id) - TestBase.assert_equal_or_not_none(cancelled_job.status, "cancelled") + TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) + TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) + TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + assert retrieved_job.method is not None, "Method should not be None for RFT job" + TestBase.assert_equal_or_not_none(retrieved_job.method.type, "reinforcement") + assert self.test_finetuning_params["rft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['rft']['openai']['model_name']} not found in {retrieved_job.model}" - retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) - print(f"[test_finetuning_sft_async] Verified cancellation persisted for job: {retrieved_job.id}") - TestBase.validate_fine_tuning_job( - retrieved_job, expected_job_id=fine_tuning_job.id, expected_status="cancelled" - ) + await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) + print(f"[test_finetuning_retrieve_rft] Cancelled job: {fine_tuning_job.id}") - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "sft") + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) @servicePreparer() @recorded_by_proxy_async - async def test_dpo_finetuning_create_job_async(self, **kwargs): - + async def test_finetuning_list_jobs_async(self, **kwargs): project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - train_file, validation_file = await self._upload_test_files_async(openai_client, "dpo") - - fine_tuning_job = await self._create_dpo_finetuning_job_async( - openai_client, train_file.id, validation_file.id - ) - print(f"[test_finetuning_dpo_async] Created DPO fine-tuning job: {fine_tuning_job.id}") - print(fine_tuning_job) - - TestBase.validate_fine_tuning_job(fine_tuning_job) - TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) - TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) - assert fine_tuning_job.method is not None, "Method should not be None for DPO job" - TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "dpo") + jobs_list_async = openai_client.fine_tuning.jobs.list() + jobs_list = [] + async for job in jobs_list_async: + jobs_list.append(job) + print(f"[test_finetuning_list] Listed {len(jobs_list)} jobs") - print(f"[test_finetuning_dpo_async] DPO method validation passed - type: {fine_tuning_job.method.type}") + assert isinstance(jobs_list, list), "Jobs list 
should be a list" + + for job in jobs_list: + assert job.id is not None, "Job should have an ID" + assert job.created_at is not None, "Job should have a creation timestamp" + assert job.status is not None, "Job should have a status" + print(f"[test_finetuning_list] Validated job {job.id} with status {job.status}") + print(f"[test_finetuning_list] Successfully validated list functionality with {len(jobs_list)} jobs") - await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_dpo_async] Cancelled job: {fine_tuning_job.id}") + @servicePreparer() + @recorded_by_proxy_async + async def test_sft_cancel_job_openai_standard_async(self, **kwargs): + """Test canceling SFT fine-tuning job with OpenAI model and Standard training.""" + await self._test_cancel_job_helper_async(self.SFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "supervised", **kwargs) - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "dpo") + @servicePreparer() + @recorded_by_proxy_async + async def test_sft_cancel_job_openai_globalstandard_async(self, **kwargs): + """Test canceling SFT fine-tuning job with OpenAI model and GlobalStandard training.""" + await self._test_cancel_job_helper_async(self.SFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) @servicePreparer() @recorded_by_proxy_async - async def test_rft_finetuning_create_job_async(self, **kwargs): + async def test_sft_cancel_job_oss_globalstandard_async(self, **kwargs): + """Test canceling SFT fine-tuning job with OSS model and GlobalStandard training.""" + await self._test_cancel_job_helper_async(self.SFT_JOB_TYPE, "oss", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) - project_client = self.create_async_client(**kwargs) - openai_client = project_client.get_openai_client() + @servicePreparer() + @recorded_by_proxy_async + async def test_dpo_cancel_job_openai_standard_async(self, **kwargs): + """Test canceling DPO fine-tuning job with OpenAI model and Standard training.""" + await self._test_cancel_job_helper_async(self.DPO_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "dpo", **kwargs) - async with project_client: + @servicePreparer() + @recorded_by_proxy_async + async def test_dpo_cancel_job_openai_globalstandard_async(self, **kwargs): + """Test canceling DPO fine-tuning job with OpenAI model and GlobalStandard training.""" + await self._test_cancel_job_helper_async(self.DPO_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "dpo", **kwargs) - train_file, validation_file = await self._upload_test_files_async(openai_client, "rft") + @servicePreparer() + @recorded_by_proxy_async + async def test_rft_cancel_job_openai_standard_async(self, **kwargs): + """Test canceling RFT fine-tuning job with OpenAI model and Standard training.""" + await self._test_cancel_job_helper_async(self.RFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) - fine_tuning_job = await self._create_rft_finetuning_job_async( - openai_client, train_file.id, validation_file.id - ) - print(f"[test_finetuning_rft_async] Created RFT fine-tuning job: {fine_tuning_job.id}") + @servicePreparer() + @recorded_by_proxy_async + async def test_rft_cancel_job_openai_globalstandard_async(self, **kwargs): + """Test canceling RFT fine-tuning job with OpenAI model and GlobalStandard training.""" + await self._test_cancel_job_helper_async(self.RFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) - 
TestBase.validate_fine_tuning_job(fine_tuning_job) - TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) - TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) - assert fine_tuning_job.method is not None, "Method should not be None for RFT job" - TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "reinforcement") + @servicePreparer() + @recorded_by_proxy_async + async def test_dpo_finetuning_create_job_openai_standard_async(self, **kwargs): + """Test creating DPO fine-tuning job with OpenAI model and Standard training.""" + await self._test_dpo_create_job_helper_async("openai", self.STANDARD_TRAINING_TYPE, **kwargs) - print(f"[test_finetuning_rft_async] RFT method validation passed - type: {fine_tuning_job.method.type}") + @servicePreparer() + @recorded_by_proxy_async + async def test_dpo_finetuning_create_job_openai_globalstandard_async(self, **kwargs): + """Test creating DPO fine-tuning job with OpenAI model and GlobalStandard training.""" + await self._test_dpo_create_job_helper_async("openai", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) - await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_rft_async] Cancelled job: {fine_tuning_job.id}") + @servicePreparer() + @recorded_by_proxy_async + async def test_rft_finetuning_create_job_openai_standard_async(self, **kwargs): + """Test creating RFT fine-tuning job with OpenAI model and Standard training.""" + await self._test_rft_create_job_helper_async("openai", self.STANDARD_TRAINING_TYPE, **kwargs) - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "rft") + @servicePreparer() + @recorded_by_proxy_async + async def test_rft_finetuning_create_job_openai_globalstandard_async(self, **kwargs): + """Test creating RFT fine-tuning job with OpenAI model and GlobalStandard training.""" + await self._test_rft_create_job_helper_async("openai", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) @servicePreparer() @recorded_by_proxy_async @@ -310,30 +474,26 @@ async def test_finetuning_list_events_async(self, **kwargs): async with project_client: - train_file, validation_file = await self._upload_test_files_async(openai_client, "sft") + train_file, validation_file = await self._upload_test_files_async(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = await self._create_sft_finetuning_job_async( - openai_client, train_file.id, validation_file.id - ) - print(f"[test_finetuning_sft_async] Created job: {fine_tuning_job.id}") + fine_tuning_job = await self._create_sft_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft_async] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_sft] Cancelled job: {fine_tuning_job.id}") events_list_async = openai_client.fine_tuning.jobs.list_events(fine_tuning_job.id) events_list = [] async for event in events_list_async: events_list.append(event) - print(f"[test_finetuning_sft_async] Listed {len(events_list)} events for job: {fine_tuning_job.id}") + print(f"[test_finetuning_sft] Listed {len(events_list)} events for job: {fine_tuning_job.id}") - # 
Verify that events exist (at minimum, job creation event should be present) assert len(events_list) > 0, "Fine-tuning job should have at least one event" - # Verify events have required attributes for event in events_list: assert event.id is not None, "Event should have an ID" assert event.object is not None, "Event should have an object type" @@ -341,36 +501,7 @@ async def test_finetuning_list_events_async(self, **kwargs): assert event.level is not None, "Event should have a level" assert event.message is not None, "Event should have a message" assert event.type is not None, "Event should have a type" - print(f"[test_finetuning_sft_async] Successfully validated {len(events_list)} events") - - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "sft") - - @servicePreparer() - @recorded_by_proxy_async - async def test_sft_finetuning_create_job_oss_using_model_async(self, **kwargs): - - project_client = self.create_async_client(**kwargs) - openai_client = project_client.get_openai_client() - - async with project_client: - - train_file, validation_file = await self._upload_test_files_async(openai_client, "sft") - - fine_tuning_job = await self._create_sft_finetuning_job_async( - openai_client, train_file.id, validation_file.id, "oss", "GlobalStandard" - ) - print(f"[test_finetuning_sft_oss_async] Created fine-tuning job: {fine_tuning_job.id}") - TestBase.validate_fine_tuning_job( - fine_tuning_job, expected_model=self.test_finetuning_params["sft"]["oss"]["model_name"] - ) - TestBase.validate_fine_tuning_job(fine_tuning_job) - TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) - TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) - assert fine_tuning_job.method is not None, "Method should not be None for SFT job" - TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "supervised") - print(f"[test_finetuning_sft_oss_async] SFT method validation passed - type: {fine_tuning_job.method.type}") - - await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_sft_oss_async] Cancelled job: {fine_tuning_job.id}") + print(f"[test_finetuning_sft] Successfully validated {len(events_list)} events") - await self._cleanup_test_files_async(openai_client, train_file, validation_file, "sft_oss") + await self._cleanup_test_file_async(openai_client, train_file.id) + await self._cleanup_test_file_async(openai_client, validation_file.id) From 0ff6db79b20d7b22f73f99622269eb2d632e7ac1 Mon Sep 17 00:00:00 2001 From: Jayesh Tanna Date: Fri, 21 Nov 2025 14:37:22 +0530 Subject: [PATCH 4/6] removing changes --- .../sample_finetuning_supervised_job.py | 41 +++++++++++++++---- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py index 41c3f0dabbcd..7229cf3a167c 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py @@ -43,7 +43,7 @@ load_dotenv() # For fine-tuning -endpoint = "https://foundrysdk-eastus2-foundry-resou.services.ai.azure.com/api/projects/foundrysdk-eastus2-project" +endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"] model_name = os.environ.get("MODEL_NAME", "gpt-4.1") script_dir = Path(__file__).parent training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, 
"data", "sft_training_set.jsonl")) @@ -52,9 +52,9 @@ ) # For Deployment and inferencing on model -subscription_id = "856c80fd-f14e-436b-b434-fbc44a9103f7" -resource_group = "foundrysdk-eastus2-rg" -account_name = "foundrysdk-eastus2-foundry-resou" +subscription_id = os.environ["AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID"] +resource_group = os.environ["AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP"] +account_name = os.environ["AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT"] def pause_job(openai_client, job_id): @@ -167,7 +167,34 @@ def main() -> None: AIProjectClient(endpoint=endpoint, credential=credential) as project_client, project_client.get_openai_client() as openai_client, ): - + print("Uploading training file...") + with open(training_file_path, "rb") as f: + train_file = openai_client.files.create(file=f, purpose="fine-tune") + print(f"Uploaded training file with ID: {train_file.id}") + + print("Uploading validation file...") + with open(validation_file_path, "rb") as f: + validation_file = openai_client.files.create(file=f, purpose="fine-tune") + print(f"Uploaded validation file with ID: {validation_file.id}") + + print("Waits for the training and validation files to be processed...") + openai_client.files.wait_for_processing(train_file.id) + openai_client.files.wait_for_processing(validation_file.id) + + print("Creating supervised fine-tuning job") + fine_tuning_job = openai_client.fine_tuning.jobs.create( + training_file=train_file.id, + validation_file=validation_file.id, + model=model_name, + method={ + "type": "supervised", + "supervised": {"hyperparameters": {"n_epochs": 3, "batch_size": 1, "learning_rate_multiplier": 1.0}}, + }, + extra_body={ + "trainingType": "Standard" + }, # Recommended approach to set trainingType. Omitting this field may lead to unsupported behavior. 
+ ) + print(fine_tuning_job) # Uncomment any of the following methods to test specific functionalities: # retrieve_job(openai_client, fine_tuning_job.id) @@ -186,8 +213,8 @@ def main() -> None: # deployment_name = deploy_model(openai_client, credential, fine_tuning_job.id) - infer(openai_client, "gpt-4-1-fine-tuned") + # infer(openai_client, deployment_name) if __name__ == "__main__": - main() + main() \ No newline at end of file From 35905a14797596c0e0de4551d74df8063cf2e5ed Mon Sep 17 00:00:00 2001 From: Jayesh Tanna Date: Fri, 21 Nov 2025 14:42:09 +0530 Subject: [PATCH 5/6] applying black --- .../sample_finetuning_supervised_job.py | 2 +- .../tests/finetuning/test_finetuning.py | 190 +++++++++++------- .../tests/finetuning/test_finetuning_async.py | 170 ++++++++++------ 3 files changed, 232 insertions(+), 130 deletions(-) diff --git a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py index 7229cf3a167c..8057ab43cf12 100644 --- a/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py +++ b/sdk/ai/azure-ai-projects/samples/finetuning/sample_finetuning_supervised_job.py @@ -217,4 +217,4 @@ def main() -> None: if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py index 529f33b53b24..9dc26b549ad4 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py +++ b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning.py @@ -14,11 +14,11 @@ reason="Skipped because we cannot record network calls with AOAI client", ) class TestFineTuning(TestBase): - + SFT_JOB_TYPE = "sft" DPO_JOB_TYPE = "dpo" RFT_JOB_TYPE = "rft" - + STANDARD_TRAINING_TYPE = "Standard" GLOBAL_STANDARD_TRAINING_TYPE = "GlobalStandard" @@ -43,7 +43,9 @@ def _create_sft_finetuning_job( extra_body={"trainingType": training_type}, ) - def _create_dpo_finetuning_job(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): + def _create_dpo_finetuning_job( + self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai" + ): """Helper method to create a DPO fine-tuning job.""" return openai_client.fine_tuning.jobs.create( training_file=train_file_id, @@ -62,7 +64,9 @@ def _create_dpo_finetuning_job(self, openai_client, train_file_id, validation_fi extra_body={"trainingType": training_type}, ) - def _create_rft_finetuning_job(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): + def _create_rft_finetuning_job( + self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai" + ): """Helper method to create an RFT fine-tuning job.""" grader = { "name": "Response Quality Grader", @@ -129,12 +133,12 @@ def _cleanup_test_file(self, openai_client, file_id): def _test_cancel_job_helper(self, job_type, model_type, training_type, expected_method_type, **kwargs): """Helper method for testing canceling fine-tuning jobs across different configurations.""" - + with self.create_client(**kwargs) as project_client: with project_client.get_openai_client() as openai_client: - + train_file, validation_file = self._upload_test_files(openai_client, job_type) - + if job_type == self.SFT_JOB_TYPE: fine_tuning_job = self._create_sft_finetuning_job( openai_client, train_file.id, validation_file.id, training_type, model_type @@ 
-149,114 +153,140 @@ def _test_cancel_job_helper(self, job_type, model_type, training_type, expected_ ) else: raise ValueError(f"Unsupported job type: {job_type}") - - print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Created job: {fine_tuning_job.id}") - + + print( + f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Created job: {fine_tuning_job.id}" + ) + cancelled_job = openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) - print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Cancelled job: {cancelled_job.id}") - + print( + f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Cancelled job: {cancelled_job.id}" + ) + # Validate the cancelled job TestBase.validate_fine_tuning_job(cancelled_job, expected_job_id=fine_tuning_job.id) TestBase.assert_equal_or_not_none(cancelled_job.status, "cancelled") TestBase.assert_equal_or_not_none(cancelled_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(cancelled_job.validation_file, validation_file.id) - + # Validate method type assert cancelled_job.method is not None, f"Method should not be None for {job_type} job" TestBase.assert_equal_or_not_none(cancelled_job.method.type, expected_method_type) - print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Method validation passed - type: {cancelled_job.method.type}") - + print( + f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Method validation passed - type: {cancelled_job.method.type}" + ) + # Verify cancellation persisted by retrieving the job retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) - print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Verified cancellation persisted for job: {retrieved_job.id}") + print( + f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Verified cancellation persisted for job: {retrieved_job.id}" + ) TestBase.validate_fine_tuning_job( retrieved_job, expected_job_id=fine_tuning_job.id, expected_status="cancelled" ) - + self._cleanup_test_file(openai_client, train_file.id) self._cleanup_test_file(openai_client, validation_file.id) def _test_sft_create_job_helper(self, model_type, training_type, **kwargs): """Helper method for testing SFT fine-tuning job creation across different configurations.""" - + with self.create_client(**kwargs) as project_client: with project_client.get_openai_client() as openai_client: - + train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) - - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, training_type, model_type) - print(f"[test_finetuning_sft_{model_type}_{training_type}] Created fine-tuning job: {fine_tuning_job.id}") - + + fine_tuning_job = self._create_sft_finetuning_job( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + print( + f"[test_finetuning_sft_{model_type}_{training_type}] Created fine-tuning job: {fine_tuning_job.id}" + ) + TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for SFT job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "supervised") - 
print(f"[test_finetuning_sft_{model_type}_{training_type}] SFT method validation passed - type: {fine_tuning_job.method.type}") - + print( + f"[test_finetuning_sft_{model_type}_{training_type}] SFT method validation passed - type: {fine_tuning_job.method.type}" + ) + # For OSS models, validate the specific model name if model_type == "oss": TestBase.validate_fine_tuning_job( fine_tuning_job, expected_model=self.test_finetuning_params["sft"]["oss"]["model_name"] ) - + openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_sft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") - + self._cleanup_test_file(openai_client, train_file.id) self._cleanup_test_file(openai_client, validation_file.id) def _test_dpo_create_job_helper(self, model_type, training_type, **kwargs): """Helper method for testing DPO fine-tuning job creation across different configurations.""" - + with self.create_client(**kwargs) as project_client: with project_client.get_openai_client() as openai_client: - + train_file, validation_file = self._upload_test_files(openai_client, self.DPO_JOB_TYPE) - - fine_tuning_job = self._create_dpo_finetuning_job(openai_client, train_file.id, validation_file.id, training_type, model_type) - print(f"[test_finetuning_dpo_{model_type}_{training_type}] Created DPO fine-tuning job: {fine_tuning_job.id}") + + fine_tuning_job = self._create_dpo_finetuning_job( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + print( + f"[test_finetuning_dpo_{model_type}_{training_type}] Created DPO fine-tuning job: {fine_tuning_job.id}" + ) print(fine_tuning_job) - + TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for DPO job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "dpo") - - print(f"[test_finetuning_dpo_{model_type}_{training_type}] DPO method validation passed - type: {fine_tuning_job.method.type}") - + + print( + f"[test_finetuning_dpo_{model_type}_{training_type}] DPO method validation passed - type: {fine_tuning_job.method.type}" + ) + openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_dpo_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") - + self._cleanup_test_file(openai_client, train_file.id) self._cleanup_test_file(openai_client, validation_file.id) def _test_rft_create_job_helper(self, model_type, training_type, **kwargs): """Helper method for testing RFT fine-tuning job creation across different configurations.""" - + with self.create_client(**kwargs) as project_client: with project_client.get_openai_client() as openai_client: - + train_file, validation_file = self._upload_test_files(openai_client, self.RFT_JOB_TYPE) - - fine_tuning_job = self._create_rft_finetuning_job(openai_client, train_file.id, validation_file.id, training_type, model_type) - print(f"[test_finetuning_rft_{model_type}_{training_type}] Created RFT fine-tuning job: {fine_tuning_job.id}") - + + fine_tuning_job = self._create_rft_finetuning_job( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + print( + f"[test_finetuning_rft_{model_type}_{training_type}] Created RFT fine-tuning job: {fine_tuning_job.id}" + ) + 
TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for RFT job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "reinforcement") - - print(f"[test_finetuning_rft_{model_type}_{training_type}] RFT method validation passed - type: {fine_tuning_job.method.type}") - + + print( + f"[test_finetuning_rft_{model_type}_{training_type}] RFT method validation passed - type: {fine_tuning_job.method.type}" + ) + openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_rft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") - + self._cleanup_test_file(openai_client, train_file.id) self._cleanup_test_file(openai_client, validation_file.id) @@ -287,7 +317,9 @@ def test_finetuning_retrieve_sft_job(self, **kwargs): train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = self._create_sft_finetuning_job( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_retrieve_sft] Created job: {fine_tuning_job.id}") retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) @@ -296,10 +328,14 @@ def test_finetuning_retrieve_sft_job(self, **kwargs): TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) - TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + TestBase.assert_equal_or_not_none( + retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower() + ) assert retrieved_job.method is not None, "Method should not be None for SFT job" TestBase.assert_equal_or_not_none(retrieved_job.method.type, "supervised") - assert self.test_finetuning_params["sft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['sft']['openai']['model_name']} not found in {retrieved_job.model}" + assert ( + self.test_finetuning_params["sft"]["openai"]["model_name"] in retrieved_job.model + ), f"Expected model name {self.test_finetuning_params['sft']['openai']['model_name']} not found in {retrieved_job.model}" openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_retrieve_sft] Cancelled job: {fine_tuning_job.id}") @@ -316,7 +352,9 @@ def test_finetuning_retrieve_dpo_job(self, **kwargs): train_file, validation_file = self._upload_test_files(openai_client, self.DPO_JOB_TYPE) - fine_tuning_job = self._create_dpo_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = self._create_dpo_finetuning_job( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_retrieve_dpo] Created job: {fine_tuning_job.id}") retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) @@ -325,10 +363,14 @@ def test_finetuning_retrieve_dpo_job(self, **kwargs): 
TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) - TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + TestBase.assert_equal_or_not_none( + retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower() + ) assert retrieved_job.method is not None, "Method should not be None for DPO job" TestBase.assert_equal_or_not_none(retrieved_job.method.type, "dpo") - assert self.test_finetuning_params["dpo"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['dpo']['openai']['model_name']} not found in {retrieved_job.model}" + assert ( + self.test_finetuning_params["dpo"]["openai"]["model_name"] in retrieved_job.model + ), f"Expected model name {self.test_finetuning_params['dpo']['openai']['model_name']} not found in {retrieved_job.model}" openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_retrieve_dpo] Cancelled job: {fine_tuning_job.id}") @@ -345,7 +387,9 @@ def test_finetuning_retrieve_rft_job(self, **kwargs): train_file, validation_file = self._upload_test_files(openai_client, self.RFT_JOB_TYPE) - fine_tuning_job = self._create_rft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = self._create_rft_finetuning_job( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_retrieve_rft] Created job: {fine_tuning_job.id}") retrieved_job = openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) @@ -354,10 +398,14 @@ def test_finetuning_retrieve_rft_job(self, **kwargs): TestBase.validate_fine_tuning_job(retrieved_job, expected_job_id=fine_tuning_job.id) TestBase.assert_equal_or_not_none(retrieved_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(retrieved_job.validation_file, validation_file.id) - TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) + TestBase.assert_equal_or_not_none( + retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower() + ) assert retrieved_job.method is not None, "Method should not be None for RFT job" TestBase.assert_equal_or_not_none(retrieved_job.method.type, "reinforcement") - assert self.test_finetuning_params["rft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['rft']['openai']['model_name']} not found in {retrieved_job.model}" + assert ( + self.test_finetuning_params["rft"]["openai"]["model_name"] in retrieved_job.model + ), f"Expected model name {self.test_finetuning_params['rft']['openai']['model_name']} not found in {retrieved_job.model}" openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_retrieve_rft] Cancelled job: {fine_tuning_job.id}") @@ -375,7 +423,7 @@ def test_finetuning_list_jobs(self, **kwargs): print(f"[test_finetuning_list] Listed {len(jobs_list)} jobs") assert isinstance(jobs_list, list), "Jobs list should be a list" - + for job in jobs_list: assert job.id is not None, "Job should have an ID" assert job.created_at is not None, "Job should have a creation timestamp" @@ -383,8 +431,6 @@ def test_finetuning_list_jobs(self, **kwargs): print(f"[test_finetuning_list] Validated job {job.id} with status {job.status}") 
print(f"[test_finetuning_list] Successfully validated list functionality with {len(jobs_list)} jobs") - - @servicePreparer() @recorded_by_proxy def test_sft_cancel_job_openai_standard(self, **kwargs): @@ -395,13 +441,17 @@ def test_sft_cancel_job_openai_standard(self, **kwargs): @recorded_by_proxy def test_sft_cancel_job_openai_globalstandard(self, **kwargs): """Test canceling SFT fine-tuning job with OpenAI model and GlobalStandard training.""" - self._test_cancel_job_helper(self.SFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) + self._test_cancel_job_helper( + self.SFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs + ) @servicePreparer() @recorded_by_proxy def test_sft_cancel_job_oss_globalstandard(self, **kwargs): """Test canceling SFT fine-tuning job with OSS model and GlobalStandard training.""" - self._test_cancel_job_helper(self.SFT_JOB_TYPE, "oss", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) + self._test_cancel_job_helper( + self.SFT_JOB_TYPE, "oss", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs + ) @servicePreparer() @recorded_by_proxy @@ -419,15 +469,17 @@ def test_dpo_cancel_job_openai_globalstandard(self, **kwargs): @recorded_by_proxy def test_rft_cancel_job_openai_standard(self, **kwargs): """Test canceling RFT fine-tuning job with OpenAI model and Standard training.""" - self._test_cancel_job_helper(self.RFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) + self._test_cancel_job_helper( + self.RFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "reinforcement", **kwargs + ) @servicePreparer() @recorded_by_proxy def test_rft_cancel_job_openai_globalstandard(self, **kwargs): """Test canceling RFT fine-tuning job with OpenAI model and GlobalStandard training.""" - self._test_cancel_job_helper(self.RFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) - - + self._test_cancel_job_helper( + self.RFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "reinforcement", **kwargs + ) @servicePreparer() @recorded_by_proxy @@ -441,8 +493,6 @@ def test_dpo_finetuning_create_job_openai_globalstandard(self, **kwargs): """Test creating DPO fine-tuning job with OpenAI model and GlobalStandard training.""" self._test_dpo_create_job_helper("openai", self.GLOBAL_STANDARD_TRAINING_TYPE, **kwargs) - - @servicePreparer() @recorded_by_proxy def test_rft_finetuning_create_job_openai_standard(self, **kwargs): @@ -465,7 +515,9 @@ def test_finetuning_list_events(self, **kwargs): train_file, validation_file = self._upload_test_files(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = self._create_sft_finetuning_job(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = self._create_sft_finetuning_job( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") TestBase.validate_fine_tuning_job(fine_tuning_job) @@ -491,5 +543,3 @@ def test_finetuning_list_events(self, **kwargs): self._cleanup_test_file(openai_client, train_file.id) self._cleanup_test_file(openai_client, validation_file.id) - - diff --git a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py index 4d5d5ca39da7..838483474a04 100644 --- a/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py +++ 
b/sdk/ai/azure-ai-projects/tests/finetuning/test_finetuning_async.py @@ -15,11 +15,11 @@ reason="Skipped because we cannot record network calls with AOAI client", ) class TestFineTuningAsync(TestBase): - + SFT_JOB_TYPE = "sft" DPO_JOB_TYPE = "dpo" RFT_JOB_TYPE = "rft" - + STANDARD_TRAINING_TYPE = "Standard" GLOBAL_STANDARD_TRAINING_TYPE = "GlobalStandard" @@ -44,7 +44,9 @@ async def _create_sft_finetuning_job_async( extra_body={"trainingType": training_type}, ) - async def _create_dpo_finetuning_job_async(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): + async def _create_dpo_finetuning_job_async( + self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai" + ): """Helper method to create a DPO fine-tuning job.""" return await openai_client.fine_tuning.jobs.create( training_file=train_file_id, @@ -63,7 +65,9 @@ async def _create_dpo_finetuning_job_async(self, openai_client, train_file_id, v extra_body={"trainingType": training_type}, ) - async def _create_rft_finetuning_job_async(self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai"): + async def _create_rft_finetuning_job_async( + self, openai_client, train_file_id, validation_file_id, training_type, model_type="openai" + ): """Helper method to create an RFT fine-tuning job.""" grader = { "name": "Response Quality Grader", @@ -130,14 +134,14 @@ async def _cleanup_test_file_async(self, openai_client, file_id): async def _test_cancel_job_helper_async(self, job_type, model_type, training_type, expected_method_type, **kwargs): """Helper method for testing canceling fine-tuning jobs across different configurations.""" - + project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - + train_file, validation_file = await self._upload_test_files_async(openai_client, job_type) - + if job_type == self.SFT_JOB_TYPE: fine_tuning_job = await self._create_sft_finetuning_job_async( openai_client, train_file.id, validation_file.id, training_type, model_type @@ -152,116 +156,136 @@ async def _test_cancel_job_helper_async(self, job_type, model_type, training_typ ) else: raise ValueError(f"Unsupported job type: {job_type}") - + print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Created job: {fine_tuning_job.id}") - + cancelled_job = await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Cancelled job: {cancelled_job.id}") - + TestBase.validate_fine_tuning_job(cancelled_job, expected_job_id=fine_tuning_job.id) TestBase.assert_equal_or_not_none(cancelled_job.status, "cancelled") TestBase.assert_equal_or_not_none(cancelled_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(cancelled_job.validation_file, validation_file.id) - + assert cancelled_job.method is not None, f"Method should not be None for {job_type} job" TestBase.assert_equal_or_not_none(cancelled_job.method.type, expected_method_type) - print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Method validation passed - type: {cancelled_job.method.type}") - + print( + f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Method validation passed - type: {cancelled_job.method.type}" + ) + retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) - print(f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Verified cancellation 
persisted for job: {retrieved_job.id}") + print( + f"[test_finetuning_cancel_{job_type}_{model_type}_{training_type}] Verified cancellation persisted for job: {retrieved_job.id}" + ) TestBase.validate_fine_tuning_job( retrieved_job, expected_job_id=fine_tuning_job.id, expected_status="cancelled" ) - + await self._cleanup_test_file_async(openai_client, train_file.id) await self._cleanup_test_file_async(openai_client, validation_file.id) async def _test_sft_create_job_helper_async(self, model_type, training_type, **kwargs): """Helper method for testing SFT fine-tuning job creation across different configurations.""" - + project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - + train_file, validation_file = await self._upload_test_files_async(openai_client, self.SFT_JOB_TYPE) - - fine_tuning_job = await self._create_sft_finetuning_job_async(openai_client, train_file.id, validation_file.id, training_type, model_type) + + fine_tuning_job = await self._create_sft_finetuning_job_async( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) print(f"[test_finetuning_sft_{model_type}_{training_type}] Created fine-tuning job: {fine_tuning_job.id}") - + TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for SFT job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "supervised") - print(f"[test_finetuning_sft_{model_type}_{training_type}] SFT method validation passed - type: {fine_tuning_job.method.type}") - + print( + f"[test_finetuning_sft_{model_type}_{training_type}] SFT method validation passed - type: {fine_tuning_job.method.type}" + ) + if model_type == "oss": TestBase.validate_fine_tuning_job( fine_tuning_job, expected_model=self.test_finetuning_params["sft"]["oss"]["model_name"] ) - + await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_sft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") - + await self._cleanup_test_file_async(openai_client, train_file.id) await self._cleanup_test_file_async(openai_client, validation_file.id) async def _test_dpo_create_job_helper_async(self, model_type, training_type, **kwargs): """Helper method for testing DPO fine-tuning job creation across different configurations.""" - + project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - + train_file, validation_file = await self._upload_test_files_async(openai_client, self.DPO_JOB_TYPE) - - fine_tuning_job = await self._create_dpo_finetuning_job_async(openai_client, train_file.id, validation_file.id, training_type, model_type) - print(f"[test_finetuning_dpo_{model_type}_{training_type}] Created DPO fine-tuning job: {fine_tuning_job.id}") + + fine_tuning_job = await self._create_dpo_finetuning_job_async( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + print( + f"[test_finetuning_dpo_{model_type}_{training_type}] Created DPO fine-tuning job: {fine_tuning_job.id}" + ) print(fine_tuning_job) - + TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, 
train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for DPO job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "dpo") - - print(f"[test_finetuning_dpo_{model_type}_{training_type}] DPO method validation passed - type: {fine_tuning_job.method.type}") - + + print( + f"[test_finetuning_dpo_{model_type}_{training_type}] DPO method validation passed - type: {fine_tuning_job.method.type}" + ) + await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_dpo_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") - + await self._cleanup_test_file_async(openai_client, train_file.id) await self._cleanup_test_file_async(openai_client, validation_file.id) async def _test_rft_create_job_helper_async(self, model_type, training_type, **kwargs): """Helper method for testing RFT fine-tuning job creation across different configurations.""" - + project_client = self.create_async_client(**kwargs) openai_client = project_client.get_openai_client() async with project_client: - + train_file, validation_file = await self._upload_test_files_async(openai_client, self.RFT_JOB_TYPE) - - fine_tuning_job = await self._create_rft_finetuning_job_async(openai_client, train_file.id, validation_file.id, training_type, model_type) - print(f"[test_finetuning_rft_{model_type}_{training_type}] Created RFT fine-tuning job: {fine_tuning_job.id}") - + + fine_tuning_job = await self._create_rft_finetuning_job_async( + openai_client, train_file.id, validation_file.id, training_type, model_type + ) + print( + f"[test_finetuning_rft_{model_type}_{training_type}] Created RFT fine-tuning job: {fine_tuning_job.id}" + ) + TestBase.validate_fine_tuning_job(fine_tuning_job) TestBase.assert_equal_or_not_none(fine_tuning_job.training_file, train_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.validation_file, validation_file.id) TestBase.assert_equal_or_not_none(fine_tuning_job.trainingType.lower(), training_type.lower()) assert fine_tuning_job.method is not None, "Method should not be None for RFT job" TestBase.assert_equal_or_not_none(fine_tuning_job.method.type, "reinforcement") - - print(f"[test_finetuning_rft_{model_type}_{training_type}] RFT method validation passed - type: {fine_tuning_job.method.type}") - + + print( + f"[test_finetuning_rft_{model_type}_{training_type}] RFT method validation passed - type: {fine_tuning_job.method.type}" + ) + await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_rft_{model_type}_{training_type}] Cancelled job: {fine_tuning_job.id}") - + await self._cleanup_test_file_async(openai_client, train_file.id) await self._cleanup_test_file_async(openai_client, validation_file.id) @@ -294,7 +318,9 @@ async def test_finetuning_retrieve_sft_job_async(self, **kwargs): train_file, validation_file = await self._upload_test_files_async(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = await self._create_sft_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = await self._create_sft_finetuning_job_async( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_retrieve_sft] Created job: {fine_tuning_job.id}") retrieved_job = await 
openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) @@ -306,7 +332,9 @@ async def test_finetuning_retrieve_sft_job_async(self, **kwargs): TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) assert retrieved_job.method is not None, "Method should not be None for SFT job" TestBase.assert_equal_or_not_none(retrieved_job.method.type, "supervised") - assert self.test_finetuning_params["sft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['sft']['openai']['model_name']} not found in {retrieved_job.model}" + assert ( + self.test_finetuning_params["sft"]["openai"]["model_name"] in retrieved_job.model + ), f"Expected model name {self.test_finetuning_params['sft']['openai']['model_name']} not found in {retrieved_job.model}" await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_retrieve_sft] Cancelled job: {fine_tuning_job.id}") @@ -325,7 +353,9 @@ async def test_finetuning_retrieve_dpo_job_async(self, **kwargs): train_file, validation_file = await self._upload_test_files_async(openai_client, self.DPO_JOB_TYPE) - fine_tuning_job = await self._create_dpo_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = await self._create_dpo_finetuning_job_async( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_retrieve_dpo] Created job: {fine_tuning_job.id}") retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) @@ -337,7 +367,9 @@ async def test_finetuning_retrieve_dpo_job_async(self, **kwargs): TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) assert retrieved_job.method is not None, "Method should not be None for DPO job" TestBase.assert_equal_or_not_none(retrieved_job.method.type, "dpo") - assert self.test_finetuning_params["dpo"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['dpo']['openai']['model_name']} not found in {retrieved_job.model}" + assert ( + self.test_finetuning_params["dpo"]["openai"]["model_name"] in retrieved_job.model + ), f"Expected model name {self.test_finetuning_params['dpo']['openai']['model_name']} not found in {retrieved_job.model}" await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_retrieve_dpo] Cancelled job: {fine_tuning_job.id}") @@ -356,7 +388,9 @@ async def test_finetuning_retrieve_rft_job_async(self, **kwargs): train_file, validation_file = await self._upload_test_files_async(openai_client, self.RFT_JOB_TYPE) - fine_tuning_job = await self._create_rft_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = await self._create_rft_finetuning_job_async( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_retrieve_rft] Created job: {fine_tuning_job.id}") retrieved_job = await openai_client.fine_tuning.jobs.retrieve(fine_tuning_job.id) @@ -368,7 +402,9 @@ async def test_finetuning_retrieve_rft_job_async(self, **kwargs): TestBase.assert_equal_or_not_none(retrieved_job.trainingType.lower(), self.STANDARD_TRAINING_TYPE.lower()) assert retrieved_job.method is not None, "Method should not be None for RFT job" TestBase.assert_equal_or_not_none(retrieved_job.method.type, "reinforcement") - assert 
self.test_finetuning_params["rft"]["openai"]["model_name"] in retrieved_job.model, f"Expected model name {self.test_finetuning_params['rft']['openai']['model_name']} not found in {retrieved_job.model}" + assert ( + self.test_finetuning_params["rft"]["openai"]["model_name"] in retrieved_job.model + ), f"Expected model name {self.test_finetuning_params['rft']['openai']['model_name']} not found in {retrieved_job.model}" await openai_client.fine_tuning.jobs.cancel(fine_tuning_job.id) print(f"[test_finetuning_retrieve_rft] Cancelled job: {fine_tuning_job.id}") @@ -391,7 +427,7 @@ async def test_finetuning_list_jobs_async(self, **kwargs): print(f"[test_finetuning_list] Listed {len(jobs_list)} jobs") assert isinstance(jobs_list, list), "Jobs list should be a list" - + for job in jobs_list: assert job.id is not None, "Job should have an ID" assert job.created_at is not None, "Job should have a creation timestamp" @@ -403,43 +439,57 @@ async def test_finetuning_list_jobs_async(self, **kwargs): @recorded_by_proxy_async async def test_sft_cancel_job_openai_standard_async(self, **kwargs): """Test canceling SFT fine-tuning job with OpenAI model and Standard training.""" - await self._test_cancel_job_helper_async(self.SFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "supervised", **kwargs) + await self._test_cancel_job_helper_async( + self.SFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "supervised", **kwargs + ) @servicePreparer() @recorded_by_proxy_async async def test_sft_cancel_job_openai_globalstandard_async(self, **kwargs): """Test canceling SFT fine-tuning job with OpenAI model and GlobalStandard training.""" - await self._test_cancel_job_helper_async(self.SFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) + await self._test_cancel_job_helper_async( + self.SFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs + ) @servicePreparer() @recorded_by_proxy_async async def test_sft_cancel_job_oss_globalstandard_async(self, **kwargs): """Test canceling SFT fine-tuning job with OSS model and GlobalStandard training.""" - await self._test_cancel_job_helper_async(self.SFT_JOB_TYPE, "oss", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs) + await self._test_cancel_job_helper_async( + self.SFT_JOB_TYPE, "oss", self.GLOBAL_STANDARD_TRAINING_TYPE, "supervised", **kwargs + ) @servicePreparer() @recorded_by_proxy_async async def test_dpo_cancel_job_openai_standard_async(self, **kwargs): """Test canceling DPO fine-tuning job with OpenAI model and Standard training.""" - await self._test_cancel_job_helper_async(self.DPO_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "dpo", **kwargs) + await self._test_cancel_job_helper_async( + self.DPO_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "dpo", **kwargs + ) @servicePreparer() @recorded_by_proxy_async async def test_dpo_cancel_job_openai_globalstandard_async(self, **kwargs): """Test canceling DPO fine-tuning job with OpenAI model and GlobalStandard training.""" - await self._test_cancel_job_helper_async(self.DPO_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "dpo", **kwargs) + await self._test_cancel_job_helper_async( + self.DPO_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "dpo", **kwargs + ) @servicePreparer() @recorded_by_proxy_async async def test_rft_cancel_job_openai_standard_async(self, **kwargs): """Test canceling RFT fine-tuning job with OpenAI model and Standard training.""" - await self._test_cancel_job_helper_async(self.RFT_JOB_TYPE, "openai", 
self.STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) + await self._test_cancel_job_helper_async( + self.RFT_JOB_TYPE, "openai", self.STANDARD_TRAINING_TYPE, "reinforcement", **kwargs + ) @servicePreparer() @recorded_by_proxy_async async def test_rft_cancel_job_openai_globalstandard_async(self, **kwargs): """Test canceling RFT fine-tuning job with OpenAI model and GlobalStandard training.""" - await self._test_cancel_job_helper_async(self.RFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "reinforcement", **kwargs) + await self._test_cancel_job_helper_async( + self.RFT_JOB_TYPE, "openai", self.GLOBAL_STANDARD_TRAINING_TYPE, "reinforcement", **kwargs + ) @servicePreparer() @recorded_by_proxy_async @@ -476,7 +526,9 @@ async def test_finetuning_list_events_async(self, **kwargs): train_file, validation_file = await self._upload_test_files_async(openai_client, self.SFT_JOB_TYPE) - fine_tuning_job = await self._create_sft_finetuning_job_async(openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE) + fine_tuning_job = await self._create_sft_finetuning_job_async( + openai_client, train_file.id, validation_file.id, self.STANDARD_TRAINING_TYPE + ) print(f"[test_finetuning_sft] Created job: {fine_tuning_job.id}") TestBase.validate_fine_tuning_job(fine_tuning_job) From b83367effdffb636451292f7ed4a23a07b637731 Mon Sep 17 00:00:00 2001 From: Jayesh Tanna Date: Fri, 21 Nov 2025 14:50:13 +0530 Subject: [PATCH 6/6] updating env template file --- sdk/ai/azure-ai-projects/.env.template | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/.env.template b/sdk/ai/azure-ai-projects/.env.template index 4c5e51b6c5d3..c5afe392774c 100644 --- a/sdk/ai/azure-ai-projects/.env.template +++ b/sdk/ai/azure-ai-projects/.env.template @@ -55,4 +55,10 @@ BING_CUSTOM_SEARCH_INSTANCE_NAME= SHAREPOINT_PROJECT_CONNECTION_ID= A2A_PROJECT_CONNECTION_ID= BROWSER_AUTOMATION_PROJECT_CONNECTION_ID= -OPENAPI_PROJECT_CONNECTION_ID= \ No newline at end of file +OPENAPI_PROJECT_CONNECTION_ID= + +# Used in Fine-tuning tests +AZURE_AI_PROJECTS_TESTS_COMPLETED_OAI_MODEL_FINE_TUNING_JOB_ID= +AZURE_AI_PROJECTS_TESTS_COMPLETED_OSS_MODEL_FINE_TUNING_JOB_ID= +AZURE_AI_PROJECTS_TESTS_RUNNING_FINE_TUNING_JOB_ID= +AZURE_AI_PROJECTS_TESTS_PAUSED_FINE_TUNING_JOB_ID= \ No newline at end of file
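
For reference, a minimal sketch of how a test might read the fine-tuning job IDs added to the template above. The helper below is hypothetical (only the environment variable names come from this patch), assuming the .env file is loaded with python-dotenv as in the samples:

    import os

    from dotenv import load_dotenv

    load_dotenv()

    def _required_env(name: str) -> str:
        # Fail fast with a clear message when a .env entry is missing.
        value = os.environ.get(name)
        if not value:
            raise RuntimeError(f"Set {name} in your .env file to run the fine-tuning tests")
        return value

    # Hypothetical usage, with variable names taken from .env.template:
    completed_oai_job_id = _required_env("AZURE_AI_PROJECTS_TESTS_COMPLETED_OAI_MODEL_FINE_TUNING_JOB_ID")
    paused_job_id = _required_env("AZURE_AI_PROJECTS_TESTS_PAUSED_FINE_TUNING_JOB_ID")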