11 changes: 7 additions & 4 deletions sdk/ai/azure-ai-projects/.env.template
@@ -37,10 +37,13 @@ AZURE_AI_PROJECTS_TESTS_CONTAINER_PROJECT_ENDPOINT=
AZURE_AI_PROJECTS_TESTS_CONTAINER_APP_RESOURCE_ID=
AZURE_AI_PROJECTS_TESTS_CONTAINER_INGRESS_SUBDOMAIN_SUFFIX=

# Used in fine-tuning deployment and inferencing tests
AZURE_AI_PROJECTS_TESTS_AZURE_SUBSCRIPTION_ID=
AZURE_AI_PROJECTS_TESTS_AZURE_RESOURCE_GROUP=
AZURE_AI_PROJECTS_TESTS_AZURE_AOAI_ACCOUNT=
# Used in fine-tuning job submission, deployment and inferencing
AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID=
AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP=
AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT=
VALIDATION_FILE_PATH=
TRAINING_FILE_PATH=
MODEL_NAME=
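
For reference, a minimal sketch of how a script might pick up the renamed variables from a filled-in copy of this template; the variable names are the ones above, everything else here is illustrative:

    import os

    from dotenv import load_dotenv

    # Reads a .env file created from this template into the process environment.
    load_dotenv()

    # Renamed variables: used for fine-tuning job submission, deployment, and inferencing.
    subscription_id = os.environ["AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID"]
    resource_group = os.environ["AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP"]
    aoai_account = os.environ["AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT"]

    # Optional inputs consumed by the fine-tuning samples.
    training_file_path = os.environ.get("TRAINING_FILE_PATH")
    validation_file_path = os.environ.get("VALIDATION_FILE_PATH")
    model_name = os.environ.get("MODEL_NAME")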



4 changes: 2 additions & 2 deletions sdk/ai/azure-ai-projects/samples/files/sample_files.py
@@ -14,10 +14,10 @@
Before running the sample:
pip install azure-ai-projects azure-identity openai python-dotenv
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv
Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder.
"""
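
A minimal sketch of the upload flow this sample describes, assuming the project client exposes an OpenAI-compatible client through a `get_openai_client()` helper as the fine-tuning samples below suggest; that helper name, the default file name, and the `purpose` value are assumptions, not something this diff shows:

    import os
    from pathlib import Path

    from azure.ai.projects import AIProjectClient
    from azure.identity import DefaultAzureCredential
    from dotenv import load_dotenv

    load_dotenv()

    endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
    default_path = Path(__file__).parent / "data" / "sample_file.txt"  # illustrative default
    file_path = os.environ.get("FILE_PATH", str(default_path))

    with DefaultAzureCredential() as credential:
        with AIProjectClient(endpoint=endpoint, credential=credential) as project_client:
            client = project_client.get_openai_client()  # assumed helper
            with open(file_path, "rb") as f:
                uploaded = client.files.create(file=f, purpose="fine-tune")
            print(f"Uploaded file id: {uploaded.id}")
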
@@ -14,10 +14,10 @@
Before running the sample:
pip install azure-ai-projects azure-identity openai python-dotenv aiohttp
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv aiohttp
Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) FILE_PATH - Optional. Path to the file to upload. Defaults to the `data` folder.
"""
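
The async variant follows the same flow through the `aio` namespaces, which is why `aiohttp` joins the pip line (it backs the async transport). A rough sketch, assuming the async client mirrors the sync `get_openai_client()` helper and returns an awaitable OpenAI client:

    import asyncio
    import os

    from azure.ai.projects.aio import AIProjectClient
    from azure.identity.aio import DefaultAzureCredential
    from dotenv import load_dotenv

    load_dotenv()

    endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]


    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            async with AIProjectClient(endpoint=endpoint, credential=credential) as project_client:
                client = project_client.get_openai_client()  # assumed helper
                with open("data/sample_file.txt", "rb") as f:  # illustrative path
                    uploaded = await client.files.create(file=f, purpose="fine-tune")
                print(f"Uploaded file id: {uploaded.id}")


    if __name__ == "__main__":
        asyncio.run(main())
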
@@ -8,17 +8,17 @@
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the synchronous
`.fine_tuning_jobs` methods to create DPO (Direct Preference Optimization) fine-tuning jobs.
Supported OAI Models: GPT 4o, 4.1, 4.1-mini, 4.1-nano, gpt-4o-mini
Supported OpenAI models: GPT-4o, GPT-4.1, GPT-4.1-mini, GPT-4.1-nano, and GPT-4o-mini.

USAGE:
python sample_finetuning_dpo_job.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `gpt-4o-mini` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
@@ -33,7 +33,7 @@

load_dotenv()

endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "dpo_training_set.jsonl"))
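
As a sketch of what the DPO submission boils down to: upload the preference dataset, then create the job with a `dpo` method block. The `get_openai_client()` helper and the exact method/hyperparameter payload are assumptions based on the OpenAI fine-tuning API rather than something this diff shows:

    import os

    from azure.ai.projects import AIProjectClient
    from azure.identity import DefaultAzureCredential
    from dotenv import load_dotenv

    load_dotenv()

    endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
    model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini")
    training_file_path = os.environ.get("TRAINING_FILE_PATH", "data/dpo_training_set.jsonl")

    with DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client:
        client = project_client.get_openai_client()  # assumed helper

        # Upload the preference dataset, then submit the DPO job.
        with open(training_file_path, "rb") as f:
            training_file = client.files.create(file=f, purpose="fine-tune")

        job = client.fine_tuning.jobs.create(
            model=model_name,
            training_file=training_file.id,
            # DPO method block; the beta value is illustrative.
            method={"type": "dpo", "dpo": {"hyperparameters": {"beta": 0.1}}},
        )
        print(f"Submitted DPO fine-tuning job: {job.id}")
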
@@ -8,17 +8,17 @@
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the asynchronous
`.fine_tuning_jobs` methods to create DPO (Direct Preference Optimization) fine-tuning jobs.
Supported OAI Models: GPT 4o, 4.1, 4.1-mini, 4.1-nano, gpt-4o-mini
Supported OpenAI models: GPT-4o, GPT-4.1, GPT-4.1-mini, GPT-4.1-nano, and GPT-4o-mini.

USAGE:
python sample_finetuning_dpo_job_async.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv aiohttp
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv aiohttp

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `gpt-4o-mini` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
@@ -34,7 +34,7 @@

load_dotenv()

endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "gpt-4o-mini")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "dpo_training_set.jsonl"))
@@ -7,18 +7,18 @@
"""
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the synchronous
`.fine_tuning.jobs` methods to create supervised fine-tuning jobs using OSS model.
Supported OSS models with SFT technique: Ministral-3b , Llama-3.3-70b , Qwen3-32b , Gpt-oss-20b
`.fine_tuning.jobs` methods to create supervised fine-tuning jobs using open-source models.
Supported open-source models with SFT: Ministral-3B, Llama-3.3-70B, Qwen3-32B, and gpt-oss-20b.

USAGE:
python sample_finetuning_supervised_job.py
python sample_finetuning_oss_models_supervised_job.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `Ministral-3B` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
@@ -33,7 +33,7 @@

load_dotenv()

endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "Ministral-3B")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "sft_training_set.jsonl"))
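
The open-source supervised job has the same shape as the DPO sketch above, only with an open-source base model name and a `supervised` method block; a short sketch under the same assumptions (client helper and method payload are not confirmed by this diff):

    # `client` is obtained as in the DPO sketch above; `training_file_id` comes from a
    # prior client.files.create(...) call on the SFT training set.
    def submit_oss_sft_job(client, training_file_id: str, model_name: str = "Ministral-3B"):
        """Submit a supervised fine-tuning job for an open-source base model (sketch)."""
        return client.fine_tuning.jobs.create(
            model=model_name,
            training_file=training_file_id,
            # Supervised is the usual default method; shown explicitly. Hyperparameters are illustrative.
            method={"type": "supervised", "supervised": {"hyperparameters": {"n_epochs": 1}}},
        )
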
@@ -7,18 +7,18 @@
"""
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the asynchronous
`.fine_tuning.jobs` methods to create supervised fine-tuning jobs using OSS model.
Supported OSS models with SFT technique: Ministral-3b , Llama-3.3-70b , Qwen3-32b , Gpt-oss-20b
`.fine_tuning.jobs` methods to create supervised fine-tuning jobs using open-source models.
Supported open-source models with SFT: Ministral-3B, Llama-3.3-70B, Qwen3-32B, and gpt-oss-20b.

USAGE:
python sample_finetuning_oss_models_supervised_job_async.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv aiohttp
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv aiohttp

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `Ministral-3B` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
@@ -34,7 +34,7 @@

load_dotenv()

endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "Ministral-3B")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "sft_training_set.jsonl"))
@@ -8,17 +8,17 @@
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the synchronous
`.fine_tuning.jobs` methods to create reinforcement fine-tuning jobs.
Supported OAI Models: o4-mini
Supported OpenAI models: o4-mini

USAGE:
python sample_finetuning_reinforcement_job.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `o4-mini` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
@@ -33,7 +33,7 @@

load_dotenv()

endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "o4-mini")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "rft_training_set.jsonl"))
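
Reinforcement jobs also need a grader configuration that this diff does not show, so only the monitoring side is sketched here. The polling and listing calls are standard OpenAI fine-tuning endpoints; the `get_openai_client()` helper and the FINE_TUNING_JOB_ID variable are assumptions for illustration:

    import os
    import time

    from azure.ai.projects import AIProjectClient
    from azure.identity import DefaultAzureCredential
    from dotenv import load_dotenv

    load_dotenv()

    endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
    job_id = os.environ["FINE_TUNING_JOB_ID"]  # hypothetical variable: the id returned when the job was created

    with DefaultAzureCredential() as credential, AIProjectClient(endpoint=endpoint, credential=credential) as project_client:
        client = project_client.get_openai_client()  # assumed helper

        # Poll until the job reaches a terminal state.
        while True:
            job = client.fine_tuning.jobs.retrieve(job_id)
            print(f"status: {job.status}")
            if job.status in ("succeeded", "failed", "cancelled"):
                break
            time.sleep(60)

        # Inspect recent training events and the produced checkpoints.
        for event in client.fine_tuning.jobs.list_events(job_id, limit=10):
            print(event.message)
        for checkpoint in client.fine_tuning.jobs.checkpoints.list(job_id):
            print(checkpoint.fine_tuned_model_checkpoint)
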
@@ -8,17 +8,17 @@
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the asynchronous
`.fine_tuning.jobs` methods to create reinforcement fine-tuning jobs.
Supported OAI Models: o4-mini
Supported OpenAI models: o4-mini

USAGE:
python sample_finetuning_reinforcement_job_async.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv aiohttp
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv aiohttp

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `o4-mini` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
@@ -34,7 +34,7 @@

load_dotenv()

endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "o4-mini")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "rft_training_set.jsonl"))
@@ -7,26 +7,28 @@
"""
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the synchronous
`.fine_tuning.jobs` methods to create, get, list, cancel, pause, resume, list events and list checkpoints supervised fine-tuning jobs.
It also shows how to deploy the fine-tuned model using Azure Cognitive Services Management Client and perform inference on the deployed model.
Supported OAI Models: GPT 4o, 4o-mini, 4.1, 4.1-mini
`.fine_tuning.jobs` methods to create, get, list, cancel, pause, and resume supervised
fine-tuning jobs, and to list their events and checkpoints.
It also shows how to deploy the fine-tuned model using Azure Cognitive Services Management
Client and perform inference on the deployed model.
Supported OpenAI models: GPT-4o, GPT-4o-mini, GPT-4.1, and GPT-4.1-mini.

USAGE:
python sample_finetuning_supervised_job.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv azure-mgmt-cognitiveservices
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv azure-mgmt-cognitiveservices

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `gpt-4.1` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
4) VALIDATION_FILE_PATH - Optional. Path to the validation data file. Defaults to the `data` folder.
5) AZURE_SUBSCRIPTION_ID - Required. Your Azure subscription ID for fine-tuned model deployment and inferencing.
6) AZURE_RESOURCE_GROUP - Required. The resource group name containing your Azure OpenAI resource.
7) AZURE_AOAI_ACCOUNT - Required. The name of your Azure OpenAI account for fine-tuned model deployment and inferencing.
5) AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID - Required. Your Azure subscription ID for fine-tuned model deployment and inferencing.
6) AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP - Required. The resource group name containing your Azure OpenAI resource.
7) AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT - Required. The name of your Azure OpenAI account for fine-tuned model deployment and inferencing.
"""

import os
@@ -41,7 +43,7 @@
load_dotenv()

# For fine-tuning
endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "gpt-4.1")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "sft_training_set.jsonl"))
@@ -50,9 +52,9 @@
)

# For Deployment and inferencing on model
subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID")
resource_group = os.environ.get("AZURE_RESOURCE_GROUP")
account_name = os.environ.get("AZURE_AOAI_ACCOUNT")
subscription_id = os.environ.get("AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID")
resource_group = os.environ.get("AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP")
account_name = os.environ.get("AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT")

with DefaultAzureCredential(exclude_interactive_browser_credential=False) as credential:

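
A rough sketch of the deployment tail end these two samples add, using the Cognitive Services management client named in the pip line; the model version, SKU name, and capacity are illustrative, and inference afterwards simply targets the new deployment name:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.cognitiveservices import CognitiveServicesManagementClient
    from azure.mgmt.cognitiveservices.models import (
        Deployment,
        DeploymentModel,
        DeploymentProperties,
        Sku,
    )


    def deploy_fine_tuned_model(
        credential: DefaultAzureCredential,
        subscription_id: str,
        resource_group: str,
        account_name: str,
        deployment_name: str,
        fine_tuned_model: str,
    ) -> None:
        """Create a deployment for a completed fine-tuned model (sketch)."""
        mgmt_client = CognitiveServicesManagementClient(credential, subscription_id)
        poller = mgmt_client.deployments.begin_create_or_update(
            resource_group_name=resource_group,
            account_name=account_name,
            deployment_name=deployment_name,
            deployment=Deployment(
                properties=DeploymentProperties(
                    model=DeploymentModel(format="OpenAI", name=fine_tuned_model, version="1"),
                ),
                sku=Sku(name="Standard", capacity=1),  # SKU and capacity are illustrative
            ),
        )
        poller.result()  # block until provisioning completes
        # Inference then goes through the usual chat completions call with
        # model=deployment_name on the project's OpenAI client.
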
@@ -7,26 +7,28 @@
"""
DESCRIPTION:
Given an AIProjectClient, this sample demonstrates how to use the asynchronous
.fine_tuning.jobs methods to create, get, list, cancel, pause, resume, list events and list checkpoints supervised fine-tuning jobs.
It also shows how to deploy the fine-tuned model using Azure Cognitive Services Management Client and perform inference on the deployed model.
Supported OAI Models: GPT 4o, 4o-mini, 4.1, 4.1-mini
`.fine_tuning.jobs` methods to create, get, list, cancel, pause, and resume supervised
fine-tuning jobs, and to list their events and checkpoints.
It also shows how to deploy the fine-tuned model using Azure Cognitive Services Management
Client and perform inference on the deployed model.
Supported OpenAI models: GPT-4o, GPT-4o-mini, GPT-4.1, and GPT-4.1-mini.

USAGE:
python sample_finetuning_supervised_job_async.py

Before running the sample:

pip install azure-ai-projects azure-identity openai python-dotenv aiohttp azure-mgmt-cognitiveservices
pip install "azure-ai-projects>=2.0.0b1" azure-identity openai python-dotenv aiohttp azure-mgmt-cognitiveservices

Set these environment variables with your own values:
1) PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
1) AZURE_AI_PROJECT_ENDPOINT - Required. The Azure AI Project endpoint, as found in the overview page of your
Azure AI Foundry project.
2) MODEL_NAME - Optional. The base model name to use for fine-tuning. Defaults to the `gpt-4.1` model.
3) TRAINING_FILE_PATH - Optional. Path to the training data file. Defaults to the `data` folder.
4) VALIDATION_FILE_PATH - Optional. Path to the validation data file. Defaults to the `data` folder.
5) AZURE_SUBSCRIPTION_ID - Required. Your Azure subscription ID for fine-tuned model deployment and inferencing.
6) AZURE_RESOURCE_GROUP - Required. The resource group name containing your Azure OpenAI resource.
7) AZURE_AOAI_ACCOUNT - Required. The name of your Azure OpenAI account for fine-tuned model deployment and inferencing.
5) AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID - Required. Your Azure subscription ID for fine-tuned model deployment and inferencing.
6) AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP - Required. The resource group name containing your Azure OpenAI resource.
7) AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT - Required. The name of your Azure OpenAI account for fine-tuned model deployment and inferencing.
"""

import asyncio
@@ -42,7 +44,7 @@
load_dotenv()

# For fine-tuning
endpoint = os.environ["PROJECT_ENDPOINT"]
endpoint = os.environ["AZURE_AI_PROJECT_ENDPOINT"]
model_name = os.environ.get("MODEL_NAME", "gpt-4.1")
script_dir = Path(__file__).parent
training_file_path = os.environ.get("TRAINING_FILE_PATH", os.path.join(script_dir, "data", "sft_training_set.jsonl"))
@@ -51,9 +53,9 @@
)

# For Deployment and inferencing on model
subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID")
resource_group = os.environ.get("AZURE_RESOURCE_GROUP")
account_name = os.environ.get("AZURE_AOAI_ACCOUNT")
subscription_id = os.environ.get("AZURE_AI_PROJECTS_AZURE_SUBSCRIPTION_ID")
resource_group = os.environ.get("AZURE_AI_PROJECTS_AZURE_RESOURCE_GROUP")
account_name = os.environ.get("AZURE_AI_PROJECTS_AZURE_AOAI_ACCOUNT")


async def main():
@@ -406,8 +406,8 @@ def test_sft_pre_finetuning_job_deploy_infer(self, **kwargs):
pre_finetuned_model = self.test_finetuning_params["sft"]["openai"]["deployment"]["pre_finetuned_model"]
deployment_name = f"{self.test_finetuning_params['sft']['openai']['deployment']['deployment_name']}-{int(time.time())}"

resource_group = kwargs.get("azure_ai_projects_tests_azure_resource_group", "")
account_name = kwargs.get("azure_ai_projects_tests_azure_aoai_account", "")
resource_group = kwargs.get("azure_ai_projects_azure_resource_group", "")
account_name = kwargs.get("azure_ai_projects_azure_aoai_account", "")

assert resource_group, "Azure resource group is required for deployment test"
assert account_name, "Azure OpenAI account name is required for deployment test"
@@ -436,8 +436,8 @@ async def test_sft_pre_finetuning_job_deploy_infer_async(self, **kwargs):
pre_finetuned_model = self.test_finetuning_params["sft"]["openai"]["deployment"]["pre_finetuned_model"]
deployment_name = f"{self.test_finetuning_params['sft']['openai']['deployment']['deployment_name']}-async-{int(time.time())}"

resource_group = kwargs.get("azure_ai_projects_tests_azure_resource_group", "")
account_name = kwargs.get("azure_ai_projects_tests_azure_aoai_account", "")
resource_group = kwargs.get("azure_ai_projects_azure_resource_group", "")
account_name = kwargs.get("azure_ai_projects_azure_aoai_account", "")

assert resource_group, "Azure resource group is required for deployment test"
assert account_name, "Azure OpenAI account name is required for deployment test"