|
36 | 36 | "\n", |
37 | 37 | "```bash\n", |
38 | 38 | "pip install uv\n", |
39 | | - "uv pip install azure-ai-evaluation[redteam] termcolor==2.5.0 azure-identity openai\n", |
| 39 | + "uv pip install azure-ai-evaluation[redteam] azure-identity openai\n", |
40 | 40 | "```\n", |
41 | 41 | "\n", |
42 | 42 | "\n", |
|
64 | 64 | "\n", |
65 | 65 | "# Azure imports\n", |
66 | 66 | "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", |
67 | | - "from azure.ai.evaluation import RedTeam, RiskCategory, AttackStrategy\n", |
| 67 | + "from azure.ai.evaluation.red_team import RedTeam, RiskCategory, AttackStrategy\n", |
68 | 68 | "\n", |
69 | 69 | "# OpenAI imports\n", |
70 | 70 | "from openai import AzureOpenAI\n", |
|
126 | 126 | "azure_openai_api_version = \"2023-12-01-preview\" # Use the latest API version" |
127 | 127 | ] |
128 | 128 | }, |
| 129 | + { |
| 130 | + "cell_type": "code", |
| 131 | + "execution_count": null, |
| 132 | + "metadata": {}, |
| 133 | + "outputs": [], |
| 134 | + "source": [ |
| 135 | + "# Azure AI Project information\n", |
| 136 | + "azure_ai_project = {\n", |
| 137 | + " \"subscription_id\": os.environ.get(\"AZURE_SUBSCRIPTION_ID\"),\n", |
| 138 | + " \"resource_group_name\": os.environ.get(\"AZURE_RESOURCE_GROUP_NAME\"),\n", |
| 139 | + " \"project_name\": os.environ.get(\"AZURE_PROJECT_NAME\"),\n", |
| 140 | + "}\n", |
| 141 | + "\n", |
| 142 | + "# Azure OpenAI deployment information\n", |
| 143 | + "azure_openai_deployment = os.environ.get(\"AZURE_OPENAI_DEPLOYMENT\") # e.g., \"gpt-4\"\n", |
| 144 | + "azure_openai_endpoint = os.environ.get(\n", |
| 145 | + " \"AZURE_OPENAI_ENDPOINT\"\n", |
| 146 | + ") # e.g., \"https://endpoint-name.openai.azure.com/openai/deployments/deployment-name/chat/completions\"\n", |
| 147 | + "azure_openai_api_key = os.environ.get(\"AZURE_OPENAI_API_KEY\") # e.g., \"your-api-key\"\n", |
| 148 | + "azure_openai_api_version = \"2023-12-01-preview\" # Use the latest API version" |
| 149 | + ] |
| 150 | + }, |
129 | 151 | { |
130 | 152 | "cell_type": "markdown", |
131 | 153 | "metadata": {}, |
|
322 | 344 | " messages=[\n", |
323 | 345 | " {\"role\": \"user\", \"content\": latest_message},\n", |
324 | 346 | " ],\n", |
325 | | - " max_tokens=500,\n", |
326 | | - " temperature=0.7,\n", |
| 347 | + " # max_tokens=500, # Uncomment this line (and comment out max_completion_tokens) if your model does not support max_completion_tokens\n", |
| 348 | + " max_completion_tokens=500, # Required for o1 base models (replaces max_tokens)\n", |
| 349 | + " # temperature=0.7, # Uncomment this line only if not using an o1 base model (the temperature param is not supported for o1 base models)\n", |
327 | 350 | " )\n", |
328 | 351 | "\n", |
329 | 352 | " # Format the response to follow the expected chat protocol format\n", |
330 | 353 | " formatted_response = {\"content\": response.choices[0].message.content, \"role\": \"assistant\"}\n", |
331 | | - "\n", |
332 | | - " return {\"messages\": [formatted_response]}\n", |
333 | 354 | " except Exception as e:\n", |
334 | 355 | " print(f\"Error calling Azure OpenAI: {e!s}\")\n", |
335 | | - " return \"I encountered an error and couldn't process your request.\"" |
| 356 | + " formatted_response = \"I encountered an error and couldn't process your request.\"\n", |
| 357 | + " return {\"messages\": [formatted_response]}" |
336 | 358 | ] |
337 | 359 | }, |
338 | 360 | { |
|
428 | 450 | ], |
429 | 451 | "metadata": { |
430 | 452 | "kernelspec": { |
431 | | - "display_name": "3-28", |
| 453 | + "display_name": ".venv", |
432 | 454 | "language": "python", |
433 | 455 | "name": "python3" |
434 | 456 | }, |
|
0 commit comments