|
| 1 | +# Copyright (c) Microsoft. All rights reserved. |
| 2 | + |
| 3 | +import asyncio |
| 4 | + |
| 5 | +from agent_framework import HostedMCPTool, HostedWebSearchTool, TextReasoningContent, UsageContent |
| 6 | +from agent_framework.anthropic import AnthropicClient |
| 7 | +from anthropic import AsyncAnthropicFoundry |
| 8 | + |
| 9 | +""" |
| 10 | +Anthropic Foundry Chat Agent Example |
| 11 | +
|
| 12 | +This sample demonstrates using Anthropic with: |
| 13 | +- Setting up an Anthropic-based agent with hosted tools. |
| 14 | +- Using the `thinking` feature. |
| 15 | +- Displaying both thinking and usage information during streaming responses. |
| 16 | +
|
| 17 | +This example requires `anthropic>=0.74.0` and an endpoint in Foundry for Anthropic. |
| 18 | +
|
| 19 | +To use the Foundry integration ensure you have the following environment variables set: |
| 20 | +- ANTHROPIC_FOUNDRY_API_KEY |
| 21 | + Alternatively you can pass in an azure_ad_token_provider function to the AsyncAnthropicFoundry constructor. |
| 22 | +- ANTHROPIC_FOUNDRY_ENDPOINT |
| 23 | + Should be something like https://<your-resource-name>.services.ai.azure.com/anthropic/ |
| 24 | +- ANTHROPIC_CHAT_MODEL_ID |
| 25 | + Should be something like claude-haiku-4-5 |
| 26 | +""" |
| 27 | + |
| 28 | + |
async def main() -> None:
    """Stream an agent response and render thinking, usage, and text as they arrive."""
    chat_client = AnthropicClient(anthropic_client=AsyncAnthropicFoundry())
    docs_agent = chat_client.create_agent(
        name="DocsAgent",
        instructions="You are a helpful agent for both Microsoft docs questions and general questions.",
        tools=[
            HostedMCPTool(
                name="Microsoft Learn MCP",
                url="https://learn.microsoft.com/api/mcp",
            ),
            HostedWebSearchTool(),
        ],
        # Anthropic requires max_tokens; the client supplies 1024 by default,
        # and this override leaves headroom for the thinking budget below.
        max_tokens=20000,
        additional_chat_options={"thinking": {"type": "enabled", "budget_tokens": 10000}},
    )

    query = "Can you compare Python decorators with C# attributes?"
    print(f"User: {query}")
    print("Agent: ", end="", flush=True)
    async for update in docs_agent.run_stream(query):
        for item in update.contents:
            if isinstance(item, TextReasoningContent):
                # Thinking tokens, shown in green.
                print(f"\033[32m{item.text}\033[0m", end="", flush=True)
            if isinstance(item, UsageContent):
                # Running usage totals, shown in blue.
                print(f"\n\033[34m[Usage so far: {item.details}]\033[0m\n", end="", flush=True)
        if update.text:
            print(update.text, end="", flush=True)

    print("\n")
| 61 | + |
# Script entry point: start the asyncio event loop and run the streaming sample.
if __name__ == "__main__":
    asyncio.run(main())
0 commit comments