Skip to content

Commit 2b1edf9

Browse files
committed
add example code for running Azure AI Foundry
1 parent 1a43d98 commit 2b1edf9

File tree

3 files changed

+120
-0
lines changed

3 files changed

+120
-0
lines changed

.env.template

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@ AZURE_OPENAI_MODEL_CHAT="gpt-5"
2020
AZURE_OPENAI_MODEL_EMBEDDING="text-embedding-3-small"
2121
AZURE_OPENAI_MODEL_REASONING="o4-mini"
2222

23+
## Azure AI Foundry
24+
AZURE_AI_FOUNDRY_INFERENCE_ENDPOINT="https://xxx.services.ai.azure.com/api/projects/xxx"
25+
AZURE_AI_FOUNDRY_INFERENCE_CREDENTIAL="xxx"
26+
AZURE_AI_FOUNDRY_INFERENCE_MODEL_CHAT="gpt-5"
27+
2328
## Ollama Settings
2429
OLLAMA_MODEL_CHAT="gemma3:270m"
2530

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
import logging

import typer
from dotenv import load_dotenv

from template_langgraph.llms.azure_ai_foundrys import AzureAiFoundryWrapper
from template_langgraph.loggers import get_logger

# Initialize the Typer application that exposes the Foundry example commands.
app = typer.Typer(
    add_completion=False,
    help="Azure AI Foundry operator CLI",
)

# Set up logging (module-level logger; commands may raise it to DEBUG via --verbose)
logger = get_logger(__name__)
17+
18+
19+
@app.command()
def chat(
    query: str = typer.Option(
        "Hello",
        "--query",
        "-q",
        help="The query to send to the AI",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
):
    """Send a single chat completion request to an Azure AI Foundry project.

    Authenticates with DefaultAzureCredential, obtains an OpenAI-compatible
    client from the project, and logs the model's reply.
    """
    # Set up logging
    if verbose:
        logger.setLevel(logging.DEBUG)

    logger.info("Running Azure AI Foundry chat...")
    # https://learn.microsoft.com/azure/ai-foundry/quickstarts/get-started-code?tabs=python&pivots=fdp-project
    from azure.ai.projects import AIProjectClient
    from azure.identity import DefaultAzureCredential

    settings = AzureAiFoundryWrapper().settings

    # Connect to the Foundry project endpoint; note the API-key setting is not
    # used here — auth goes through DefaultAzureCredential.
    project_client = AIProjectClient(
        endpoint=settings.azure_ai_foundry_inference_endpoint,
        credential=DefaultAzureCredential(),
    )
    openai_client = project_client.get_openai_client(
        api_version=settings.azure_ai_foundry_inference_api_version,
    )
    completion = openai_client.chat.completions.create(
        model=settings.azure_ai_foundry_inference_model_chat,
        messages=[{"role": "user", "content": query}],
    )
    logger.info(completion.choices[0].message.content)
57+
58+
59+
@app.command()
def chat_langchain(
    query: str = typer.Option(
        "Hello",
        "--query",
        "-q",
        help="The query to send to the AI",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
):
    """Chat against Azure AI Foundry via LangChain (not implemented yet).

    Currently only configures logging and logs a start message; the actual
    LangChain call is still to be written.
    """
    # Set up logging
    if verbose:
        logger.setLevel(logging.DEBUG)

    logger.info("Running Azure AI Foundry chat...")
    # FIXME: impl — this command is a stub; it logs and returns without calling any model.
80+
81+
82+
if __name__ == "__main__":
    # Load variables from a local .env before running the CLI so that the
    # pydantic settings pick them up; override already-exported values.
    load_dotenv(
        verbose=True,
        override=True,
    )
    app()
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
from functools import lru_cache
2+
3+
from pydantic_settings import BaseSettings, SettingsConfigDict
4+
5+
6+
class Settings(BaseSettings):
    """Configuration for Azure AI Foundry inference.

    Values are loaded from environment variables (or a local ``.env`` file);
    field names map to upper-cased variable names, e.g.
    ``AZURE_AI_FOUNDRY_INFERENCE_ENDPOINT``.
    """

    # Project endpoint URL of the Azure AI Foundry project.
    azure_ai_foundry_inference_endpoint: str = "https://xxx.services.ai.azure.com/api/projects/xxx"
    # Credential/API key placeholder. NOTE(review): the example CLI authenticates
    # with DefaultAzureCredential and never reads this field — confirm it is needed.
    azure_ai_foundry_inference_credential: str = "<YOUR_CREDENTIAL>"
    # API version passed to get_openai_client().
    azure_ai_foundry_inference_api_version: str = "2025-04-01-preview"
    # Name of the chat model deployment to use.
    azure_ai_foundry_inference_model_chat: str = "gpt-5"

    model_config = SettingsConfigDict(
        env_file=".env",
        env_ignore_empty=True,
        extra="ignore",
    )
17+
18+
19+
@lru_cache
def get_azure_ai_foundry_settings() -> Settings:
    """Return the process-wide Azure AI Foundry settings (built once, then cached)."""
    settings = Settings()
    return settings
22+
23+
24+
class AzureAiFoundryWrapper:
    """Thin holder for Azure AI Foundry `Settings`, with a cached default."""

    def __init__(self, settings: Settings | None = None):
        """Store the given settings, or fall back to the cached env-backed defaults.

        Args:
            settings: Optional pre-built Settings instance. When None, the
                result of get_azure_ai_foundry_settings() is used.
        """
        # Fix: the parameter is optional (defaults to None), so annotate it as
        # `Settings | None` instead of the misleading bare `Settings`.
        self.settings = settings if settings is not None else get_azure_ai_foundry_settings()

0 commit comments

Comments
 (0)