Skip to content

Commit cb8835c

Browse files
authored
Merge pull request #102 from ks6088ts-labs/feature/issue-83_support-entra-id
add Azure OpenAI config for Microsoft Entra ID
2 parents 62558a4 + 959c7e9 commit cb8835c

File tree

3 files changed

+152
-48
lines changed

3 files changed

+152
-48
lines changed

.env.template

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ LANGSMITH_API_KEY="lsv2_xxx"
1313
# ---------
1414

1515
## Azure OpenAI Service
16+
AZURE_OPENAI_USE_MICROSOFT_ENTRA_ID="False"
1617
AZURE_OPENAI_ENDPOINT="https://xxx.openai.azure.com/"
1718
AZURE_OPENAI_API_KEY="xxx"
1819
AZURE_OPENAI_API_VERSION="2025-04-01-preview"

scripts/azure_openai_operator.py

Lines changed: 98 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,10 @@
11
import logging
22
from base64 import b64encode
3+
from logging import basicConfig
34

45
import typer
56
from dotenv import load_dotenv
7+
from langchain_core.messages import HumanMessage
68

79
from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
810
from template_langgraph.loggers import get_logger
@@ -22,6 +24,12 @@ def load_image_to_base64(image_path: str) -> str:
2224
return b64encode(image_file.read()).decode("utf-8")
2325

2426

27+
def set_verbose_logging(verbose: bool):
28+
if verbose:
29+
logger.setLevel(logging.DEBUG)
30+
basicConfig(level=logging.DEBUG)
31+
32+
2533
@app.command()
2634
def chat(
2735
query: str = typer.Option(
@@ -30,28 +38,49 @@ def chat(
3038
"-q",
3139
help="Query to run with the Azure OpenAI chat model",
3240
),
41+
stream: bool = typer.Option(
42+
False,
43+
"--stream",
44+
"-s",
45+
help="Enable streaming output",
46+
),
3347
verbose: bool = typer.Option(
3448
False,
3549
"--verbose",
3650
"-v",
3751
help="Enable verbose output",
3852
),
3953
):
40-
# Set up logging
41-
if verbose:
42-
logger.setLevel(logging.DEBUG)
54+
set_verbose_logging(verbose)
4355

4456
logger.info("Running...")
45-
response = AzureOpenAiWrapper().chat_model.invoke(
46-
input=query,
47-
)
48-
logger.debug(
49-
response.model_dump_json(
50-
indent=2,
51-
exclude_none=True,
57+
llm = AzureOpenAiWrapper().chat_model
58+
59+
if stream:
60+
response = ""
61+
for chunk in llm.stream(
62+
input=[
63+
HumanMessage(content=query),
64+
],
65+
):
66+
print(
67+
chunk.content,
68+
end="|",
69+
flush=True,
70+
)
71+
response += str(chunk.content)
72+
logger.info(f"Output: {response}")
73+
else:
74+
response = llm.invoke(
75+
input=query,
5276
)
53-
)
54-
logger.info(f"Output: {response.content}")
77+
logger.debug(
78+
response.model_dump_json(
79+
indent=2,
80+
exclude_none=True,
81+
)
82+
)
83+
logger.info(f"Output: {response.content}")
5584

5685

5786
@app.command()
@@ -62,28 +91,70 @@ def reasoning(
6291
"-q",
6392
help="Query to run with the Azure OpenAI reasoning model",
6493
),
94+
stream: bool = typer.Option(
95+
False,
96+
"--stream",
97+
"-s",
98+
help="Enable streaming output",
99+
),
65100
verbose: bool = typer.Option(
66101
False,
67102
"--verbose",
68103
"-v",
69104
help="Enable verbose output",
70105
),
71106
):
72-
# Set up logging
73-
if verbose:
74-
logger.setLevel(logging.DEBUG)
75-
76-
logger.info("Running...")
77-
response = AzureOpenAiWrapper().reasoning_model.invoke(
78-
input=query,
79-
)
80-
logger.debug(
81-
response.model_dump_json(
82-
indent=2,
83-
exclude_none=True,
107+
set_verbose_logging(verbose)
108+
109+
llm = AzureOpenAiWrapper().reasoning_model
110+
if stream:
111+
response = ""
112+
for chunk in llm.stream(
113+
input=[
114+
HumanMessage(content=query),
115+
],
116+
):
117+
print(
118+
chunk.content,
119+
end="|",
120+
flush=True,
121+
)
122+
response += str(chunk.content)
123+
logger.info(f"Output: {response}")
124+
else:
125+
response = llm.invoke(
126+
input=query,
84127
)
85-
)
86-
logger.info(f"Output: {response.content}")
128+
logger.debug(
129+
response.model_dump_json(
130+
indent=2,
131+
exclude_none=True,
132+
)
133+
)
134+
logger.info(f"Output: {response.content}")
135+
136+
137+
@app.command()
138+
def embedding(
139+
query: str = typer.Option(
140+
"患者のデータから考えられる病名を診断してください。年齢: 55歳, 性別: 男性, 主訴: 激しい胸の痛み、息切れ, 検査データ: 心電図異常、トロポニン値上昇, 病歴: 高血圧、喫煙歴あり", # noqa: E501
141+
"--query",
142+
"-q",
143+
help="Query to run with the Azure OpenAI embedding model",
144+
),
145+
verbose: bool = typer.Option(
146+
False,
147+
"--verbose",
148+
"-v",
149+
help="Enable verbose output",
150+
),
151+
):
152+
set_verbose_logging(verbose)
153+
154+
llm = AzureOpenAiWrapper().embedding_model
155+
156+
vector = llm.embed_query(text=query)
157+
logger.info(f"Dimension: {len(vector)}, Vector: {vector[:5]}")
87158

88159

89160
@app.command()
@@ -107,9 +178,7 @@ def image(
107178
help="Enable verbose output",
108179
),
109180
):
110-
# Set up logging
111-
if verbose:
112-
logger.setLevel(logging.DEBUG)
181+
set_verbose_logging(verbose)
113182

114183
base64_image = load_image_to_base64(file_path)
115184
messages = {

template_langgraph/llms/azure_openais.py

Lines changed: 53 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,16 @@
11
from functools import lru_cache
22

3+
from azure.identity import DefaultAzureCredential
34
from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings
45
from pydantic_settings import BaseSettings, SettingsConfigDict
56

7+
from template_langgraph.loggers import get_logger
8+
9+
logger = get_logger(__name__)
10+
611

712
class Settings(BaseSettings):
13+
azure_openai_use_microsoft_entra_id: str = "False"
814
azure_openai_endpoint: str = "https://<YOUR_AOAI_NAME>.openai.azure.com/"
915
azure_openai_api_key: str = "<YOUR_API_KEY>"
1016
azure_openai_api_version: str = "2024-10-21"
@@ -29,25 +35,53 @@ def __init__(self, settings: Settings = None):
2935
if settings is None:
3036
settings = get_azure_openai_settings()
3137

32-
self.chat_model = AzureChatOpenAI(
33-
azure_endpoint=settings.azure_openai_endpoint,
34-
api_key=settings.azure_openai_api_key,
35-
api_version=settings.azure_openai_api_version,
36-
azure_deployment=settings.azure_openai_model_chat,
37-
streaming=True,
38-
)
39-
self.embedding_model = AzureOpenAIEmbeddings(
40-
azure_endpoint=settings.azure_openai_endpoint,
41-
api_key=settings.azure_openai_api_key,
42-
api_version=settings.azure_openai_api_version,
43-
azure_deployment=settings.azure_openai_model_embedding,
44-
)
45-
self.reasoning_model = AzureChatOpenAI(
46-
azure_endpoint=settings.azure_openai_endpoint,
47-
api_key=settings.azure_openai_api_key,
48-
api_version=settings.azure_openai_api_version,
49-
azure_deployment=settings.azure_openai_model_reasoning,
50-
)
38+
if settings.azure_openai_use_microsoft_entra_id.lower() == "true":
39+
logger.info("Using Microsoft Entra ID for authentication")
40+
credential = DefaultAzureCredential()
41+
token = credential.get_token("https://cognitiveservices.azure.com/.default").token
42+
43+
self.chat_model = AzureChatOpenAI(
44+
azure_ad_token=token,
45+
azure_endpoint=settings.azure_openai_endpoint,
46+
api_version=settings.azure_openai_api_version,
47+
azure_deployment=settings.azure_openai_model_chat,
48+
streaming=True,
49+
)
50+
self.reasoning_model = AzureChatOpenAI(
51+
azure_ad_token=token,
52+
azure_endpoint=settings.azure_openai_endpoint,
53+
api_version=settings.azure_openai_api_version,
54+
azure_deployment=settings.azure_openai_model_reasoning,
55+
streaming=True,
56+
)
57+
self.embedding_model = AzureOpenAIEmbeddings(
58+
azure_ad_token=token,
59+
azure_endpoint=settings.azure_openai_endpoint,
60+
api_version=settings.azure_openai_api_version,
61+
azure_deployment=settings.azure_openai_model_embedding,
62+
)
63+
else:
64+
logger.info("Using API key for authentication")
65+
self.chat_model = AzureChatOpenAI(
66+
api_key=settings.azure_openai_api_key,
67+
azure_endpoint=settings.azure_openai_endpoint,
68+
api_version=settings.azure_openai_api_version,
69+
azure_deployment=settings.azure_openai_model_chat,
70+
streaming=True,
71+
)
72+
self.reasoning_model = AzureChatOpenAI(
73+
api_key=settings.azure_openai_api_key,
74+
azure_endpoint=settings.azure_openai_endpoint,
75+
api_version=settings.azure_openai_api_version,
76+
azure_deployment=settings.azure_openai_model_reasoning,
77+
streaming=True,
78+
)
79+
self.embedding_model = AzureOpenAIEmbeddings(
80+
api_key=settings.azure_openai_api_key,
81+
azure_endpoint=settings.azure_openai_endpoint,
82+
api_version=settings.azure_openai_api_version,
83+
azure_deployment=settings.azure_openai_model_embedding,
84+
)
5185

5286
def create_embedding(self, text: str):
5387
"""Create an embedding for the given text."""

0 commit comments

Comments (0)