Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .env.template
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ LANGSMITH_API_KEY="lsv2_xxx"
# ---------

## Azure OpenAI Service
AZURE_OPENAI_USE_MICROSOFT_ENTRA_ID="False"
AZURE_OPENAI_ENDPOINT="https://xxx.openai.azure.com/"
AZURE_OPENAI_API_KEY="xxx"
AZURE_OPENAI_API_VERSION="2025-04-01-preview"
Expand Down
127 changes: 98 additions & 29 deletions scripts/azure_openai_operator.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
import logging
from base64 import b64encode
from logging import basicConfig

import typer
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage

from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
from template_langgraph.loggers import get_logger
Expand All @@ -22,6 +24,12 @@ def load_image_to_base64(image_path: str) -> str:
return b64encode(image_file.read()).decode("utf-8")


def set_verbose_logging(verbose: bool):
    """Switch the module logger and the root logging config to DEBUG when verbose is set.

    No-op when verbose is False, leaving the existing logging configuration untouched.
    """
    if not verbose:
        return
    logger.setLevel(logging.DEBUG)
    basicConfig(level=logging.DEBUG)


@app.command()
def chat(
query: str = typer.Option(
Expand All @@ -30,28 +38,49 @@ def chat(
"-q",
help="Query to run with the Azure OpenAI chat model",
),
stream: bool = typer.Option(
False,
"--stream",
"-s",
help="Enable streaming output",
),
verbose: bool = typer.Option(
False,
"--verbose",
"-v",
help="Enable verbose output",
),
):
# Set up logging
if verbose:
logger.setLevel(logging.DEBUG)
set_verbose_logging(verbose)

logger.info("Running...")
response = AzureOpenAiWrapper().chat_model.invoke(
input=query,
)
logger.debug(
response.model_dump_json(
indent=2,
exclude_none=True,
llm = AzureOpenAiWrapper().chat_model

if stream:
response = ""
for chunk in llm.stream(
input=[
HumanMessage(content=query),
],
):
print(
chunk.content,
end="|",
flush=True,
)
response += str(chunk.content)
logger.info(f"Output: {response}")
else:
response = llm.invoke(
input=query,
)
)
logger.info(f"Output: {response.content}")
logger.debug(
response.model_dump_json(
indent=2,
exclude_none=True,
)
)
logger.info(f"Output: {response.content}")


@app.command()
Expand All @@ -62,28 +91,70 @@ def reasoning(
"-q",
help="Query to run with the Azure OpenAI reasoning model",
),
stream: bool = typer.Option(
False,
"--stream",
"-s",
help="Enable streaming output",
),
verbose: bool = typer.Option(
False,
"--verbose",
"-v",
help="Enable verbose output",
),
):
# Set up logging
if verbose:
logger.setLevel(logging.DEBUG)

logger.info("Running...")
response = AzureOpenAiWrapper().reasoning_model.invoke(
input=query,
)
logger.debug(
response.model_dump_json(
indent=2,
exclude_none=True,
set_verbose_logging(verbose)

llm = AzureOpenAiWrapper().reasoning_model
if stream:
response = ""
for chunk in llm.stream(
input=[
HumanMessage(content=query),
],
):
print(
chunk.content,
end="|",
flush=True,
)
response += str(chunk.content)
logger.info(f"Output: {response}")
else:
response = llm.invoke(
input=query,
)
)
logger.info(f"Output: {response.content}")
logger.debug(
response.model_dump_json(
indent=2,
exclude_none=True,
)
)
logger.info(f"Output: {response.content}")


@app.command()
def embedding(
    query: str = typer.Option(
        "患者のデータから考えられる病名を診断してください。年齢: 55歳, 性別: 男性, 主訴: 激しい胸の痛み、息切れ, 検査データ: 心電図異常、トロポニン値上昇, 病歴: 高血圧、喫煙歴あり",  # noqa: E501
        "--query",
        "-q",
        help="Query to run with the Azure OpenAI embedding model",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
):
    """Embed the query text with the Azure OpenAI embedding model and log the result.

    Logs the vector's dimension and its first five elements at INFO level.
    """
    set_verbose_logging(verbose)

    model = AzureOpenAiWrapper().embedding_model
    vector = model.embed_query(text=query)
    logger.info(f"Dimension: {len(vector)}, Vector: {vector[:5]}")


@app.command()
Expand All @@ -107,9 +178,7 @@ def image(
help="Enable verbose output",
),
):
# Set up logging
if verbose:
logger.setLevel(logging.DEBUG)
set_verbose_logging(verbose)

base64_image = load_image_to_base64(file_path)
messages = {
Expand Down
72 changes: 53 additions & 19 deletions template_langgraph/llms/azure_openais.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,16 @@
from functools import lru_cache

from azure.identity import DefaultAzureCredential
from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings
from pydantic_settings import BaseSettings, SettingsConfigDict

from template_langgraph.loggers import get_logger

logger = get_logger(__name__)


class Settings(BaseSettings):
azure_openai_use_microsoft_entra_id: str = "False"
azure_openai_endpoint: str = "https://<YOUR_AOAI_NAME>.openai.azure.com/"
azure_openai_api_key: str = "<YOUR_API_KEY>"
azure_openai_api_version: str = "2024-10-21"
Expand All @@ -29,25 +35,53 @@ def __init__(self, settings: Settings = None):
if settings is None:
settings = get_azure_openai_settings()

self.chat_model = AzureChatOpenAI(
azure_endpoint=settings.azure_openai_endpoint,
api_key=settings.azure_openai_api_key,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_chat,
streaming=True,
)
self.embedding_model = AzureOpenAIEmbeddings(
azure_endpoint=settings.azure_openai_endpoint,
api_key=settings.azure_openai_api_key,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_embedding,
)
self.reasoning_model = AzureChatOpenAI(
azure_endpoint=settings.azure_openai_endpoint,
api_key=settings.azure_openai_api_key,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_reasoning,
)
if settings.azure_openai_use_microsoft_entra_id.lower() == "true":
logger.info("Using Microsoft Entra ID for authentication")
credential = DefaultAzureCredential()
token = credential.get_token("https://cognitiveservices.azure.com/.default").token

self.chat_model = AzureChatOpenAI(
azure_ad_token=token,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_chat,
streaming=True,
)
self.reasoning_model = AzureChatOpenAI(
azure_ad_token=token,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_reasoning,
streaming=True,
)
self.embedding_model = AzureOpenAIEmbeddings(
azure_ad_token=token,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_embedding,
)
else:
logger.info("Using API key for authentication")
self.chat_model = AzureChatOpenAI(
api_key=settings.azure_openai_api_key,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_chat,
streaming=True,
)
self.reasoning_model = AzureChatOpenAI(
api_key=settings.azure_openai_api_key,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_reasoning,
streaming=True,
)
self.embedding_model = AzureOpenAIEmbeddings(
api_key=settings.azure_openai_api_key,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
azure_deployment=settings.azure_openai_model_embedding,
)

def create_embedding(self, text: str):
"""Create an embedding for the given text."""
Expand Down