diff --git a/adalflow/adalflow/components/model_client/__init__.py b/adalflow/adalflow/components/model_client/__init__.py index ae508ece4..d29f75ada 100644 --- a/adalflow/adalflow/components/model_client/__init__.py +++ b/adalflow/adalflow/components/model_client/__init__.py @@ -65,6 +65,12 @@ OptionalPackages.OPENAI, ) +# Azure OpenAI Client +AzureAIClient = LazyImport( + "adalflow.components.model_client.azureai_client.AzureAIClient", + OptionalPackages.AZURE, +) + __all__ = [ "CohereAPIClient", "TransformerReranker", @@ -76,6 +82,7 @@ "GroqAPIClient", "OpenAIClient", "GoogleGenAIClient", + "AzureAIClient", ] for name in __all__: diff --git a/adalflow/adalflow/components/model_client/azureai_client.py b/adalflow/adalflow/components/model_client/azureai_client.py index 28ef0e057..0263177c0 100644 --- a/adalflow/adalflow/components/model_client/azureai_client.py +++ b/adalflow/adalflow/components/model_client/azureai_client.py @@ -129,67 +129,12 @@ class AzureAIClient(ModelClient): authentication. It is recommended to set environment variables for sensitive data like API keys. Args: - api_key (Optional[str]): Azure OpenAI API key. Default is None. - api_version (Optional[str]): API version to use. Default is None. - azure_endpoint (Optional[str]): Azure OpenAI endpoint URL. Default is None. - credential (Optional[DefaultAzureCredential]): Azure AD credential for token-based authentication. Default is None. - chat_completion_parser (Callable[[Completion], Any]): Function to parse chat completions. Default is `get_first_message_content`. - input_type (Literal["text", "messages"]): Format for input, either "text" or "messages". Default is "text". 
- - **Setup Instructions:** - - - **Using API Key:** - Set up the following environment variables: - ```bash - export AZURE_OPENAI_API_KEY="your_api_key" - export AZURE_OPENAI_ENDPOINT="your_endpoint" - export AZURE_OPENAI_VERSION="your_version" - ``` - - - **Using Azure AD Token:** - Ensure you have configured Azure AD credentials. The `DefaultAzureCredential` will automatically use your configured credentials. - - **Example Usage:** - - .. code-block:: python - - from azure.identity import DefaultAzureCredential - from your_module import AzureAIClient # Adjust import based on your module name - - # Initialize with API key - client = AzureAIClient( - api_key="your_api_key", - api_version="2023-05-15", - azure_endpoint="https://your-endpoint.openai.azure.com/" - ) - - # Or initialize with Azure AD token - client = AzureAIClient( - api_version="2023-05-15", - azure_endpoint="https://your-endpoint.openai.azure.com/", - credential=DefaultAzureCredential() - ) - - # Example call to the chat completion API - api_kwargs = { - "model": "gpt-3.5-turbo", - "messages": [{"role": "user", "content": "What is the meaning of life?"}], - "stream": True - } - response = client.call(api_kwargs=api_kwargs, model_type=ModelType.LLM) - - for chunk in response: - print(chunk) - - - **Notes:** - - Ensure that the API key or credentials are correctly set up and accessible to avoid authentication errors. - - Use `chat_completion_parser` to define how to extract and handle the chat completion responses. - - The `input_type` parameter determines how input is formatted for the API call. - - **References:** - - [Azure OpenAI API Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview) - - [OpenAI API Documentation](https://platform.openai.com/docs/guides/text-generation) + api_key: Azure OpenAI API key. + api_version: Azure OpenAI API version. + azure_endpoint: Azure OpenAI endpoint. + credential: Azure AD credential for token-based authentication. 
+ chat_completion_parser: Function to parse chat completions. + input_type: Input format, either "text" or "messages". """ def __init__( @@ -201,22 +146,11 @@ def __init__( chat_completion_parser: Callable[[Completion], Any] = None, input_type: Literal["text", "messages"] = "text", ): - r"""It is recommended to set the API_KEY into the environment variable instead of passing it as an argument. - - - Initializes the Azure OpenAI client with either API key or AAD token authentication. - - Args: - api_key: Azure OpenAI API key. - api_version: Azure OpenAI API version. - azure_endpoint: Azure OpenAI endpoint. - credential: Azure AD credential for token-based authentication. - chat_completion_parser: Function to parse chat completions. - input_type: Input format, either "text" or "messages". - - """ super().__init__() + # Model type will be set dynamically based on the operation + self._model_type = None + # added api_type azure for azure Ai self.api_type = "azure" self._api_key = api_key @@ -230,6 +164,16 @@ def __init__( ) self._input_type = input_type + @property + def model_type(self) -> ModelType: + """Get the current model type. 
Defaults to LLM if not set.""" + return self._model_type or ModelType.LLM + + @model_type.setter + def model_type(self, value: ModelType): + """Set the model type.""" + self._model_type = value + def init_sync_client(self): api_key = self._api_key or os.getenv("AZURE_OPENAI_API_KEY") azure_endpoint = self._azure_endpoint or os.getenv("AZURE_OPENAI_ENDPOINT") @@ -357,6 +301,10 @@ def convert_inputs_to_api_kwargs( """ final_model_kwargs = model_kwargs.copy() + # If model_type is UNDEFINED, use the current model_type property + if model_type == ModelType.UNDEFINED: + model_type = self.model_type + if model_type == ModelType.EMBEDDER: if isinstance(input, str): input = [input] @@ -383,14 +331,13 @@ def convert_inputs_to_api_kwargs( if match: system_prompt = match.group(1) input_str = match.group(2) - else: print("No match found.") if system_prompt and input_str: messages.append({"role": "system", "content": system_prompt}) messages.append({"role": "user", "content": input_str}) if len(messages) == 0: - messages.append({"role": "system", "content": input}) + messages.append({"role": "user", "content": input}) final_model_kwargs["messages"] = messages else: raise ValueError(f"model_type {model_type} is not supported") @@ -409,8 +356,13 @@ def convert_inputs_to_api_kwargs( ) def call(self, api_kwargs: Dict = {}, model_type: ModelType = ModelType.UNDEFINED): """ - kwargs is the combined input and model_kwargs. Support streaming call. + kwargs is the combined input and model_kwargs. Support streaming call. + Also updates the internal model_type based on the operation. 
""" + # Update internal model type based on the operation + if model_type != ModelType.UNDEFINED: + self.model_type = model_type + log.info(f"api_kwargs: {api_kwargs}") if model_type == ModelType.EMBEDDER: return self.sync_client.embeddings.create(**api_kwargs) diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000..081ab2056 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,89 @@ +# Azure AI Client Example + +This example demonstrates how to use the AdalFlow Azure AI client with both API key and Azure AD authentication methods. + +## Prerequisites + +1. Install the required packages: +```bash +pip install adalflow azure-identity python-dotenv +``` + +2. Set up your Azure OpenAI service and get the necessary credentials: + - API key authentication: Get your API key from the Azure portal + - Azure AD authentication: Set up an Azure AD application and get the client ID, tenant ID, and client secret + +3. Configure your environment variables by copying the `.env.example` file to `.env` and filling in your values: +```bash +cp .env.example .env +``` + +## Environment Variables + +Edit the `.env` file and fill in your values: + +```env +# Azure OpenAI API Configuration +AZURE_OPENAI_API_KEY="your_api_key_here" +AZURE_OPENAI_ENDPOINT="https://your-endpoint.openai.azure.com/" +AZURE_OPENAI_VERSION="2024-02-15-preview" + +# Azure AD Authentication (Optional - if using AAD auth) +AZURE_CLIENT_ID="your_client_id_here" +AZURE_TENANT_ID="your_tenant_id_here" +AZURE_CLIENT_SECRET="your_client_secret_here" + +# Azure Model Deployment +AZURE_MODEL_NAME="your_model_deployment_name" +AZURE_OPENAI_EMBEDDING_DEPLOYMENT="text-embedding-ada-002" +``` + +## Running the Example + +The example script demonstrates: +1. Chat completion with API key authentication +2. Streaming chat completion +3. Text embeddings +4. 
Chat completion with Azure AD authentication + +To run the example: + +```bash +python azure_client_example.py +``` + +## Features Demonstrated + +1. **Multiple Authentication Methods**: + - API key-based authentication + - Azure AD authentication using DefaultAzureCredential + +2. **Chat Completions**: + - Regular chat completion + - Streaming chat completion + - System and user message handling + +3. **Text Embeddings**: + - Generate embeddings for multiple texts + - Embedding dimension output + +## Example Output + +You should see output similar to this: + +``` +Testing with API key authentication: + +=== Testing Chat Completion === +[Response from the model about Paris tourist attractions] + +=== Testing Chat Completion (Streaming) === +[Streamed response from the model] + +=== Testing Embeddings === +Generated 2 embeddings +Embedding dimension: 1536 + +Testing with Azure AD authentication: +[Response from the model using AAD authentication] +``` diff --git a/examples/azure_client_example.py b/examples/azure_client_example.py new file mode 100644 index 000000000..dc5b7472e --- /dev/null +++ b/examples/azure_client_example.py @@ -0,0 +1,215 @@ +""" +Azure AI Client Example for AdalFlow +================================== + +This example demonstrates how to use the AdalFlow Azure AI client with both API key +and Azure AD authentication methods. It shows various features including chat completions, +streaming responses, and text embeddings. + +Setup Instructions: +----------------- +1. Install dependencies using Poetry: + ```bash + poetry install + ``` + +2. 
Configure environment variables: +   Create a .env file with the following variables: +   ``` +   # Azure OpenAI API Configuration +   AZURE_OPENAI_API_KEY="your_api_key_here" +   AZURE_OPENAI_ENDPOINT="https://your-endpoint.openai.azure.com/" +   AZURE_OPENAI_VERSION="2024-02-15-preview" + +   # Model Deployments +   AZURE_OPENAI_DEPLOYMENT="gpt-4o-mini" +   AZURE_OPENAI_EMBEDDING_DEPLOYMENT="text-embedding-ada-002-2" +   ``` + +3. Run the example: +   ```bash +   poetry run python examples/azure_client_example.py +   ``` + +Features Demonstrated: +-------------------- +1. Multiple Authentication Methods: +   - API key-based authentication +   - Azure AD authentication using DefaultAzureCredential + +2. Chat Completions: +   - Regular chat completion +   - Streaming chat completion +   - System and user message handling + +3. Text Embeddings: +   - Generate embeddings for multiple texts +   - Embedding dimension output +""" + +import os +import asyncio +from dotenv import load_dotenv +from azure.identity import DefaultAzureCredential + +from adalflow.components.model_client.azureai_client import AzureAIClient +from adalflow.core import Generator, Embedder +from adalflow.core.types import ModelType +from adalflow.utils import setup_env, get_logger + +# Setup logging +log = get_logger(level="DEBUG") + + +def init_azure_client(use_aad: bool = False) -> AzureAIClient: +    """Initialize Azure client with either API key or AAD authentication. + +    Args: +        use_aad (bool): If True, uses Azure AD authentication. If False, uses API key. 
+ + Returns: + AzureAIClient: Configured client instance + """ + if use_aad: + # Using Azure AD authentication + credential = DefaultAzureCredential() + return AzureAIClient( + api_version=os.getenv("AZURE_OPENAI_VERSION"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + credential=credential, + ) + else: + # Using API key authentication + return AzureAIClient( + api_key=os.getenv("AZURE_OPENAI_API_KEY"), + api_version=os.getenv("AZURE_OPENAI_VERSION"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + ) + + +async def test_chat_completion(client: AzureAIClient, stream: bool = False): + """Test chat completion with optional streaming.""" + print("\n=== Testing Chat Completion ===") + + # Set model type for chat completion + client.model_type = ModelType.LLM + + deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT") + print(f"Using deployment: {deployment_name}") + + # Initialize Generator with the Azure client + generator = Generator( + model_client=client, + model_kwargs={ + "model": deployment_name, + "stream": stream, + "temperature": 0.7, + "max_tokens": 500, + }, + ) + + # Example system and user prompts + system_prompt = "You are a helpful assistant." + user_prompt = "What are the top 3 tourist attractions in Paris?" 
+ + # Format the input for the generator + input_text = f"{system_prompt}{user_prompt}" + + try: + # Generate response using acall + response = await generator.acall(prompt_kwargs={"input_str": input_text}) + + if response.error: + print(f"Error: {response.error}") + return + + if stream and response.data: + async for chunk in response.data: + print(chunk, end="", flush=True) + print("\n") + else: + print(response.raw_response) + except Exception as e: + print(f"Error during chat completion: {str(e)}") + + +async def test_embeddings(client: AzureAIClient): + """Test text embeddings functionality.""" + print("\n=== Testing Embeddings ===") + + # Set model type for embeddings + client.model_type = ModelType.EMBEDDER + + deployment_name = os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT") + print(f"Using embedding deployment: {deployment_name}") + + # Initialize Embedder with the Azure client + embedder = Embedder( + model_client=client, + model_kwargs={ + "model": deployment_name, + }, + ) + + # Example texts to embed + texts = [ + "The quick brown fox jumps over the lazy dog", + "Paris is the capital of France", + ] + + try: + # Generate embeddings + embeddings = await embedder.acall(input=texts) + if embeddings.error: + print(f"Error: {embeddings.error}") + return + + print(f"Generated {len(embeddings.data)} embeddings") + print(f"Embedding dimension: {len(embeddings.data[0].embedding)}") + except Exception as e: + print(f"Error during embedding generation: {str(e)}") + + +async def main(): + """Main function demonstrating all features.""" + # Load environment variables + load_dotenv() + setup_env() + + # Print available deployments + print("\nEnvironment Configuration:") + print(f"Endpoint: {os.getenv('AZURE_OPENAI_ENDPOINT')}") + print(f"Chat Model Deployment: {os.getenv('AZURE_OPENAI_DEPLOYMENT')}") + print( + f"Embedding Model Deployment: {os.getenv('AZURE_OPENAI_EMBEDDING_DEPLOYMENT')}" + ) + + # Test with API key authentication + print("\nTesting with API key 
authentication:") + client = init_azure_client(use_aad=False) + + try: + # Test regular chat completion (no streaming) + print("\nTesting Chat Completion:") + await test_chat_completion(client, stream=False) + + # Test embeddings + print("\nTesting Embeddings:") + await test_embeddings(client) + + print("\nAll tests completed successfully!") + + except Exception as e: + print(f"\nError during testing: {str(e)}") + if "429" in str(e): + print( + "Hit rate limit - the basic functionality works, but we're being throttled." + ) + elif "DeploymentNotFound" in str(e): + print( + "Check if your deployment names in .env match exactly with what's in Azure Portal." + ) + + +if __name__ == "__main__": + asyncio.run(main())