@@ -18,8 +18,7 @@
 import logging
 import os
 
-from azure.identity import DefaultAzureCredential
-from azure.identity.aio import get_bearer_token_provider
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
 from dotenv import load_dotenv
 from openai import AsyncOpenAI
 from pydantic import BaseModel, Field
@@ -40,26 +39,24 @@
 load_dotenv(override=True)
 API_HOST = os.getenv("API_HOST", "github")
 
-
+async_credential = None
 if API_HOST == "azure":
-    token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
+    async_credential = DefaultAzureCredential()
+    token_provider = get_bearer_token_provider(async_credential, "https://cognitiveservices.azure.com/.default")
     client = AsyncOpenAI(
         base_url=os.environ["AZURE_OPENAI_ENDPOINT"] + "/openai/v1",
        api_key=token_provider,
     )
-    model = OpenAIChatModel(
-        os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"],
-        provider=OpenAIProvider(openai_client=client),
-    )
+    model = OpenAIChatModel(os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT"], provider=OpenAIProvider(openai_client=client))
 elif API_HOST == "github":
     client = AsyncOpenAI(api_key=os.environ["GITHUB_TOKEN"], base_url="https://models.inference.ai.azure.com")
-    model = OpenAIChatModel(os.environ.get("GITHUB_MODEL", "gpt-4o-mini"), provider=OpenAIProvider(openai_client=client))
+    model = OpenAIChatModel(os.getenv("GITHUB_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
 elif API_HOST == "ollama":
-    client = AsyncOpenAI(base_url=os.environ["OLLAMA_ENDPOINT"], api_key="none")
+    client = AsyncOpenAI(base_url=os.environ.get("OLLAMA_ENDPOINT", "http://localhost:11434/v1"), api_key="none")
     model = OpenAIChatModel(os.environ["OLLAMA_MODEL"], provider=OpenAIProvider(openai_client=client))
 else:
-    client = AsyncOpenAI()
-    model = OpenAIChatModel(os.environ.get("OPENAI_MODEL", "gpt-4o-mini"), provider=OpenAIProvider(openai_client=client))
+    client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
+    model = OpenAIChatModel(os.environ.get("OPENAI_MODEL", "gpt-4o"), provider=OpenAIProvider(openai_client=client))
 
 
 class IssueProposal(BaseModel):
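The import change above is the substance of this commit: `azure.identity.aio.DefaultAzureCredential` exposes an async `close()`, which the tail of the diff calls. A minimal sketch (not part of the commit; the scope string is copied from the diff) of the same lifecycle with the credential used as an async context manager, which closes its transports automatically:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider


async def demo() -> None:
    # Requires an Azure credential to be discoverable in the environment.
    # The aio credential is an async context manager; leaving the block
    # closes its HTTP transports, so no explicit close() call is needed.
    async with DefaultAzureCredential() as credential:
        token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
        token = await token_provider()  # async callable that fetches a bearer token
        print(token[:8], "...")


asyncio.run(demo())
```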
@@ -73,13 +70,18 @@ class IssueProposal(BaseModel):
 
 
 async def main():
-    server = MCPServerStreamableHTTP(url="https://api.githubcopilot.com/mcp/", headers={"Authorization": f"Bearer {os.getenv('GITHUB_TOKEN', '')}"})
+    server = MCPServerStreamableHTTP(
+        url="https://api.githubcopilot.com/mcp/", headers={"Authorization": f"Bearer {os.getenv('GITHUB_TOKEN', '')}"}
+    )
     desired_tool_names = ("list_issues", "search_code", "search_issues", "search_pull_requests")
     filtered_tools = server.filtered(lambda ctx, tool_def: tool_def.name in desired_tool_names)
 
     agent: Agent[None, IssueProposal] = Agent(
         model,
-        system_prompt=("You are an issue triage assistant. Use the provided tools to find an issue that can be closed " "and produce an IssueProposal."),
+        system_prompt=(
+            "You are an issue triage assistant. Use the provided tools to find an issue that can be closed "
+            "and produce an IssueProposal."
+        ),
         output_type=IssueProposal,
         toolsets=[filtered_tools],
     )
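For context on the `output_type=IssueProposal` argument: pydantic-ai validates the agent's final answer against that model and hands back an instance. A self-contained sketch using pydantic-ai's `TestModel` stub so it runs without API keys; the `IssueProposal` fields here are illustrative, since the real class body sits outside this diff:

```python
import asyncio

from pydantic import BaseModel, Field
from pydantic_ai import Agent
from pydantic_ai.models.test import TestModel  # stub model, makes no API calls


class IssueProposal(BaseModel):
    # Illustrative fields only; the committed class is defined outside this hunk.
    issue_number: int = Field(description="Number of the issue to close")
    reason: str = Field(description="Why the issue can be closed")


agent = Agent(TestModel(), output_type=IssueProposal)


async def demo() -> None:
    result = await agent.run("Find an issue that can be closed.")
    print(result.output)  # a validated IssueProposal instance


asyncio.run(demo())
```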
@@ -96,6 +98,9 @@ async def main():
 
     print(agent_run.result.output)
 
+    if async_credential:
+        await async_credential.close()
+
 
 if __name__ == "__main__":
     logger.setLevel(logging.INFO)
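One design note on the cleanup added above: `close()` runs only when `main()` reaches its last line. A hypothetical, more defensive variant (an assumption, not what this commit does) releases the credential even if the agent run raises:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    credential = DefaultAzureCredential()
    try:
        pass  # build the token provider, client, and agent here, as in the diff
    finally:
        # Runs even when the body raises, so transports are never leaked.
        await credential.close()


asyncio.run(main())
```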