 from templates.common.suffix import suffix
 from templates.common.format_instructions import format_instructions
 from templates.common.docs_system_instructions import docs_system_instructions
-from langchain.schema import (
-    # AIMessage,
-    HumanMessage,
-    SystemMessage
-)
-from langchain.tools.json.tool import JsonSpec
-from langchain.agents.agent_toolkits.json.toolkit import JsonToolkit
-from langchain.chat_models import ChatOpenAI, AzureChatOpenAI
-from langchain.llms.openai import OpenAI
-from langchain.agents import create_json_agent, ZeroShotAgent, AgentExecutor
+from langchain.schema import HumanMessage
+from langchain.agents.react.agent import create_react_agent
+from langchain_community.agent_toolkits import JsonToolkit, create_json_agent
+from langchain_community.tools.json.tool import JsonSpec
+
+import openai
+from langchain_openai.chat_models.base import ChatOpenAI
+from langchain.agents import ZeroShotAgent, AgentExecutor
 from langchain.chains import LLMChain
 from config.config import config
-import openai  # required
 from dotenv import load_dotenv
 load_dotenv()
 
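A minimal sketch, not taken from this commit, of how the relocated imports are typically wired: JsonToolkit, JsonSpec and create_json_agent now live in the separately installed langchain-community package, and ChatOpenAI in langchain-openai. The spec dict and model name below are placeholders.

# Minimal sketch (not from this commit): building a JSON agent from the relocated
# langchain_community imports. The spec dict and model name are placeholders;
# assumes OPENAI_API_KEY is set in the environment.
from langchain_community.agent_toolkits import JsonToolkit, create_json_agent
from langchain_community.tools.json.tool import JsonSpec
from langchain_openai import ChatOpenAI

spec = JsonSpec(dict_={"paths": {"/pets": {"get": {"summary": "List pets"}}}}, max_value_length=4000)
toolkit = JsonToolkit(spec=spec)
json_agent = create_json_agent(llm=ChatOpenAI(model_name="gpt-4o"), toolkit=toolkit, verbose=True)
# json_agent.run("What endpoints are available?")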
@@ -32,22 +29,15 @@ def __init__(self, docs, templates, auth_example, parsed_common_files):
         system_instructions = format_template(
             f"{templates.system_instructions(auth_example, parsed_common_files)}\n{docs_system_instructions}")
 
+        model = ChatOpenAI(model_name=config['openai']['model'])
         tools = OpenAPIExplorerTool.create_tools(docs)
-        tool_names = [tool.name for tool in tools]
-
-        prompt_template = ZeroShotAgent.create_prompt(
-            tools=tools,
-            prefix=system_instructions,
-            suffix=suffix,
-            format_instructions=format_instructions,
-            input_variables=['input', 'agent_scratchpad']
-        )
-
-        llm_chain = LLMChain(llm=get_llm(), prompt=prompt_template)
-        agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
-        verbose = True if config['logging']['level'] == 'DEBUG' else False
 
-        self.agent_executor = AgentExecutor.from_agent_and_tools(
+        # o1-preview doesn't support system instructions, so we just concatenate them into the prompt
+        prompt = f"{system_instructions}\n\n{format_instructions}"
+
+        agent = create_react_agent(model, tools, prompt)
+        verbose = True if config['logging']['level'] == 'DEBUG' else False
+        self.agent_executor = AgentExecutor(
             agent=agent, tools=tools, verbose=verbose)
 
     def run(self, input):
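A minimal sketch, not taken from this commit, of the same wiring with an explicit prompt template: in LangChain 0.1+, create_react_agent expects a prompt template that exposes {tools}, {tool_names} and {agent_scratchpad} variables, so a plain f-string like the one built above generally needs to be wrapped (and format_instructions must carry those placeholders). The instruction strings, model name and empty tool list below are placeholders, not the project's real templates.

# Minimal sketch (not from this commit): wrapping concatenated instructions in a
# PromptTemplate for create_react_agent. Instruction strings, model name and the
# empty tool list are placeholders; assumes OPENAI_API_KEY is set in the environment.
from langchain.agents import AgentExecutor
from langchain.agents.react.agent import create_react_agent
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

system_instructions = "You write integration code from API docs."  # placeholder
format_instructions = (
    "You can use the following tools:\n{tools}\n\n"
    "Use this format:\n"
    "Question: {input}\n"
    "Thought: reason about what to do next\n"
    "Action: one of [{tool_names}]\n"
    "Action Input: the input to the action\n"
    "Observation: the action's result\n"
    "... (repeat Thought/Action/Observation as needed)\n"
    "Final Answer: the final answer\n\n"
    "{agent_scratchpad}"
)

prompt = PromptTemplate.from_template(f"{system_instructions}\n\n{format_instructions}")
model = ChatOpenAI(model_name="gpt-4o")  # placeholder model name
tools = []  # e.g. OpenAPIExplorerTool.create_tools(docs)

agent = create_react_agent(model, tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# executor.invoke({"input": "Which endpoints does the API expose?"})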
@@ -87,15 +77,9 @@ def create_user_prompt(prompt, urls_content):
 
 
 def get_llm():
-    if config['openai_api_type'] == "azure":
-        azure_config = config["azure"]
-        return AzureChatOpenAI(deployment_name=azure_config['deployment_name'],
-                               model_name=azure_config["model"], temperature=config["temperature"], request_timeout=300)
-    else:
-        openai_config = config["openai"]
-        print(f"Using OpenAI API: {openai_config['model']}")
-        return ChatOpenAI(
-            model_name=openai_config["model"], temperature=config["temperature"])
+    openai_config = config["openai"]
+    print(f"Using OpenAI API: {openai_config['model']}")
+    return ChatOpenAI(model_name=openai_config["model"], temperature=1)
 
 
 def ask_agent(prompt, docs, templates, auth_example, parsed_common_files, urls_content):
@@ -111,8 +95,7 @@ def no_docs(prompt, templates, auth_example, parsed_common_files, urls_content,
     pd_instructions = format_template(
         templates.system_instructions(auth_example, parsed_common_files))
 
-    result = get_llm()(messages=[
-        SystemMessage(content="You are the most intelligent software engineer in the world. You carefully provide accurate, factual, thoughtful, nuanced code, and are brilliant at reasoning. Follow all of the instructions below — they are all incredibly important. This code will be shipped directly to production, so it's important that it's accurate and complete."),
+    result = get_llm().invoke([
         HumanMessage(content=user_prompt +
                      pd_instructions if normal_order else pd_instructions+user_prompt),
     ])
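A minimal sketch, not taken from this commit, of the .invoke() call path used in no_docs: get_llm() pins temperature=1, presumably because the o1 model family only accepts the default temperature. The model name and prompt text below are placeholders.

# Minimal sketch (not from this commit): calling a chat model with .invoke().
# Model name and prompt are placeholders; assumes OPENAI_API_KEY is set.
from langchain.schema import HumanMessage
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model_name="o1-preview", temperature=1)
result = llm.invoke([HumanMessage(content="Write a hello-world Python script.")])
print(result.content)  # .invoke() returns an AIMessage; the generated text is on .content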