from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from config import settings
+from agents import Agent, Runner, SQLiteSession, function_tool, set_default_openai_key

db = {
    "job_descriptions": {
    }
}

-
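+# @function_tool exposes each helper below to the agent as a callable tool,
+# using its signature and docstring as the tool schema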
+@function_tool
def extract_skills(session_id: str, job_id: int) -> list[str]:
    """Given a job_id, look up the job description and extract the skills for that job description"""
    job_id = int(job_id)
@@ -25,7 +26,8 @@ def extract_skills(session_id: str, job_id: int) -> list[str]:
2526 db ["state" ][session_id ]["skills" ] = skills
2627 print (f"\n 📋 Extracted skills: { ', ' .join (skills )} " )
2728 return skills
28-
29+
30+ @function_tool
2931def update_evaluation (session_id : str , skill : str , evaluation_result : bool ) -> bool :
3032 """This function takes the session_id, skill, and the evaluation result and saves it to the database. Returns success or failure (bool)"""
3133 try :
@@ -37,18 +39,13 @@ def update_evaluation(session_id: str, skill: str, evaluation_result: bool) -> bool:
    except KeyError:
        return False

+@function_tool
def transfer_to_skill_evaluator(session_id: str, skill: str) -> bool:
    """This function takes a skill, evaluates it and returns the evaluation result for the skill as a boolean pass / fail"""
    result = True
    print(f"Evaluating skill: {skill}. Result {result}")
    return result

-tools_mapping = {
-    "extract_skills": extract_skills,
-    "update_evaluation": update_evaluation,
-    "transfer_to_skill_evaluator": transfer_to_skill_evaluator
-}
-
ORCHESTRATOR_SYSTEM_PROMPT = """
You are an interview orchestrator. Your goal is to evaluate the candidate on the required skills.

@@ -61,48 +58,6 @@ def transfer_to_skill_evaluator(session_id: str, skill: str) -> bool:
3. Then, for EACH skill in the list, use the transfer_to_skill_evaluator tool to delegate evaluation
4. Once you get the response, use the update_evaluation tool to save the evaluation result into the database
5. Once all skills are evaluated, mention that the screening is complete and thank the candidate for their time
-
-# OUTPUT FORMAT
-
-Output as a JSON following the JSON schema below:
-
-```
-{{
-  "type": "object",
-  "properties": {{
-    "response": {{ "type": "string" }}
-    "tool_name": {{ "type": "string" }},
-    "tool_params": {{
-      "type": "array",
-      "items": {{
-        "type": "object",
-        "properties": {{
-          "param": {{ "type": "string" }},
-          "value": {{ "type": "string" }}
-        }}
-      }}
-    }}
-  }},
-  "required": []
-}}
-
-Use the "tool_name" and "tool_params" properties to execute a tool and use the "response" property to reply to the user without a tool call.
-
-# TOOLS
-
-You have access to the following tools
-
-1. `extract_skills(session_id: str, job_id: int) -> list[str]`
-
-Given a job_id, lookup job descriptiona and extract the skills for that job description
-
-2. `update_evaluation(session_id: str, skill: str, evaluation_result: bool) -> bool`
-
-This function takes the session_id, skill, and the evaluation result and saves it to the database. Returns success or failure (bool)
-
-3. `transfer_to_skill_evaluator(session_id, skill: str) -> bool`
-
-This function takes a skill, evaluates it and returns the evaluation result for the skill as a boolean pass / fail
10661"""
10762
10863ORCHESTRATOR_USER_PROMPT = """
@@ -115,33 +70,22 @@ def transfer_to_skill_evaluator(session_id: str, skill: str) -> bool:
11570"""
11671
11772def run_orchestrator_agent (session_id , job_id ):
-    llm = ChatOpenAI(model="gpt-5.1", temperature=0, api_key=settings.OPENAI_API_KEY)
-    messages = [
-        ("system", ORCHESTRATOR_SYSTEM_PROMPT),
-        ("human", ORCHESTRATOR_USER_PROMPT),
-    ]
-    user_reply = ""
-    while user_reply != "bye":
-        orchestrator_prompt = ChatPromptTemplate.from_messages(messages)
-        orchestrator_chain = orchestrator_prompt | llm
-        output = orchestrator_chain.invoke({"job_id": job_id, "session_id": session_id})
-        data = json.loads(output.content)
-        print(f"Output by LLM: {data}")
-        if "response" in data:
-            print(data["response"])
-        if "tool_name" in data and data["tool_name"] != "":
-            tool_name = data["tool_name"]
-            params = {param["param"]: param["value"] for param in data["tool_params"]}
-            tools = tools_mapping[tool_name]
-            tool_output = tools(**params)
-            print(f"TOOL OUTPUT = {tool_output}")
-            messages.append(("assistant", output.content.replace("{", "{{").replace("}", "}}")))
-            messages.append(("ai", str(tool_output)))
-        else:
-            user_reply = input("User: ")
-            messages.append(("human", user_reply))
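+    # SQLiteSession keeps the conversation history for this screening session between turns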
+    session = SQLiteSession(f"screening-{session_id}")
+    agent = Agent(
+        name="Interview Orchestrator Agent",
+        instructions=ORCHESTRATOR_SYSTEM_PROMPT,
+        model="gpt-5.1",
+        tools=[extract_skills, transfer_to_skill_evaluator, update_evaluation]
+    )
+    user_input = ORCHESTRATOR_USER_PROMPT.format(job_id=job_id, session_id=session_id)
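+    # Runner.run_sync drives the model/tool-call loop for each turn; the user ends the chat with 'bye'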
+    while user_input != 'bye':
+        result = Runner.run_sync(agent, user_input, session=session)
+        print(result.final_output)
+        user_input = input("User: ")
+    return

def main():
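+    # Register the key from settings as the Agents SDK's default OpenAI API key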
+    set_default_openai_key(settings.OPENAI_API_KEY)
    job_id = 1
    session_id = "session123"
    run_orchestrator_agent(session_id, job_id)