import json

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from config import settings

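# In-memory stand-in for a persistent store: one hard-coded job description and
# per-session interview state (the extracted skills plus each skill's evaluation result).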
db = {
    "job_descriptions": {
        1: "I need an AI Engineer who knows langchain"
    },
    "state": {
        "session123": {
            "skills": [],
            "evaluation": []
        }
    }
}


def extract_skills(session_id: str, job_id: int) -> list[str]:
    """Given a job_id, look up the job description and extract the skills for that job description."""
    job_id = int(job_id)
    job_description = db["job_descriptions"][job_id]
    # Stubbed extraction: a real implementation would derive the skills from job_description.
    skills = ["Python", "SQL", "System Design"]
    db["state"][session_id]["skills"] = skills
    print(f"\n📋 Extracted skills: {', '.join(skills)}")
    return skills


def update_evaluation(session_id: str, skill: str, evaluation_result: bool) -> bool:
    """This function takes the session_id, skill, and the evaluation result and saves them to the database. Returns success or failure (bool)."""
    try:
        print(f"Saving to DB: {skill} - {evaluation_result}")
        # Tool parameters arrive as strings in the model's JSON output, so coerce them to bool.
        if isinstance(evaluation_result, str):
            evaluation_result = evaluation_result.lower() == "true"
        db["state"][session_id]["evaluation"].append((skill, evaluation_result))
        return True
    except KeyError:
        return False


def transfer_to_skill_evaluator(session_id: str, skill: str) -> bool:
    """This function takes a skill, evaluates it and returns the evaluation result for the skill as a boolean pass/fail."""
    # Stubbed evaluation that always passes; a real implementation would delegate to a skill-evaluator agent.
    result = True
    print(f"Evaluating skill: {skill}. Result: {result}")
    return result

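# Maps the tool names the model emits to the local Python functions that implement them.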
tools_mapping = {
    "extract_skills": extract_skills,
    "update_evaluation": update_evaluation,
    "transfer_to_skill_evaluator": transfer_to_skill_evaluator
}

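# The system prompt defines a hand-rolled tool-calling protocol: rather than relying on
# native function calling, the model is asked to answer with JSON that names a tool and
# its parameters, which the loop below parses and dispatches.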
ORCHESTRATOR_SYSTEM_PROMPT = """
You are an interview orchestrator. Your goal is to evaluate the candidate on the required skills.

# INSTRUCTIONS

Follow these steps exactly:

1. Extract the key skills from the job description using the extract_skills tool
2. Then welcome the candidate, explain the screening process, and ask the candidate if they are ready
3. Then, for EACH skill in the list, use the transfer_to_skill_evaluator tool to delegate the evaluation
4. Once you get the response, use the update_evaluation tool to save the evaluation result to the database
5. Once all skills are evaluated, mention that the screening is complete and thank the candidate for their time

# OUTPUT FORMAT

Output a JSON object that follows the JSON schema below:

```
{{
  "type": "object",
  "properties": {{
    "response": {{ "type": "string" }},
    "tool_name": {{ "type": "string" }},
    "tool_params": {{
      "type": "array",
      "items": {{
        "type": "object",
        "properties": {{
          "param": {{ "type": "string" }},
          "value": {{ "type": "string" }}
        }}
      }}
    }}
  }},
  "required": []
}}
```

Use the "tool_name" and "tool_params" properties to execute a tool and use the "response" property to reply to the user without a tool call.

# TOOLS

You have access to the following tools:

1. `extract_skills(session_id: str, job_id: int) -> list[str]`

Given a job_id, look up the job description and extract the skills for that job description

2. `update_evaluation(session_id: str, skill: str, evaluation_result: bool) -> bool`

This function takes the session_id, skill, and the evaluation result and saves them to the database. Returns success or failure (bool)

3. `transfer_to_skill_evaluator(session_id: str, skill: str) -> bool`

This function takes a skill, evaluates it and returns the evaluation result for the skill as a boolean pass/fail
"""

ORCHESTRATOR_USER_PROMPT = """
Start an interview for the following values:

session_id: {session_id}
job_id: {job_id}

Begin by welcoming the applicant, extracting the key skills, then evaluate each one.
"""

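# Core agent loop: render the prompt, ask the model for a JSON turn, print any reply to the
# user, dispatch the requested tool (if any) and feed its output back, and keep going until
# the user types "bye".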
def run_orchestrator_agent(session_id, job_id):
    llm = ChatOpenAI(model="gpt-5.1", temperature=0, api_key=settings.OPENAI_API_KEY)
    messages = [
        ("system", ORCHESTRATOR_SYSTEM_PROMPT),
        ("human", ORCHESTRATOR_USER_PROMPT),
    ]
    user_reply = ""
    while user_reply != "bye":
        orchestrator_prompt = ChatPromptTemplate.from_messages(messages)
        orchestrator_chain = orchestrator_prompt | llm
        output = orchestrator_chain.invoke({"job_id": job_id, "session_id": session_id})
        data = json.loads(output.content)
        print(f"Output by LLM: {data}")
        if "response" in data:
            print(data["response"])
        # Keep the model's turn in the history, escaping braces so the raw JSON is not
        # mistaken for template variables on the next pass through ChatPromptTemplate.
        messages.append(("assistant", output.content.replace("{", "{{").replace("}", "}}")))
        if "tool_name" in data and data["tool_name"] != "":
            tool_name = data["tool_name"]
            params = {param["param"]: param["value"] for param in data["tool_params"]}
            tool = tools_mapping[tool_name]
            tool_output = tool(**params)
            print(f"TOOL OUTPUT = {tool_output}")
            # Feed the tool result back to the model so it can decide the next step.
            messages.append(("ai", str(tool_output)))
        else:
            user_reply = input("User: ")
            messages.append(("human", user_reply))


def main():
    job_id = 1
    session_id = "session123"
    run_orchestrator_agent(session_id, job_id)
    print(f"FINAL EVALUATION STATUS: {db['state'][session_id]}")


if __name__ == "__main__":
    main()