You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
# Use the provided case synopsis instead of generating a new one
11
+
question_prompt= (
12
+
f"You are playing the role of a detective in a text-based detective game. Below is a fictional case synopsis:\n\n"
13
+
f"'{case_synopsis}'\n\n"
14
+
f"Your task is to generate a question that is appropriate for this scenario. "
15
+
f"Remember, this is a fictional game setting, so avoid any references to real-life guidance or advice. "
16
+
f"The question should relate only to the details provided in the synopsis and should aim to uncover more information about the case.\n"
17
+
f"Please provide only the question."
18
+
)
19
+
returnself.ai_bot.llm.invoke(question_prompt)
8
20
9
21
class AIBot:
    """AI interviewee (witness/suspect) for a text-based detective game.

    Wraps an Ollama-backed LLM. On construction the bot is randomly
    assigned a role ("guilty" or "innocent") and owns the running
    question/answer transcript used as context for each reply.

    NOTE(review): relies on `Ollama` and `QuestionGenerator` being defined
    or imported elsewhere in this module — confirm those imports exist.
    """

    def __init__(self):
        # LLM backend; model name is fixed to llama3 here.
        self.llm = Ollama(model="llama3")
        # Randomly decide whether this interviewee is actually guilty.
        self.role = choice(["guilty", "innocent"])
        # Hard-coded scenario prompt (replaces the earlier os.getenv("PROMPT") lookup).
        self.prompt = "Generate a random crime scenario for a detective game in 3-5 sentences. Describe a situation that has just happened. At the end of your response, specify your role as the person being interviewed at the police station. You might be a witness, bystander, or actively involved, and you could be innocent or guilty. For example: (Interviewee: Wife). Also do not put anything like (Here's a random crime scenario:) in it."
        self.conversation_history = []  # Store previous questions and answers
        self.question_generator = QuestionGenerator(self)  # Initialize the QuestionGenerator with AIBot

    def generate_synopsis(self):
        """Ask the LLM for a fresh crime-scenario synopsis using self.prompt."""
        return self.llm.invoke(self.prompt)

    def respond(self, question):
        """Answer `question` in character, using the full transcript as context.

        Appends both the question and the generated response to
        self.conversation_history, then returns the response string.
        """
        self.conversation_history.append(f"Question: {question}")  # Add question to convo history
        context = "\n".join(self.conversation_history)
        response_prompt = (
            f"Role: {self.role}. You are being questioned. "
            f"Here is the context so far:\n{context}\n\n"
            # Fixed typos in the instruction ("Only response in 1-2 setnences.").
            f"Now respond to the latest question: {question}. Respond in only 1-2 sentences."
        )
        response = self.llm.invoke(response_prompt)  # Get AI Response
        self.conversation_history.append(f"Response: {response}")  # Add response to convo history

        return response

    def is_guilty(self):
        """Return True when this bot was assigned the "guilty" role."""
        return self.role == "guilty"

    def generate_random_question(self):
        """Ask the LLM for a generic detective-style question (no synopsis context)."""
        question_prompt = (
            "Generate a detective-style question related to a crime investigation, similar to these examples: "
            "\"Where were you on the night of the crime?\", "
            "\"Do you have an alibi?\", "
            "\"Why were you near the crime scene?\""
        )
        return self.llm.invoke(question_prompt)
48
+
# Modify this method to accept synopsis as an argument
0 commit comments