# dependencies = [
#     "openai",
#     "pandas",
+ #     "python-dotenv",
# ]
# ///
- from openai import OpenAI
import os
import ast
import argparse
from pathlib import Path
import pandas as pd
from typing import Self
+ from openai.types.responses.response_input_param import Message

+ from tenantfirstaid.chat import DEFAULT_INSTRUCTIONS, ChatManager

- BOT_INSTRUCTIONS = """Pretend you're a legal expert who giving advice about eviction notices in Oregon.
- Please give shorter answers.
- Please only ask one question at a time so that the user isn't confused.
- If the user is being evicted for non-payment of rent and they are too poor to pay the rent and you have confirmed in various ways that the notice is valid and there is a valid court hearing date, then tell them to call Oregon Law Center at 5131234567.
- Focus on finding technicalities that would legally prevent someone getting evicted, such as deficiencies in notice.
- Make sure to inclue a citation to the relevant law in your answer.
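+ # Resolve the .env file assumed to sit at the repository root (three directories above this script)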
+ dot_env_path = Path(__file__).parent.parent.parent / ".env"
+ print(f"Loading environment variables from {dot_env_path}")
+ if dot_env_path.exists():
+     from dotenv import load_dotenv

- Only reference the laws below.
- Oregon Chapter 90 - Residential Landlord and Tenant
- Oregon Chapter 91 - Tenancy
- Oregon Chapter 105 - Property Rights
- Portland City Code Chapter 30.01 - Affordable Housing Preservation and Portland Renter Protections
- """
+     load_dotenv(dotenv_path=dot_env_path, override=True)
+
+ BOT_INSTRUCTIONS = DEFAULT_INSTRUCTIONS

USER_INSTRUCTIONS_BASE = """You are a user of the Oregon Tenant First Aid chatbot.
You are seeking legal advice about tenant rights in Oregon.
If the bot asks you a question, you should answer it to the best of your ability, if you do not know the answer you should make something up that is plausible.
"""

- API_KEY = os.getenv("OPENAI_API_KEY", os.getenv("GITHUB_API_KEY"))
- BASE_URL = os.getenv("MODEL_ENDPOINT", "https://api.openai.com/v1")
-
- MODEL = os.getenv("MODEL_NAME", "o3")
- MODEL_REASONING_EFFORT = os.getenv("MODEL_REASONING_EFFORT", "medium")
USER_MODEL = os.getenv("USER_MODEL_NAME", "gpt-4o-2024-11-20")


class ChatView:
    client: Self

-     def __init__(self, starting_message, user_facts):
-         self.client = OpenAI(
-             api_key=API_KEY,
-             base_url=BASE_URL,
-         )
-         VECTOR_STORE_ID = os.getenv("VECTOR_STORE_ID")
-         NUM_FILE_SEARCH_RESULTS = os.getenv("NUM_FILE_SEARCH_RESULTS", 10)
-         self.input_messages = [{"role": "user", "content": starting_message}]
+     def __init__(self, starting_message, user_facts, city, state):
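+         # Reuse the shared ChatManager from tenantfirstaid.chat for the OpenAI client and response generation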
+         self.chat_manager = ChatManager()
+         self.client = self.chat_manager.get_client()
+         self.city = city
+         self.state = state
+
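+         # Keep the running conversation as typed Message objects rather than plain role/content dicts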
+         self.input_messages: list[Message] = [
+             Message(role="user", content=starting_message)
+         ]
        self.starting_message = starting_message  # Store the starting message

        self.openai_tools = []
        self.USER_INSTRUCTIONS = (
            USER_INSTRUCTIONS_BASE + "\n" + "Facts: " + "\n".join(user_facts)
        )

-         if VECTOR_STORE_ID:
-             self.openai_tools.append(
-                 {
-                     "type": "file_search",
-                     "vector_store_ids": [VECTOR_STORE_ID],
-                     "max_num_results": NUM_FILE_SEARCH_RESULTS,
-                 }
-             )
-
    # Prompt iteration idea
    # If the user starts off by saying something unclear, start off by asking me \"What are you here for?\"
    def _reverse_message_roles(self, messages):
@@ -78,11 +62,11 @@ def _reverse_message_roles(self, messages):
        for message in messages:
            if message["role"] == "user":
                reversed_messages.append(
-                     {"role": "assistant", "content": message["content"]}
+                     Message(role="assistant", content=message["content"])
                )
            elif message["role"] == "assistant":
                reversed_messages.append(
-                     {"role": "user", "content": message["content"]}
+                     Message(role="user", content=message["content"])
                )
            else:
                reversed_messages.append(message)
@@ -93,16 +77,15 @@ def bot_response(self):
        tries = 0
        while tries < 3:
            try:
-                 response = self.client.responses.create(
-                     model=MODEL,
-                     input=self.input_messages,
-                     instructions=BOT_INSTRUCTIONS,
-                     reasoning={"effort": MODEL_REASONING_EFFORT},
+                 # Use the BOT_INSTRUCTIONS for bot responses
+                 response = self.chat_manager.generate_chat_response(
+                     self.input_messages,
+                     city=self.city,
+                     state=self.state,
                    stream=False,
-                     tools=self.openai_tools,
                )
                self.input_messages.append(
-                     {"role": "assistant", "content": response.output_text}
+                     Message(role="assistant", content=response.output_text)
                )
                self.input_messages = self._reverse_message_roles(self.input_messages)
                return response.output_text
@@ -111,7 +94,7 @@ def bot_response(self):
                tries += 1
        # If all attempts fail, return a failure message
        failure_message = "I'm sorry, I am unable to generate a response at this time. Please try again later."
-         self.input_messages.append({"role": "assistant", "content": failure_message})
+         self.input_messages.append(Message(role="assistant", content=failure_message))
        return failure_message

    def user_response(self):
@@ -127,15 +110,15 @@ def user_response(self):
                    stream=False,
                )
                self.input_messages.append(
-                     {"role": "user", "content": response.output_text}
+                     Message(role="user", content=response.output_text)
                )
                return response.output_text
            except Exception as e:
                print(f"Error generating user response: {e}")
                tries += 1
        # If all attempts fail, return a failure message
        failure_message = "I'm sorry, I am unable to generate a user response at this time. Please try again later."
-         self.input_messages.append({"role": "user", "content": failure_message})
+         self.input_messages.append(Message(role="user", content=failure_message))
        return failure_message

    def generate_conversation(self, num_turns=5):
@@ -163,9 +146,12 @@ def process_conversation(row, num_turns=5):

    # Convert string representation of list to actual list
    facts = ast.literal_eval(row["facts"])
+     # if row["city"] is NaN, set it to "null"
+     if pd.isna(row["city"]):
+         row["city"] = "null"
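+         # (assumption: downstream ChatManager prompts treat the literal string "null" as "no city specified")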

    # Create chat view with the starting question and facts
-     chat = ChatView(row["first_question"], facts)
+     chat = ChatView(row["first_question"], facts, row["city"], row["state"])

    # Generate a new conversation
    return chat.generate_conversation(num_turns)