
Commit 5e539fe

Merge pull request #121 from apkostka/feat/chat-refactor
chat module refactor
2 parents 9268462 + 2ba0880 commit 5e539fe

11 files changed: +539 -587 lines changed


.github/workflows/deploy.yml

Lines changed: 0 additions & 1 deletion
@@ -83,7 +83,6 @@ jobs:
 cat > /etc/tenantfirstaid/env <<EOF
 ENV=prod
 OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
-FEEDBACK_PASSWORD=${{ secrets.FEEDBACK_PASSWORD }}
 FLASK_SECRET_KEY=${{ secrets.FLASK_SECRET_KEY }}
 DB_HOST=${{secrets.DB_HOST}}
 DB_PASSWORD=${{secrets.DB_PASSWORD}}

README.md

Lines changed: 1 addition & 5 deletions
@@ -79,8 +79,4 @@ We currently have regular project meetups: https://www.meetup.com/codepdx/ . Als
 On DO, we:
 1. added our ssh public keys
 2. install nginx
-3. Kent got the tls cert (just ask chatgpt?)
-
-## Additional features
-
-go to the route `/feedback` for extra features. You will need to provide the password defined in your `.env` file.
+3. Kent got the tls cert (just ask chatgpt?)

backend/.env.example

Lines changed: 0 additions & 1 deletion
@@ -1,4 +1,3 @@
-FEEDBACK_PASSWORD=password
 OPENAI_API_KEY=openai_api_key
 # If you want to use Github Models instead, omit OPENAI_API_KEY and use
 # GITHUB_API_KEY and MODEL_ENDPOINT
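
The comment in .env.example describes switching to GitHub Models by setting GITHUB_API_KEY and MODEL_ENDPOINT instead of OPENAI_API_KEY. As a rough sketch, that fallback can look like the module-level configuration this commit deletes from chat.py further down; whether ChatManager reproduces it exactly is not shown in this diff.

    # Sketch of the OpenAI / GitHub Models key and endpoint fallback described
    # above; this mirrors the deleted config lines in chat.py, not necessarily
    # the ChatManager implementation.
    import os

    from openai import OpenAI

    api_key = os.getenv("OPENAI_API_KEY", os.getenv("GITHUB_API_KEY"))
    base_url = os.getenv("MODEL_ENDPOINT", "https://api.openai.com/v1")

    client = OpenAI(api_key=api_key, base_url=base_url)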

backend/PERSISTENT_DATA_README.md

Lines changed: 0 additions & 38 deletions
This file was deleted.

backend/scripts/generate_conversation/chat.py

Lines changed: 34 additions & 48 deletions
@@ -3,30 +3,27 @@
 # dependencies = [
 # "openai",
 # "pandas",
+# "python-dotenv",
 # ]
 # ///
-from openai import OpenAI
 import os
 import ast
 import argparse
 from pathlib import Path
 import pandas as pd
 from typing import Self
+from openai.types.responses.response_input_param import Message
 
+from tenantfirstaid.chat import DEFAULT_INSTRUCTIONS, ChatManager
 
-BOT_INSTRUCTIONS = """Pretend you're a legal expert who giving advice about eviction notices in Oregon.
-Please give shorter answers.
-Please only ask one question at a time so that the user isn't confused.
-If the user is being evicted for non-payment of rent and they are too poor to pay the rent and you have confirmed in various ways that the notice is valid and there is a valid court hearing date, then tell them to call Oregon Law Center at 5131234567.
-Focus on finding technicalities that would legally prevent someone getting evicted, such as deficiencies in notice.
-Make sure to inclue a citation to the relevant law in your answer.
+dot_env_path = Path(__file__).parent.parent.parent / ".env"
+print(f"Loading environment variables from {dot_env_path}")
+if dot_env_path.exists():
+    from dotenv import load_dotenv
 
-Only reference the laws below.
-Oregon Chapter 90 - Residential Landlord and Tenant
-Oregon Chapter 91 - Tenancy
-Oregon Chapter 105 - Property Rights
-Portland City Code Chapter 30.01 - Affordable Housing Preservation and Portland Renter Protections
-"""
+    load_dotenv(dotenv_path=dot_env_path, override=True)
+
+BOT_INSTRUCTIONS = DEFAULT_INSTRUCTIONS
 
 USER_INSTRUCTIONS_BASE = """You are a user of the Oregon Tenant First Aid chatbot.
 You are seeking legal advice about tenant rights in Oregon.
@@ -35,41 +32,28 @@
 If the bot asks you a question, you should answer it to the best of your ability, if you do not know the answer you should make something up that is plausible.
 """
 
-API_KEY = os.getenv("OPENAI_API_KEY", os.getenv("GITHUB_API_KEY"))
-BASE_URL = os.getenv("MODEL_ENDPOINT", "https://api.openai.com/v1")
-
-MODEL = os.getenv("MODEL_NAME", "o3")
-MODEL_REASONING_EFFORT = os.getenv("MODEL_REASONING_EFFORT", "medium")
 USER_MODEL = os.getenv("USER_MODEL_NAME", "gpt-4o-2024-11-20")
 
 
 class ChatView:
     client: Self
 
-    def __init__(self, starting_message, user_facts):
-        self.client = OpenAI(
-            api_key=API_KEY,
-            base_url=BASE_URL,
-        )
-        VECTOR_STORE_ID = os.getenv("VECTOR_STORE_ID")
-        NUM_FILE_SEARCH_RESULTS = os.getenv("NUM_FILE_SEARCH_RESULTS", 10)
-        self.input_messages = [{"role": "user", "content": starting_message}]
+    def __init__(self, starting_message, user_facts, city, state):
+        self.chat_manager = ChatManager()
+        self.client = self.chat_manager.get_client()
+        self.city = city
+        self.state = state
+
+        self.input_messages: list[Message] = [
+            Message(role="user", content=starting_message)
+        ]
         self.starting_message = starting_message  # Store the starting message
 
         self.openai_tools = []
         self.USER_INSTRUCTIONS = (
             USER_INSTRUCTIONS_BASE + "\n" + "Facts: " + "\n".join(user_facts)
         )
 
-        if VECTOR_STORE_ID:
-            self.openai_tools.append(
-                {
-                    "type": "file_search",
-                    "vector_store_ids": [VECTOR_STORE_ID],
-                    "max_num_results": NUM_FILE_SEARCH_RESULTS,
-                }
-            )
-
         # Prompt iteration idea
         # If the user starts off by saying something unclear, start off by asking me \"What are you here for?\"
     def _reverse_message_roles(self, messages):
@@ -78,11 +62,11 @@ def _reverse_message_roles(self, messages):
         for message in messages:
             if message["role"] == "user":
                 reversed_messages.append(
-                    {"role": "assistant", "content": message["content"]}
+                    Message(role="assistant", content=message["content"])
                 )
             elif message["role"] == "assistant":
                 reversed_messages.append(
-                    {"role": "user", "content": message["content"]}
+                    Message(role="user", content=message["content"])
                 )
             else:
                 reversed_messages.append(message)
@@ -93,16 +77,15 @@ def bot_response(self):
         tries = 0
         while tries < 3:
             try:
-                response = self.client.responses.create(
-                    model=MODEL,
-                    input=self.input_messages,
-                    instructions=BOT_INSTRUCTIONS,
-                    reasoning={"effort": MODEL_REASONING_EFFORT},
+                # Use the BOT_INSTRUCTIONS for bot responses
+                response = self.chat_manager.generate_chat_response(
+                    self.input_messages,
+                    city=self.city,
+                    state=self.state,
                     stream=False,
-                    tools=self.openai_tools,
                 )
                 self.input_messages.append(
-                    {"role": "assistant", "content": response.output_text}
+                    Message(role="assistant", content=response.output_text)
                 )
                 self.input_messages = self._reverse_message_roles(self.input_messages)
                 return response.output_text
@@ -111,7 +94,7 @@ def bot_response(self):
                 tries += 1
         # If all attempts fail, return a failure message
         failure_message = "I'm sorry, I am unable to generate a response at this time. Please try again later."
-        self.input_messages.append({"role": "assistant", "content": failure_message})
+        self.input_messages.append(Message(role="assistant", content=failure_message))
         return failure_message
 
     def user_response(self):
@@ -127,15 +110,15 @@ def user_response(self):
                     stream=False,
                 )
                 self.input_messages.append(
-                    {"role": "user", "content": response.output_text}
+                    Message(role="user", content=response.output_text)
                 )
                 return response.output_text
             except Exception as e:
                 print(f"Error generating user response: {e}")
                 tries += 1
         # If all attempts fail, return a failure message
         failure_message = "I'm sorry, I am unable to generate a user response at this time. Please try again later."
-        self.input_messages.append({"role": "user", "content": failure_message})
+        self.input_messages.append(Message(role="user", content=failure_message))
         return failure_message
 
     def generate_conversation(self, num_turns=5):
@@ -163,9 +146,12 @@ def process_conversation(row, num_turns=5):
 
     # Convert string representation of list to actual list
     facts = ast.literal_eval(row["facts"])
+    # if row["city"] is NaN, set it to "null"
+    if pd.isna(row["city"]):
+        row["city"] = "null"
 
     # Create chat view with the starting question and facts
-    chat = ChatView(row["first_question"], facts)
+    chat = ChatView(row["first_question"], facts, row["city"], row["state"])
 
     # Generate a new conversation
     return chat.generate_conversation(num_turns)
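
For orientation, here is a minimal sketch of how the refactored process_conversation helper might be driven over a pandas DataFrame. The function signature and the column names (first_question, facts, city, state) come from the hunks above; the CSV filename, the import form, and the apply() wiring are assumptions, not part of this commit.

    # Hypothetical driver for the process_conversation helper shown above.
    # The input path is illustrative; pandas forwards num_turns as a keyword
    # argument to process_conversation for each row.
    import pandas as pd

    from chat import process_conversation  # the script in this diff, imported as a module

    df = pd.read_csv("conversation_seeds.csv")  # hypothetical seed file
    df["conversation"] = df.apply(process_conversation, axis=1, num_turns=5)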
