-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathanswerUserQuery.py
More file actions
96 lines (72 loc) · 3.49 KB
/
answerUserQuery.py
File metadata and controls
96 lines (72 loc) · 3.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import openai
import utilityFunctions as util
import config
import promptStorage as prompts
import time
import asyncio
import json
import app
def main():
    """Script entry point; currently a no-op.

    The pipeline stages (answering_stage, answer_one_question, ...) are
    invoked from other modules; this stub is kept so the module can be run
    directly without side effects.

    Example usage kept for reference (originally commented out; ~15k tokens):
        response_list = separate_answer(question_list[2], True, lawful[0:12], "gpt-3.5-turbo")
        print(response_list)
    """
    pass
def answering_stage(question_list, legal_text, user_query):
    """Run the answering pipeline for the third question.

    Collects one answer per legal-text section, then asks GPT-4 to draft a
    summary template from those answers.

    NOTE(review): user_query is currently unused here — kept for interface
    compatibility with callers.

    Returns a tuple of (summary template, concatenated section answers,
    the question that was answered).
    """
    print("Starting answering stage...")
    # Per-section answers for question #3 against legal text #3.
    section_answers = separate_answer(question_list[2], legal_text[2], "gpt-3.5-turbo-16k")
    start = time.time()
    print(" - Creating answer template with GPT 4")
    template = create_summary_template(question_list[2], section_answers)
    elapsed = round(time.time() - start, 2)
    print(" * Total time: {}".format(elapsed))
    return template, section_answers, question_list[2]
def separate_answer(question, legal_text, model):
    """Ask *question* against every section of *legal_text* and merge the answers.

    One chat prompt is built per section and all of them are completed
    concurrently (up to 100 in flight) via util.get_completion_list.
    Completions containing the "[IGNORE]" marker are dropped as having no
    relevant content.

    Returns a single string in which each kept answer is preceded by a
    "====" divider line.
    """
    # One prompt per section; answered concurrently.
    message_list = [
        prompts.get_prompt_simple_answer(section, question)
        for section in legal_text
    ]
    results = asyncio.run(util.get_completion_list(message_list, 100, used_model=model))
    response_list = [
        completion["choices"][0]["message"]["content"]
        for completion in results
    ]
    # Build the merged answer with join rather than repeated '+' concatenation.
    parts = []
    for response in response_list:
        if "[IGNORE]" in response:
            continue  # section judged irrelevant by the model
        parts.append("====\n" + response + "\n")
    return "".join(parts)
def create_summary_template(question, legal_documentation):
    """Ask GPT-4 to draft a summary template for *question*.

    The collected per-section answers (*legal_documentation*) are embedded in
    the prompt; returns the template text produced by the model.
    """
    messages = prompts.get_prompt_summary_template(question, legal_documentation)
    completion = util.create_chat_completion(
        used_model="gpt-4",
        prompt_messages=messages,
        temp=1,
        api_key_choice="will",
        debug_print=True,
    )
    return completion.choices[0].message.content
def populate_summary_template(question, legal_documentation, template):
    """Fill a previously drafted summary *template* with concrete content.

    Uses the 16k-context GPT-3.5 model at temperature 0 for deterministic
    population; returns the populated template text.
    """
    messages = prompts.get_prompt_populate_summary_template(question, template, legal_documentation)
    completion = util.create_chat_completion(
        used_model="gpt-3.5-turbo-16k",
        prompt_messages=messages,
        temp=0,
        api_key_choice="will",
        debug_print=True,
    )
    return completion.choices[0].message.content
def answer_one_question(prompt_final_answer, use_gpt_4):
    """Answer a single pre-built prompt, optionally with GPT-4.

    Returns a tuple of (answer text, prompt token count, completion token
    count, estimated cost in dollars).
    """
    # GPT-4 on request, otherwise the 16k-context GPT-3.5 model.
    model = "gpt-4" if use_gpt_4 else "gpt-3.5-turbo-16k"
    chat_completion = util.create_chat_completion(
        used_model=model,
        prompt_messages=prompt_final_answer,
        temp=0.2,
        api_key_choice="will",
    )
    answer = chat_completion.choices[0].message.content
    usage = chat_completion.usage
    prompt_tokens = usage["prompt_tokens"]
    completion_tokens = usage["completion_tokens"]
    cost = util.calculate_prompt_cost(model, prompt_tokens, completion_tokens)
    return answer, prompt_tokens, completion_tokens, cost
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()