forked from blotterfyi/velocity
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: llm.py
More file actions
64 lines (57 loc) · 2.36 KB
/
llm.py
File metadata and controls
64 lines (57 loc) · 2.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import os
import openai
import re
import json
from logger import get_logger
logger = get_logger(__name__)
def generate_llm_response(prompt, model="gpt-4o-mini", temperature=1):
    """
    Generate a response using OpenAI's chat completions API.

    Args:
        prompt (str): The input prompt for the language model.
        model (str): The name of the OpenAI model to use. Default is "gpt-4o-mini".
        temperature (float): The temperature parameter for response generation. Default is 1.

    Returns:
        str: The generated response text, stripped of surrounding whitespace.

    Raises:
        RuntimeError: If the OPENAI_API_KEY environment variable is not set.
        Exception: Any error raised by the OpenAI client is logged and re-raised.
    """
    api_key = os.environ.get('OPENAI_API_KEY')
    if not api_key:
        # Fail fast with a clear message instead of a later opaque auth error.
        raise RuntimeError("OPENAI_API_KEY environment variable is not set")
    openai.api_key = api_key
    try:
        response = openai.chat.completions.create(
            model=model,
            temperature=temperature,
            messages=[
                # Intentionally empty system message (the original used the
                # equivalent empty literal `""""""`).
                {"role": "system", "content": ""},
                {"role": "user", "content": prompt}
            ],
        )
    except Exception:
        # Surface API failures in the module log before propagating.
        logger.exception("OpenAI chat completion failed (model=%s)", model)
        raise
    text = response.choices[0].message.content.strip()
    return text
def self_reflect(prompt, response):
    """
    Run a two-pass self-reflection loop over a prompt/response pair.

    First the model critiques its own answer in free text, then that
    critique is fed back to produce a revised answer.

    Args:
        prompt (str): The original prompt given to the language model.
        response (str): The initial response generated by the language model.

    Returns:
        tuple: A tuple containing two strings:
            1. updated_plan: The self-reflection and critique of the initial response.
            2. revision: The revised output based on the self-reflection.
    """
    # Pass 1: ask the model to critique its own output.
    critique_request = f"""
You were given a task and your results are at the end. Self critique, find flaws in your plan and thought, and update the plan i.e tell me what would you change if you were given the opportunity again. Do not return a json, just your thoughts and reasoning.
You were given a prompt.
{prompt}
Then you produced some results.
{response}
Now talk about why your response was not up to the mark. And discuss what you will change. Your plan should be at max one paragraph.
"""
    critique = generate_llm_response(critique_request)

    # Pass 2: feed the critique back and ask for a revised answer.
    revision_request = f"""
You produced some results.
{response}
But then someone provided you some critique on how to improve the results. Incorporate the critique into your plan. Return revised output.
{critique}
"""
    revised_output = generate_llm_response(revision_request)

    return critique, revised_output