|
| 1 | +import openai |
| 2 | +import numpy as np |
1 | 3 | from langchain.chat_models import ChatOpenAI |
2 | | -from langchain.prompts.chat import ( |
3 | | - ChatPromptTemplate, |
4 | | - SystemMessagePromptTemplate, |
5 | | - HumanMessagePromptTemplate, |
6 | | -) |
7 | 4 | from langchain.chains import LLMChain |
8 | 5 | from langchain.schema import BaseOutputParser |
9 | | - |
10 | | - |
11 | | -def generate_travel_recommendations(travel_requests): |
12 | | - """ |
13 | | - Generate travel recommendations based on user requests |
14 | | - """ |
15 | | - # create templates |
16 | | - system_template_travel_agent = """You are travel recommendation agent. Provide a short recommendation based on the user request.""" |
17 | | - system_message_prompt = SystemMessagePromptTemplate.from_template( |
18 | | - system_template_travel_agent) |
19 | | - |
20 | | - human_template_travel_agent = "{text}" |
21 | | - human_message_prompt = HumanMessagePromptTemplate.from_template( |
22 | | - human_template_travel_agent) |
23 | | - |
24 | | - # create full prompt |
25 | | - chat_prompt = ChatPromptTemplate.from_messages( |
26 | | - [system_message_prompt, human_message_prompt]) |
27 | | - |
28 | | - chain = LLMChain( |
29 | | - llm=ChatOpenAI(temperature=1), |
30 | | - prompt=chat_prompt |
31 | | - ) |
32 | | - |
33 | | - recommendations = [] |
34 | | - for travel_request in travel_requests: |
35 | | - recommendations.append(chain.run(travel_request)) |
36 | | - |
37 | | - return recommendations |
38 | | - |
39 | | - |
40 | | -def generate_travel_requests(n=5) -> list[str]: |
41 | | - """ Generate travel requests |
42 | | - n: number of requests |
43 | | - """ |
44 | | - # create templates |
45 | | - system_template_travel_agent = """Generate one utterance for how someone would to travel for a {text}""" |
46 | | - system_message_prompt = SystemMessagePromptTemplate.from_template( |
47 | | - system_template_travel_agent) |
48 | | - |
49 | | - # create full prompt |
50 | | - chat_prompt = ChatPromptTemplate.from_messages( |
51 | | - [system_message_prompt]) |
52 | | - |
53 | | - chain = LLMChain( |
54 | | - llm=ChatOpenAI(model='gpt-4'), |
55 | | - prompt=chat_prompt |
56 | | - ) |
57 | | - |
58 | | - results = [] |
59 | | - |
60 | | - for _ in range(0, n): |
61 | | - results.append(chain.run("beach vacation")) |
62 | | - |
63 | | - return results |
64 | | - |
65 | | - |
66 | | -# generate some requests |
67 | | -travel_requests = generate_travel_requests() |
68 | | -print(travel_requests) |
69 | | -# get the recommendations |
70 | | -recommendations = generate_travel_recommendations(travel_requests) |
71 | | -print(recommendations) |
| 6 | +from langchain.evaluation import QAEvalChain |
| 7 | + |
# Ground-truth question/answer pairs used to grade the model's responses.
question_answers = [
    {'question': "When was tea discovered?",
     'answer': "3rd century"},
    {'question': "I'd like a 1 line ice cream slogan",
     'answer': "It's the coolest thing around!"}
]

llm = ChatOpenAI(model="gpt-4")

# Collect the raw response strings and, in parallel, the prediction dicts
# in the shape QAEvalChain.evaluate expects: {"result": <answer string>}.
predictions = []
responses = []
for pair in question_answers:
    question = pair["question"]
    response = llm.predict(
        f"Generate the response to the question: {question}. Only print the answer.")
    responses.append(response)
    # BUG FIX: was {"result": {response}} — that wraps the string in a
    # one-element set, which breaks string comparison and grading downstream.
    predictions.append({"result": response})
| 23 | + |
print("\nGenerating text matches:")

# Exact-string comparison between each reference answer and the model's
# response at the same index.
# BUG FIX: was `response[i]`, which indexes the CHARACTERS of the last
# response string left over from the loop above; the list is `responses`.
for i, reference in enumerate(question_answers):
    print(reference["answer"] == responses[i])
| 28 | + |
| 29 | + |
# Embed all reference answers followed by all model responses in one call.
# Layout of resp['data']: indices 0..n-1 are answers, n..2n-1 are responses.
n = len(question_answers)
resp = openai.Embedding.create(
    input=[qa["answer"] for qa in question_answers] + responses,
    engine="text-embedding-ada-002")

print("\nGenerating Similarity Score:!")
# BUG FIX: the original looped `range(0, n*2, 2)` and kept embedding_b fixed
# at index n, so it compared mismatched pairs (and a response with itself).
# Correct pairing is answer i (index i) vs. response i (index n + i).
for i in range(n):
    answer_embedding = resp['data'][i]['embedding']
    response_embedding = resp['data'][n + i]['embedding']
    # Dot product; for ada-002 embeddings (presumably unit-normalized —
    # per OpenAI docs) this equals cosine similarity. Verify if re-used
    # with a different embedding model.
    similarity_score = np.dot(answer_embedding, response_embedding)
    print(similarity_score, similarity_score > 0.8)
| 40 | + |
| 41 | + |
print("\nGenerating Self eval:")

# Have the same LLM grade its own answers against the reference answers.
qa_eval_chain = QAEvalChain.from_llm(llm)

# The *_key arguments map our dict keys onto the field names the eval
# chain expects for the example, prediction, and reference answer.
graded_outputs = qa_eval_chain.evaluate(
    question_answers,
    predictions,
    question_key="question",
    prediction_key="result",
    answer_key='answer',
)
print(graded_outputs)
0 commit comments