Skip to content

Commit c75d7b6

Browse files
committed
wip
1 parent 6488769 commit c75d7b6

File tree

3 files changed

+18
-20
lines changed

3 files changed

+18
-20
lines changed

parea/cookbook/simple_experiment_with_openai.py

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
from openai import OpenAI
77

88
from parea import Parea, trace
9-
from parea.schemas import Log
9+
from parea.schemas import Log, EvaluationResult
1010

1111
load_dotenv()
1212

@@ -15,12 +15,16 @@
1515
p.wrap_openai_client(client)
1616

1717

18-
def eval_func(log: Log) -> float:
18+
def eval_func(log: Log) -> EvaluationResult:
1919
from random import random
2020
from time import sleep
2121

2222
sleep(random() * 10)
23-
return random()
23+
return EvaluationResult(
24+
name="eval_func",
25+
score=random(),
26+
reason="Random score",
27+
)
2428

2529

2630
# @trace(eval_funcs=[eval_func])
@@ -43,9 +47,9 @@ def func(topic: str) -> dict[str, str | None]:
4347

4448

4549
if __name__ == "__main__":
46-
# print(func("Python"))
47-
p.experiment(
48-
name="hello-world-example-ch",
49-
data=[{"topic": "Python"}, {"topic": "javascript"}],
50-
func=func,
51-
).run()
50+
print(func("Python"))
51+
# p.experiment(
52+
# name="hello-world-example-ch",
53+
# data=[{"topic": "Fish"}, {"topic": "Python"}],
54+
# func=func,
55+
# ).run()

parea/cookbook/tracing_without_deployed_prompt.py

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,18 +13,15 @@
1313
p = Parea(api_key=os.getenv("PAREA_API_KEY"))
1414

1515

16-
@trace # <--- If you want to log the inputs to the LLM call you can optionally add a trace decorator here
1716
def call_llm(
1817
data: List[dict],
1918
model: str = "gpt-3.5-turbo-1106",
20-
provider: str = "openai",
2119
temperature: float = 0.0,
2220
) -> CompletionResponse:
2321
return p.completion(
2422
data=Completion(
2523
llm_configuration=LLMInputs(
2624
model=model,
27-
provider=provider,
2825
model_params=ModelParams(temp=temperature),
2926
messages=[Message(**d) for d in data],
3027
)
@@ -106,8 +103,7 @@ def refiner2(query: str, additional_description: str, current_arg: str, criticis
106103
"content": "Please generate a new argument that incorporates the feedback from the user.",
107104
},
108105
],
109-
model="claude-2",
110-
provider="anthropic",
106+
model="claude-3-haiku-20240307",
111107
)
112108

113109

@@ -128,7 +124,6 @@ def json_call():
128124
data=Completion(
129125
llm_configuration=LLMInputs(
130126
model="gpt-3.5-turbo-1106",
131-
provider="openai",
132127
model_params=ModelParams(temp=0.0, response_format={"type": "json_object"}),
133128
messages=[Message(**d) for d in json_messages],
134129
)
@@ -147,12 +142,12 @@ def json_call():
147142
"Whether wine is good for you.",
148143
additional_description="Provide a concise, few sentence argument on why wine is good for you.",
149144
)
150-
print(result2)
145+
print(trace_id2, result2)
151146
p.record_feedback(
152147
FeedbackRequest(
153148
trace_id=trace_id2,
154-
score=0.0, # 0.0 (bad) to 1.0 (good)
155-
target="Moonshine is wonderful.",
149+
score=0.7, # 0.0 (bad) to 1.0 (good)
150+
target="Wine is wonderful.",
156151
)
157152
)
158153

@@ -164,7 +159,7 @@ def json_call():
164159
p.record_feedback(
165160
FeedbackRequest(
166161
trace_id=result3.inference_id,
167-
score=0.7, # 0.0 (bad) to 1.0 (good)
162+
score=0.5, # 0.0 (bad) to 1.0 (good)
168163
target="Moonshine is wonderful. End of story.",
169164
)
170165
)

parea/schemas/models.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,6 @@ class UseDeployedPromptResponse:
7777
class FeedbackRequest:
7878
score: float = field(validator=[validators.ge(0), validators.le(1)])
7979
trace_id: Optional[str] = None
80-
inference_id: Optional[str] = None
8180
name: Optional[str] = None
8281
target: Optional[str] = None
8382

0 commit comments

Comments (0)