Skip to content

Commit 00e5c82

Browse files
committed
Update the Python examples
1 parent d52c958 commit 00e5c82

File tree

13 files changed

+27
-27
lines changed

13 files changed

+27
-27
lines changed

examples/babyagi.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -78,15 +78,15 @@ def one_cycle(objective: str, task_list, next_task_id: int):
7878
task = task_list.popleft()
7979

8080
prompt = perform_task_ppt(objective=objective, task=task)
81-
result = complete(prompt)
81+
result = complete(prompt).content
8282

8383
prompt = create_tasks_ppt(
8484
objective=objective,
8585
task=first_task["task_name"],
8686
result=result,
8787
previous_tasks=[first_task["task_name"]],
8888
)
89-
new_tasks = complete(prompt)
89+
new_tasks = complete(prompt).content
9090

9191
new_tasks = create_tasks_fmt(new_tasks)
9292

@@ -99,7 +99,7 @@ def one_cycle(objective: str, task_list, next_task_id: int):
9999
tasks=[task["task_name"] for task in task_list],
100100
next_task_id=next_task_id,
101101
)
102-
prioritized_tasks = complete(prompt)
102+
prioritized_tasks = complete(prompt).content
103103

104104
prioritized_tasks = prioritize_tasks_fmt(prioritized_tasks)
105105

examples/beam-cloud/app.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,5 +42,5 @@ def predict(context, **inputs):
4242
model = context.on_start_value
4343
# Inference
4444
generator = outlines.Generator(model, Literal["Positive", "Negative"])
45-
answer = generator(prompt)
45+
answer = generator(prompt).content
4646
return {"answer": answer}

examples/bentoml/service.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,6 @@ async def generate(
7777
import outlines
7878

7979
generator = outlines.Generator(self.model, outlines.json_schema(json_schema))
80-
character = generator(prompt)
80+
character = generator(prompt).content
8181

8282
return character

examples/cerebrium/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def generate(
4343
character = model(
4444
f"<s>[INST]Give me a character description. Describe {prompt}.[/INST]",
4545
outlines.json_schema(schema),
46-
)
46+
).content
4747

4848
print(character)
4949
return character

examples/dating_profile.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ class Example:
9696

9797
dating_profile_prompt = Template.from_file("prompts/dating_profile.txt")
9898
prompt = dating_profile_prompt(description=new_description, examples=samples)
99-
profile = model(prompt, outlines.json_schema(DatingProfile), max_tokens=500) # type: ignore
99+
profile = model(prompt, outlines.json_schema(DatingProfile), max_tokens=500).content # type: ignore
100100
print(profile)
101101

102102
# Sample generated profiles

examples/llamacpp_example.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,4 +42,4 @@ class Character(BaseModel):
4242
prompt = "Instruct: You are a leading role play gamer. You have seen thousands of different characters and their attributes.\nPlease return a JSON object with common attributes of an RPG character. Give me a character description\nOutput:"
4343

4444
sequence = generator(prompt, seed=seed, max_tokens=512)
45-
print(sequence)
45+
print(sequence.content)

examples/math_generate_code.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,6 @@ def execute_code(code):
3939

4040
prompt = answer_with_code_prompt(question=question, examples=examples)
4141
model = outlines.from_openai(openai.OpenAI(), "gpt-4o-mini")
42-
answer = model(prompt)
42+
answer = model(prompt).content
4343
result = execute_code(answer)
4444
print(f"It takes Carla {result:.0f} minutes to download the file.")

examples/meta_prompting.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -31,12 +31,12 @@ def split_into_steps(question, model_name: str):
3131
model = outlines.from_openai(client, model_name)
3232

3333
prompt = solve(question=question)
34-
answer = model(prompt, max_tokens=500)
34+
answer = model(prompt, max_tokens=500).content
3535
prompt += (
3636
answer
3737
+ "\n what is the only option that displays the same type of relationship as : :?"
3838
)
39-
answer = model(prompt, max_tokens=500)
39+
answer = model(prompt, max_tokens=500).content
4040
completed = prompt + answer
4141

4242
return completed
@@ -55,9 +55,9 @@ def fill_in_the_blanks(question, model_name: str):
5555
model = outlines.from_openai(client, model_name)
5656

5757
prompt = determine_goal(question=question)
58-
answer = model(prompt, stop=["."])
58+
answer = model(prompt, stop=["."]).content
5959
prompt = solve(memory=prompt + answer)
60-
answer = model(prompt, max_tokens=500)
60+
answer = model(prompt, max_tokens=500).content
6161
completed = prompt + answer
6262

6363
return completed
@@ -94,9 +94,9 @@ def ask_an_expert(question, model_name: str):
9494
model = outlines.from_openai(client, model_name)
9595

9696
prompt = find_expert(question=question)
97-
expert = model(prompt, stop=['"'])
97+
expert = model(prompt, stop=['"']).content
9898
prompt = get_answer(question=question, expert=expert, memory=prompt+expert)
99-
answer = model(prompt, max_tokens=500)
99+
answer = model(prompt, max_tokens=500).content
100100
completed = prompt + answer
101101

102102
return completed
@@ -121,9 +121,9 @@ def ask_an_expert_simple(question, model_name: str):
121121
model = outlines.from_openai(client, model_name)
122122

123123
prompt = find_expert(question=question)
124-
expert = model(prompt, stop=["\n", "."])
124+
expert = model(prompt, stop=["\n", "."]).content
125125
prompt = get_answer(expert=expert, memory=prompt+expert)
126-
answer = model(prompt, max_tokens=500)
126+
answer = model(prompt, max_tokens=500).content
127127
completed = prompt + answer
128128

129129
return completed

examples/modal_example.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ def generate(
7676
character = model(
7777
f"<s>[INST]Give me a character description. Describe {prompt}.[/INST]",
7878
outlines.json_schema(schema),
79-
)
79+
).content
8080

8181
print(character)
8282

examples/pick_odd_one_out.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,8 +39,8 @@
3939

4040
prompt = build_ooo_prompt(options=options)
4141
reasoning = gen_text(prompt, stop=["Pick the odd word", "So the odd one"])
42-
prompt += reasoning
42+
prompt += reasoning.content
4343
raw_result = gen_choice(prompt)
44-
result = json.loads(raw_result)["result"]
44+
result = json.loads(raw_result.content)["result"]
4545
prompt += result
4646
print(result)

0 commit comments

Comments
 (0)