Skip to content

Commit 53b6a3d

Browse files
committed
Reduced app code
1 parent b40087b commit 53b6a3d

File tree

1 file changed

+28
-70
lines changed

1 file changed

+28
-70
lines changed

prompt-engineering/app.py

Lines changed: 28 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -3,39 +3,15 @@
33
import tomllib
44
from pathlib import Path
55

6-
import openai
6+
from openai import OpenAI
77

88
# Authenticate against the OpenAI API; the key is taken from the environment.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Read the application settings (model parameters and prompts) once at import
# time; the rest of the module looks them up through the SETTINGS mapping.
settings_path = Path("settings.toml")
with settings_path.open("rb") as settings_file:
    SETTINGS = tomllib.load(settings_file)
3915

4016

4117
def parse_args() -> argparse.Namespace:
@@ -47,52 +23,34 @@ def parse_args() -> argparse.Namespace:
4723

4824
def main(args: argparse.Namespace) -> None:
    """Read the file named on the command line and print the model's review."""
    source_text = args.file_path.read_text("utf-8")
    completion = get_chat_completion(source_text)
    print(completion)
6627

6728

68-
def get_chat_completion(content: str) -> str:
    """Send a request to the /chat/completions endpoint."""
    general_settings = SETTINGS["general"]
    completion = client.chat.completions.create(
        model=general_settings["model"],
        messages=assemble_chat_messages(content),
        temperature=general_settings["temperature"],
        seed=12345,  # Doesn't do anything for older models
    )
    # The assistant's reply is the first (and only) choice returned.
    return completion.choices[0].message.content
38+
39+
40+
def assemble_chat_messages(content: str) -> list[dict]:
    """Combine all messages into a well-formatted list of dicts."""
    prompts = SETTINGS["prompts"]
    # Few-shot layout: role prompt, then a negative and a positive worked
    # example, then the user's content wrapped in >>>>>/<<<<< delimiters,
    # and finally the instruction prompt.
    return [
        {"role": "system", "content": prompts["role_prompt"]},
        {"role": "user", "content": prompts["negative_example"]},
        {"role": "system", "content": prompts["negative_reasoning"]},
        {"role": "assistant", "content": prompts["negative_output"]},
        {"role": "user", "content": prompts["positive_example"]},
        {"role": "system", "content": prompts["positive_reasoning"]},
        {"role": "assistant", "content": prompts["positive_output"]},
        {"role": "user", "content": f">>>>>\n{content}\n<<<<<"},
        {"role": "user", "content": prompts["instruction_prompt"]},
    ]
9654

9755

9856
if __name__ == "__main__":

0 commit comments

Comments
 (0)