Skip to content

Commit e0cc6ef

Browse files
πŸ“ Add docstrings to patch-1 (#26)
* πŸ“ Add docstrings to `patch-1` Docstrings generation was requested by @JarbasAl. * #19 (comment) The following files were modified: * `ovos_solver_openai_persona/__init__.py` * `ovos_solver_openai_persona/dialog_transformers.py` * `ovos_solver_openai_persona/engines.py` * Update engines.py --------- Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: JarbasAI <33701864+JarbasAl@users.noreply.github.com>
1 parent 8ff7430 commit e0cc6ef

File tree

3 files changed

+88
-14
lines changed

3 files changed

+88
-14
lines changed

ovos_solver_openai_persona/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -3,6 +3,11 @@
33

44
class OpenAIPersonaSolver(OpenAIChatCompletionsSolver):
55
def __init__(self, *args, **kwargs):
6+
"""
7+
Initializes the solver and issues a deprecation warning.
8+
9+
A DeprecationWarning is raised advising to use OpenAIChatCompletionsSolver instead.
10+
"""
611
warnings.warn(
712
"use OpenAIChatCompletionsSolver instead",
813
DeprecationWarning,

ovos_solver_openai_persona/dialog_transformers.py

Lines changed: 15 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -7,6 +7,11 @@
77

88
class OpenAIDialogTransformer(DialogTransformer):
99
def __init__(self, name="ovos-dialog-transformer-openai-plugin", priority=10, config=None):
10+
"""
11+
Initializes the OpenAIDialogTransformer with a name, priority, and configuration.
12+
13+
Creates an OpenAIChatCompletionsSolver using the provided API key, API URL, and a system prompt from the configuration or a default prompt if not specified.
14+
"""
1015
super().__init__(name, priority, config)
1116
self.solver = OpenAIChatCompletionsSolver({
1217
"key": self.config.get("key"),
@@ -17,9 +22,16 @@ def __init__(self, name="ovos-dialog-transformer-openai-plugin", priority=10, co
1722

1823
def transform(self, dialog: str, context: dict = None) -> Tuple[str, dict]:
1924
"""
20-
Optionally transform passed dialog and/or return additional context
21-
:param dialog: str utterance to mutate before TTS
22-
:returns: str mutated dialog
25+
Transforms the dialog string using a character-specific prompt if available.
26+
27+
If a prompt is provided in the context or configuration, rewrites the dialog as if spoken by a different character using the solver; otherwise, returns the original dialog unchanged.
28+
29+
Args:
30+
dialog: The dialog string to be transformed.
31+
context: Optional dictionary containing transformation context, such as a prompt or language.
32+
33+
Returns:
34+
A tuple containing the transformed (or original) dialog and the unchanged context.
2335
"""
2436
prompt = context.get("prompt") or self.config.get("rewrite_prompt")
2537
if not prompt:

ovos_solver_openai_persona/engines.py

Lines changed: 68 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -19,10 +19,16 @@ def __init__(self, config=None,
1919
enable_tx: bool = False,
2020
enable_cache: bool = False,
2121
internal_lang: Optional[str] = None):
22+
"""
23+
Initializes the OpenAICompletionsSolver with API configuration and credentials.
24+
25+
Raises:
26+
ValueError: If the API key is not provided in the configuration.
27+
"""
2228
super().__init__(config=config, translator=translator,
23-
detector=detector, priority=priority,
24-
enable_tx=enable_tx, enable_cache=enable_cache,
25-
internal_lang=internal_lang)
29+
detector=detector, priority=priority,
30+
enable_tx=enable_tx, enable_cache=enable_cache,
31+
internal_lang=internal_lang)
2632
self.api_url = f"{self.config.get('api_url', 'https://api.openai.com/v1')}/completions"
2733
self.engine = self.config.get("model", "gpt-4o-mini")
2834
self.key = self.config.get("key")
@@ -94,10 +100,16 @@ def __init__(self, config=None,
94100
enable_tx: bool = False,
95101
enable_cache: bool = False,
96102
internal_lang: Optional[str] = None):
103+
"""
104+
Initializes the OpenAIChatCompletionsSolver with API configuration, memory settings, and system prompt.
105+
106+
Raises:
107+
ValueError: If the API key is not provided in the configuration.
108+
"""
97109
super().__init__(config=config, translator=translator,
98-
detector=detector, priority=priority,
99-
enable_tx=enable_tx, enable_cache=enable_cache,
100-
internal_lang=internal_lang)
110+
detector=detector, priority=priority,
111+
enable_tx=enable_tx, enable_cache=enable_cache,
112+
internal_lang=internal_lang)
101113
self.api_url = f"{self.config.get('api_url', 'https://api.openai.com/v1')}/chat/completions"
102114
self.engine = self.config.get("model", "gpt-4o-mini")
103115
self.key = self.config.get("key")
@@ -118,6 +130,18 @@ def __init__(self, config=None,
118130

119131
# OpenAI API integration
120132
def _do_api_request(self, messages):
133+
"""
134+
Sends a chat completion request to the OpenAI API and returns the assistant's reply.
135+
136+
Args:
137+
messages: A list of message dictionaries representing the conversation history.
138+
139+
Returns:
140+
The content of the assistant's reply as a string.
141+
142+
Raises:
143+
RequestException: If the OpenAI API returns an error in the response.
144+
"""
121145
s = requests.Session()
122146
headers = {
123147
"Content-Type": "application/json",
@@ -148,6 +172,17 @@ def _do_api_request(self, messages):
148172

149173
def _do_streaming_api_request(self, messages):
150174

175+
"""
176+
Streams response content from the OpenAI chat completions API.
177+
178+
Sends a POST request with the provided chat messages and yields content chunks as they are received from the streaming API. Stops iteration if an error is encountered or the response is finished.
179+
180+
Args:
181+
messages: A list of chat message dictionaries to send as context.
182+
183+
Yields:
184+
str: Segments of the assistant's reply as they arrive from the API.
185+
"""
151186
s = requests.Session()
152187
headers = {
153188
"Content-Type": "application/json",
@@ -187,6 +222,15 @@ def _do_streaming_api_request(self, messages):
187222
yield chunk["choices"][0]["delta"]["content"]
188223

189224
def get_chat_history(self, system_prompt=None):
225+
"""
226+
Builds the chat history as a list of messages, starting with a system prompt.
227+
228+
Args:
229+
system_prompt: Optional override for the system prompt message.
230+
231+
Returns:
232+
A list of message dictionaries representing the system prompt and the most recent user-assistant exchanges.
233+
"""
190234
qa = self.qa_pairs[-1 * self.max_utts:]
191235
system_prompt = system_prompt or self.system_prompt or "You are a helpful assistant."
192236
messages = [
@@ -198,6 +242,16 @@ def get_chat_history(self, system_prompt=None):
198242
return messages
199243

200244
def get_messages(self, utt, system_prompt=None) -> MessageList:
245+
"""
246+
Builds a list of chat messages including the system prompt, recent conversation history, and the current user utterance.
247+
248+
Args:
249+
utt: The current user input to be appended as the latest message.
250+
system_prompt: Optional system prompt to use as the initial message.
251+
252+
Returns:
253+
A list of message dictionaries representing the chat context for the API.
254+
"""
201255
messages = self.get_chat_history(system_prompt)
202256
messages.append({"role": "user", "content": utt})
203257
return messages
@@ -206,15 +260,18 @@ def get_messages(self, utt, system_prompt=None) -> MessageList:
206260
def continue_chat(self, messages: MessageList,
207261
lang: Optional[str],
208262
units: Optional[str] = None) -> Optional[str]:
209-
"""Generate a response based on the chat history.
263+
"""
264+
Generates a chat response using the provided message history and updates memory if enabled.
265+
266+
If the first message is not a system prompt, prepends the system prompt. Processes the API response and returns a cleaned answer, or None if the answer is empty or only punctuation/underscores. Updates internal memory with the latest question and answer if memory is enabled.
210267
211268
Args:
212-
messages (List[Dict[str, str]]): List of chat messages, each containing 'role' and 'content'.
213-
lang (Optional[str]): The language code for the response. If None, will be auto-detected.
214-
units (Optional[str]): Optional unit system for numerical values.
269+
messages: List of chat messages with 'role' and 'content' keys.
270+
lang: Optional language code for the response.
271+
units: Optional unit system for numerical values.
215272
216273
Returns:
217-
Optional[str]: The generated response or None if no response could be generated.
274+
The generated response as a string, or None if no valid response is produced.
218275
"""
219276
if messages[0]["role"] != "system":
220277
messages = [{"role": "system", "content": self.system_prompt }] + messages

0 commit comments

Comments (0)