Skip to content

Commit eaae3af

Browse files
authored
fix: modernize_signatures (#10)
* fix: modernize_signatures — update method kwargs for latest OPM * . * refactor!: drop outdated classes
1 parent 095abfc commit eaae3af

File tree

4 files changed

+65
-142
lines changed

4 files changed

+65
-142
lines changed

ovos_solver_openai_persona/__init__.py

Lines changed: 19 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
1+
from typing import Optional
2+
13
from ovos_solver_openai_persona.engines import OpenAIChatCompletionsSolver
24

35

@@ -24,10 +26,21 @@ def get_chat_history(self, persona=None):
2426
return messages
2527

2628
# officially exported Solver methods
27-
def get_spoken_answer(self, query, context=None):
28-
context = context or {}
29-
persona = context.get("persona") or self.default_persona
30-
messages = self.get_prompt(query, persona)
29+
def get_spoken_answer(self, query: str,
30+
lang: Optional[str] = None,
31+
units: Optional[str] = None) -> Optional[str]:
32+
"""
33+
Obtain the spoken answer for a given query.
34+
35+
Args:
36+
query (str): The query text.
37+
lang (Optional[str]): Optional language code. Defaults to None.
38+
units (Optional[str]): Optional units for the query. Defaults to None.
39+
40+
Returns:
41+
str: The spoken answer as a text response.
42+
"""
43+
messages = self.get_prompt(query, self.default_persona)
3144
response = self._do_api_request(messages)
3245
answer = response.strip()
3346
if not answer or not answer.strip("?") or not answer.strip("_"):
@@ -46,7 +59,6 @@ def get_spoken_answer(self, query, context=None):
4659
# Think of it like playing with toy building blocks that represent particles.
4760
# Instead of rigid structures, these particles can be in different energy levels or "states." Quantum mechanics helps scientists understand and predict these states, making it crucial for many fields like chemistry, materials science, and engineering.
4861

49-
5062
# Quantum mechanics is a branch of physics that deals with the behavior of particles on a very small scale, such as atoms and subatomic particles. It explores the idea that particles can exist in multiple states at once and that their behavior is not predictable in the traditional sense.
51-
print(bot.spoken_answer("Quem encontrou o caminho maritimo para o Brazil", {"lang": "pt-pt"}))
52-
# Explorador português Pedro Álvares Cabral é creditado com a descoberta do Brasil em 1500
63+
print(bot.spoken_answer("Quem encontrou o caminho maritimo para o Brazil", lang="pt-pt"))
64+
# O português Pedro Álvares Cabral encontrou o caminho marítimo para o Brasil em 1500. Ele foi o responsável por descobrir o litoral brasileiro, embora Cristóvão Colombo tenha chegado à América do Sul em 1498, cinco anos antes. Cabral desembarcou na atual costa de Alagoas, no Nordeste do Brasil.

ovos_solver_openai_persona/dialog_transformers.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,4 +24,4 @@ def transform(self, dialog: str, context: dict = None) -> Tuple[str, dict]:
2424
prompt = context.get("prompt") or self.config.get("rewrite_prompt")
2525
if not prompt:
2626
return dialog, context
27-
return self.solver.get_spoken_answer(f"{prompt} : {dialog}"), context
27+
return self.solver.get_spoken_answer(f"{prompt} : {dialog}", lang=context.get("lang")), context

ovos_solver_openai_persona/engines.py

Lines changed: 45 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import json
2+
from typing import Optional, Iterable
23

34
import requests
45
from ovos_plugin_manager.templates.solvers import QuestionSolver
@@ -47,7 +48,20 @@ def _do_api_request(self, prompt):
4748
return response["choices"][0]["text"]
4849

4950
# officially exported Solver methods
50-
def get_spoken_answer(self, query, context=None):
51+
def get_spoken_answer(self, query: str,
52+
lang: Optional[str] = None,
53+
units: Optional[str] = None) -> Optional[str]:
54+
"""
55+
Obtain the spoken answer for a given query.
56+
57+
Args:
58+
query (str): The query text.
59+
lang (Optional[str]): Optional language code. Defaults to None.
60+
units (Optional[str]): Optional units for the query. Defaults to None.
61+
62+
Returns:
63+
str: The spoken answer as a text response.
64+
"""
5165
response = self._do_api_request(query)
5266
answer = response.strip()
5367
if not answer or not answer.strip("?") or not answer.strip("_"):
@@ -154,20 +168,46 @@ def get_prompt(self, utt, initial_prompt=None):
154168
messages.append({"role": "user", "content": utt})
155169
return messages
156170

157-
# officially exported Solver methods
158-
def stream_utterances(self, query):
171+
# abstract Solver methods
172+
def stream_utterances(self, query: str,
173+
lang: Optional[str] = None,
174+
units: Optional[str] = None) -> Iterable[str]:
175+
"""
176+
Stream utterances for the given query as they become available.
177+
178+
Args:
179+
query (str): The query text.
180+
lang (Optional[str]): Optional language code. Defaults to None.
181+
units (Optional[str]): Optional units for the query. Defaults to None.
182+
183+
Returns:
184+
Iterable[str]: An iterable of utterances.
185+
"""
159186
messages = self.get_prompt(query)
160187
answer = ""
161188
for chunk in self._do_streaming_api_request(messages):
162189
answer += chunk
163190
if any(chunk.endswith(p) for p in [".", "!", "?", "\n", ":"]):
164191
if len(chunk) >= 2 and chunk[-2].isdigit() and chunk[-1] == ".":
165-
continue # dont split numbers
192+
continue # dont split numbers
166193
if answer.strip():
167194
yield answer
168195
answer = ""
169196

170-
def get_spoken_answer(self, query, context=None):
197+
def get_spoken_answer(self, query: str,
198+
lang: Optional[str] = None,
199+
units: Optional[str] = None) -> Optional[str]:
200+
"""
201+
Obtain the spoken answer for a given query.
202+
203+
Args:
204+
query (str): The query text.
205+
lang (Optional[str]): Optional language code. Defaults to None.
206+
units (Optional[str]): Optional units for the query. Defaults to None.
207+
208+
Returns:
209+
str: The spoken answer as a text response.
210+
"""
171211
messages = self.get_prompt(query)
172212
response = self._do_api_request(messages)
173213
answer = response.strip()
@@ -176,69 +216,3 @@ def get_spoken_answer(self, query, context=None):
176216
if self.memory:
177217
self.qa_pairs.append((query, answer))
178218
return answer
179-
180-
181-
# Base models
182-
class GPT35Turbo(OpenAIChatCompletionsSolver):
183-
def __init__(self, config=None):
184-
config = config or {}
185-
config["model"] = "gpt-3.5-turbo"
186-
super().__init__(config=config)
187-
188-
189-
class AdaSolver(OpenAICompletionsSolver):
190-
def __init__(self, config=None):
191-
config = config or {}
192-
config["model"] = "ada"
193-
super().__init__(config=config)
194-
195-
196-
class BabbageSolver(OpenAICompletionsSolver):
197-
def __init__(self, config=None):
198-
config = config or {}
199-
config["model"] = "babbage"
200-
super().__init__(config=config)
201-
202-
203-
class CurieSolver(OpenAICompletionsSolver):
204-
def __init__(self, config=None):
205-
config = config or {}
206-
config["model"] = "curie"
207-
super().__init__(config=config)
208-
209-
210-
class DavinciSolver(OpenAICompletionsSolver):
211-
def __init__(self, config=None):
212-
config = config or {}
213-
config["model"] = "davinci"
214-
super().__init__(config=config)
215-
216-
217-
class Davinci2Solver(OpenAICompletionsSolver):
218-
def __init__(self, config=None):
219-
config = config or {}
220-
config["model"] = "text-davinci-02"
221-
super().__init__(config=config)
222-
223-
224-
class Davinci3Solver(OpenAICompletionsSolver):
225-
def __init__(self, config=None):
226-
config = config or {}
227-
config["model"] = "text-davinci-03"
228-
super().__init__(config=config)
229-
230-
231-
# Code completion
232-
class DavinciCodeSolver(OpenAICompletionsSolver):
233-
def __init__(self, config=None):
234-
config = config or {}
235-
config["model"] = "code-davinci-002"
236-
super().__init__(config=config)
237-
238-
239-
class CushmanCodeSolver(OpenAICompletionsSolver):
240-
def __init__(self, config=None):
241-
config = config or {}
242-
config["model"] = "code-cushman-001"
243-
super().__init__(config=config)
244-

ovos_solver_openai_persona/prompts.py

Lines changed: 0 additions & 63 deletions
This file was deleted.

0 commit comments

Comments
 (0)