Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 19 additions & 7 deletions ovos_solver_openai_persona/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from typing import Optional

from ovos_solver_openai_persona.engines import OpenAIChatCompletionsSolver


Expand All @@ -24,10 +26,21 @@ def get_chat_history(self, persona=None):
return messages

# officially exported Solver methods
def get_spoken_answer(self, query, context=None):
context = context or {}
persona = context.get("persona") or self.default_persona
messages = self.get_prompt(query, persona)
def get_spoken_answer(self, query: str,
lang: Optional[str] = None,
units: Optional[str] = None) -> Optional[str]:
"""
Obtain the spoken answer for a given query.

Args:
query (str): The query text.
lang (Optional[str]): Optional language code. Defaults to None.
units (Optional[str]): Optional units for the query. Defaults to None.

Returns:
str: The spoken answer as a text response.
"""
messages = self.get_prompt(query, self.default_persona)
response = self._do_api_request(messages)
answer = response.strip()
if not answer or not answer.strip("?") or not answer.strip("_"):
Expand All @@ -46,7 +59,6 @@ def get_spoken_answer(self, query, context=None):
# Think of it like playing with toy building blocks that represent particles.
# Instead of rigid structures, these particles can be in different energy levels or "states." Quantum mechanics helps scientists understand and predict these states, making it crucial for many fields like chemistry, materials science, and engineering.


# Quantum mechanics is a branch of physics that deals with the behavior of particles on a very small scale, such as atoms and subatomic particles. It explores the idea that particles can exist in multiple states at once and that their behavior is not predictable in the traditional sense.
print(bot.spoken_answer("Quem encontrou o caminho maritimo para o Brazil", {"lang": "pt-pt"}))
# Explorador português Pedro Álvares Cabral é creditado com a descoberta do Brasil em 1500
print(bot.spoken_answer("Quem encontrou o caminho maritimo para o Brazil", lang="pt-pt"))
# O português Pedro Álvares Cabral encontrou o caminho marítimo para o Brasil em 1500. Ele foi o responsável por descobrir o litoral brasileiro, embora Cristóvão Colombo tenha chegado à América do Sul em 1498, cinco anos antes. Cabral desembarcou na atual costa de Alagoas, no Nordeste do Brasil.
2 changes: 1 addition & 1 deletion ovos_solver_openai_persona/dialog_transformers.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,4 +24,4 @@ def transform(self, dialog: str, context: dict = None) -> Tuple[str, dict]:
prompt = context.get("prompt") or self.config.get("rewrite_prompt")
if not prompt:
return dialog, context
return self.solver.get_spoken_answer(f"{prompt} : {dialog}"), context
return self.solver.get_spoken_answer(f"{prompt} : {dialog}", lang=context.get("lang")), context
116 changes: 45 additions & 71 deletions ovos_solver_openai_persona/engines.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import json
from typing import Optional, Iterable

import requests
from ovos_plugin_manager.templates.solvers import QuestionSolver
Expand Down Expand Up @@ -47,7 +48,20 @@ def _do_api_request(self, prompt):
return response["choices"][0]["text"]

# officially exported Solver methods
def get_spoken_answer(self, query, context=None):
def get_spoken_answer(self, query: str,
lang: Optional[str] = None,
units: Optional[str] = None) -> Optional[str]:
"""
Obtain the spoken answer for a given query.

Args:
query (str): The query text.
lang (Optional[str]): Optional language code. Defaults to None.
units (Optional[str]): Optional units for the query. Defaults to None.

Returns:
str: The spoken answer as a text response.
"""
response = self._do_api_request(query)
answer = response.strip()
if not answer or not answer.strip("?") or not answer.strip("_"):
Expand Down Expand Up @@ -154,20 +168,46 @@ def get_prompt(self, utt, initial_prompt=None):
messages.append({"role": "user", "content": utt})
return messages

# abstract Solver methods
def stream_utterances(self, query: str,
                      lang: Optional[str] = None,
                      units: Optional[str] = None) -> Iterable[str]:
    """
    Stream utterances for the given query as they become available.

    Chunks received from the streaming API are accumulated and yielded
    one sentence at a time, splitting on terminal punctuation.

    Args:
        query (str): The query text.
        lang (Optional[str]): Optional language code. Defaults to None.
        units (Optional[str]): Optional units for the query. Defaults to None.

    Returns:
        Iterable[str]: An iterable of utterances.
    """
    messages = self.get_prompt(query)
    answer = ""
    for chunk in self._do_streaming_api_request(messages):
        answer += chunk
        # flush the buffered text once the chunk ends in terminal punctuation
        if any(chunk.endswith(p) for p in (".", "!", "?", "\n", ":")):
            if len(chunk) >= 2 and chunk[-2].isdigit() and chunk[-1] == ".":
                continue  # dont split numbers
            if answer.strip():
                yield answer
                answer = ""

def get_spoken_answer(self, query, context=None):
def get_spoken_answer(self, query: str,
lang: Optional[str] = None,
units: Optional[str] = None) -> Optional[str]:
"""
Obtain the spoken answer for a given query.

Args:
query (str): The query text.
lang (Optional[str]): Optional language code. Defaults to None.
units (Optional[str]): Optional units for the query. Defaults to None.

Returns:
str: The spoken answer as a text response.
"""
messages = self.get_prompt(query)
response = self._do_api_request(messages)
answer = response.strip()
Expand All @@ -176,69 +216,3 @@ def get_spoken_answer(self, query, context=None):
if self.memory:
self.qa_pairs.append((query, answer))
return answer


# Base models
class GPT35Turbo(OpenAIChatCompletionsSolver):
    """Chat-completions solver pinned to the "gpt-3.5-turbo" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        config["model"] = "gpt-3.5-turbo"
        super().__init__(config=config)


class AdaSolver(OpenAICompletionsSolver):
    """Completions solver pinned to the "ada" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        config["model"] = "ada"
        super().__init__(config=config)


class BabbageSolver(OpenAICompletionsSolver):
    """Completions solver pinned to the "babbage" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        config["model"] = "babbage"
        super().__init__(config=config)


class CurieSolver(OpenAICompletionsSolver):
    """Completions solver pinned to the "curie" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        config["model"] = "curie"
        super().__init__(config=config)


class DavinciSolver(OpenAICompletionsSolver):
    """Completions solver pinned to the "davinci" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        config["model"] = "davinci"
        super().__init__(config=config)


class Davinci2Solver(OpenAICompletionsSolver):
    """Completions solver pinned to the "text-davinci-002" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        # fixed model id: OpenAI's identifier is "text-davinci-002",
        # not "text-davinci-02" (the old value was rejected by the API)
        config["model"] = "text-davinci-002"
        super().__init__(config=config)


class Davinci3Solver(OpenAICompletionsSolver):
    """Completions solver pinned to the "text-davinci-003" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        # fixed model id: OpenAI's identifier is "text-davinci-003",
        # not "text-davinci-03" (the old value was rejected by the API)
        config["model"] = "text-davinci-003"
        super().__init__(config=config)


# Code completion
class DavinciCodeSolver(OpenAICompletionsSolver):
    """Code-completion solver pinned to the "code-davinci-002" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        config["model"] = "code-davinci-002"
        super().__init__(config=config)


class CushmanCodeSolver(OpenAICompletionsSolver):
    """Code-completion solver pinned to the "code-cushman-001" model."""

    def __init__(self, config=None):
        # copy so we never mutate the caller's config dict
        config = dict(config or {})
        config["model"] = "code-cushman-001"
        super().__init__(config=config)

63 changes: 0 additions & 63 deletions ovos_solver_openai_persona/prompts.py

This file was deleted.