Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/build_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.8
python-version: "3.11"
- name: Install Build Tools
run: |
python -m pip install build wheel
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/license_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.8
python-version: "3.11"
- name: Install Build Tools
run: |
python -m pip install build wheel
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/publish_stable.yml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.8
python-version: "3.11"
- name: Install Build Tools
run: |
python -m pip install build wheel
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release_workflow.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ jobs:
- name: Setup Python
uses: actions/setup-python@v1
with:
python-version: 3.8
python-version: "3.11"
- name: Install Build Tools
run: |
python -m pip install build wheel
Expand Down
12 changes: 7 additions & 5 deletions ovos_solver_openai_persona/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def get_spoken_answer(self, query: str,
"""
messages = self.get_prompt(query, self.default_persona)
response = self._do_api_request(messages)
answer = response.strip()
answer = response.strip() if response else ""
if not answer or not answer.strip("?") or not answer.strip("_"):
return None
if self.memory:
Expand All @@ -64,14 +64,16 @@ def get_spoken_answer(self, query: str,


if __name__ == "__main__":
bot = OpenAIPersonaSolver({"key": "sk-xxxx", "api_url": "https://llama.smartgic.io/v1"})
for utt in bot.stream_utterances("describe quantum mechanics in simple terms"):
print(utt)
bot = OpenAIPersonaSolver({"key": "sk-xxxx",
"model": 'mixtral-8x7b',
"api_url": "http://10.42.0.109:8401"})
#for utt in bot.stream_utterances("describe quantum mechanics in simple terms"):
# print(utt)
# Quantum mechanics is a branch of physics that studies the behavior of atoms and particles at the smallest scales.
# It describes how these particles interact with each other, move, and change energy levels.
# Think of it like playing with toy building blocks that represent particles.
# Instead of rigid structures, these particles can be in different energy levels or "states." Quantum mechanics helps scientists understand and predict these states, making it crucial for many fields like chemistry, materials science, and engineering.

# Quantum mechanics is a branch of physics that deals with the behavior of particles on a very small scale, such as atoms and subatomic particles. It explores the idea that particles can exist in multiple states at once and that their behavior is not predictable in the traditional sense.
print(bot.spoken_answer("Quem encontrou o caminho maritimo para o Brazil", lang="pt-pt"))
print(bot.spoken_answer("what is the definition of computer", lang="en-US"))
# O português Pedro Álvares Cabral encontrou o caminho marítimo para o Brasil em 1500. Ele foi o responsável por descobrir o litoral brasileiro, embora Cristóvão Colombo tenha chegado à América do Sul em 1498, cinco anos antes. Cabral desembarcou na atual costa de Alagoas, no Nordeste do Brasil.
80 changes: 62 additions & 18 deletions ovos_solver_openai_persona/engines.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
import json
from typing import Optional, Iterable
from typing import Optional, Iterable, List, Dict

import requests
from ovos_plugin_manager.templates.solvers import QuestionSolver
from ovos_utils.log import LOG

from ovos_plugin_manager.templates.solvers import ChatMessageSolver

MessageList = List[Dict[str, str]] # for typing

class OpenAICompletionsSolver(QuestionSolver):
enable_tx = False
Expand Down Expand Up @@ -69,14 +72,14 @@ def get_spoken_answer(self, query: str,
return answer


class OpenAIChatCompletionsSolver(QuestionSolver):
class OpenAIChatCompletionsSolver(ChatMessageSolver):
enable_tx = False
priority = 25

def __init__(self, config=None):
super().__init__(config)
self.api_url = f"{self.config.get('api_url', 'https://api.openai.com/v1')}/chat/completions"
self.engine = self.config.get("model", "gpt-3.5-turbo") # "ada" cheaper and faster, "davinci" better
self.engine = self.config.get("model", "gpt-4o-mini") # "ada" cheaper and faster, "davinci" better
self.stop_token = "<|im_end|>"
self.key = self.config.get("key")
if not self.key:
Expand Down Expand Up @@ -163,37 +166,82 @@ def get_chat_history(self, initial_prompt=None):
messages.append({"role": "assistant", "content": a})
return messages

def get_prompt(self, utt, initial_prompt=None):
def get_messages(self, utt, initial_prompt=None) -> MessageList:
messages = self.get_chat_history(initial_prompt)
messages.append({"role": "user", "content": utt})
return messages

# abstract Solver methods
def stream_utterances(self, query: str,
lang: Optional[str] = None,
units: Optional[str] = None) -> Iterable[str]:
def continue_chat(self, messages: MessageList,
                  lang: Optional[str],
                  units: Optional[str] = None) -> Optional[str]:
    """Generate a response based on the chat history.

    Args:
        messages (List[Dict[str, str]]): List of chat messages, each containing 'role' and 'content'.
        lang (Optional[str]): The language code for the response. If None, will be auto-detected.
        units (Optional[str]): Optional unit system for numerical values.

    Returns:
        Optional[str]: The generated response or None if no response could be generated.
    """
    response = self._do_api_request(messages)
    # A failed API call can yield a falsy/None response; guard before
    # strip() (same fix applied in OpenAIPersonaSolver.get_spoken_answer)
    answer = response.strip() if response else ""
    # reject empty answers and answers made only of "?" or "_" filler
    if not answer or not answer.strip("?") or not answer.strip("_"):
        return None
    if self.memory:
        # remember the last user turn together with the model's answer
        query = messages[-1]["content"]
        self.qa_pairs.append((query, answer))
    return answer

def stream_chat_utterances(self, messages: List[Dict[str, str]],
                           lang: Optional[str] = None,
                           units: Optional[str] = None) -> Iterable[str]:
    """
    Stream utterances for the given chat history as they become available.

    Args:
        messages: The chat messages.
        lang (Optional[str]): Optional language code. Defaults to None.
        units (Optional[str]): Optional units for the query. Defaults to None.

    Returns:
        Iterable[str]: An iterable of utterances.
    """
    sentence_ends = (".", "!", "?", "\n", ":")
    buffered = ""
    query = messages[-1]["content"]
    if self.memory:
        # reserve a slot for this exchange; filled in as chunks arrive
        self.qa_pairs.append((query, buffered))

    for chunk in self._do_streaming_api_request(messages):
        buffered += chunk
        if not chunk.endswith(sentence_ends):
            continue
        if len(chunk) >= 2 and chunk[-2].isdigit() and chunk[-1] == ".":
            continue  # dont split numbers
        if not buffered.strip():
            continue
        if self.memory:
            merged = f"{self.qa_pairs[-1][-1]}\n{buffered}".strip()
            self.qa_pairs[-1] = (query, merged)
        yield buffered
        buffered = ""

def stream_utterances(self, query: str,
                      lang: Optional[str] = None,
                      units: Optional[str] = None) -> Iterable[str]:
    """
    Stream utterances for the given query as they become available.

    Args:
        query (str): The query text.
        lang (Optional[str]): Optional language code. Defaults to None.
        units (Optional[str]): Optional units for the query. Defaults to None.

    Returns:
        Iterable[str]: An iterable of utterances.
    """
    # wrap the single query as a one-turn chat and delegate to the
    # chat-history streaming implementation
    chat = self.get_messages(query)
    yield from self.stream_chat_utterances(chat, lang, units)

def get_spoken_answer(self, query: str,
lang: Optional[str] = None,
units: Optional[str] = None) -> Optional[str]:
Expand All @@ -208,11 +256,7 @@ def get_spoken_answer(self, query: str,
Returns:
str: The spoken answer as a text response.
"""
messages = self.get_prompt(query)
response = self._do_api_request(messages)
answer = response.strip()
if not answer or not answer.strip("?") or not answer.strip("_"):
return None
if self.memory:
self.qa_pairs.append((query, answer))
return answer
messages = self.get_messages(query)
# just for api compat since it's a subclass, shouldn't be directly used
return self.continue_chat(messages=messages, lang=lang, units=units)

Loading