Skip to content

Commit a72a099

Browse files
committed
[8.0.3] fix tools bug + OpenAIProtocol class
1 parent 06e1f87 commit a72a099

File tree

8 files changed

+97
-37
lines changed

8 files changed

+97
-37
lines changed

docs/changelog.rst

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,13 @@ minor versions.
77

88
All relevant steps to be taken will be mentioned here.
99

10+
8.0.3
11+
-----
12+
13+
- Fix bug in ``tools`` that was causing an ever-increasing number of tools in the ``Thread`` object
14+
- OpenAI protocol abstracted away as an ``OpenAIProtocol`` class in ``tuneapi.apis.openai``. This is to make it easier to
15+
add new endpoints in the future.
16+
1017
8.0.2
1118
-----
1219

docs/conf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
project = "tuneapi"
1414
copyright = "2024-2025, Frello Technologies"
1515
author = "Frello Technologies"
16-
release = "8.0.2"
16+
release = "8.0.3"
1717

1818
# -- General configuration ---------------------------------------------------
1919
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "tuneapi"
3-
version = "8.0.2"
3+
version = "8.0.3"
44
description = "Tune AI APIs."
55
authors = ["Frello Technology Private Limited <[email protected]>"]
66
license = "MIT"

tuneapi/apis/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
# Copyright © 2024-2025 Frello Technology Private Limited
22

33
# model APIs
4-
from tuneapi.apis.model_openai import Openai, TuneModel, Groq, Mistral
4+
from tuneapi.apis.model_openai import Openai, TuneModel, Groq, Mistral, OpenAIProtocol
55
from tuneapi.apis.model_anthropic import Anthropic
66
from tuneapi.apis.model_gemini import Gemini
77
from tuneapi.apis.turbo import distributed_chat, distributed_chat_async

tuneapi/apis/model_anthropic.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ def _process_input(
143143
# return headers, system.strip(), claude_messages
144144

145145
tools = []
146-
if isinstance(chats, tt.Thread):
146+
if isinstance(chats, tt.Thread) and chats.tools:
147147
tools = [x.to_dict() for x in chats.tools]
148148
for t in tools:
149149
t["input_schema"] = t.pop("parameters")
@@ -236,7 +236,7 @@ def chat(
236236
self,
237237
chats: tt.Thread | str,
238238
model: Optional[str] = None,
239-
max_tokens: int = 1024,
239+
max_tokens: int = 4096,
240240
temperature: Optional[float] = None,
241241
token: Optional[str] = None,
242242
usage: bool = False,
@@ -317,7 +317,7 @@ async def chat_async(
317317
self,
318318
chats: tt.Thread | str,
319319
model: Optional[str] = None,
320-
max_tokens: int = 1024,
320+
max_tokens: int = 4096,
321321
temperature: Optional[float] = None,
322322
token: Optional[str] = None,
323323
usage: bool = False,

tuneapi/apis/model_gemini.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ def _process_input(
138138

139139
# create the body
140140
tools = []
141-
if isinstance(chats, tt.Thread):
141+
if isinstance(chats, tt.Thread) and chats.tools:
142142
tools = [x.to_dict() for x in chats.tools]
143143
extra_headers = extra_headers or self.extra_headers
144144
if extra_headers:

tuneapi/apis/model_openai.py

Lines changed: 68 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -17,38 +17,26 @@
1717
from tuneapi.apis.turbo import distributed_chat, distributed_chat_async
1818

1919

20-
class Openai(tt.ModelInterface):
20+
class OpenAIProtocol(tt.ModelInterface):
2121
def __init__(
2222
self,
23-
id: str = "gpt-4o",
24-
base_url: str = "https://api.openai.com/v1/chat/completions",
25-
extra_headers: Optional[Dict[str, str]] = None,
26-
api_token: Optional[str] = None,
27-
emebdding_url: Optional[str] = None,
28-
image_gen_url: Optional[str] = None,
29-
audio_transcribe: Optional[str] = None,
30-
audio_gen_url: Optional[str] = None,
23+
id: str,
24+
base_url: str,
25+
extra_headers: Optional[Dict[str, str]],
26+
api_token: Optional[str],
27+
emebdding_url: Optional[str],
28+
image_gen_url: Optional[str],
29+
audio_transcribe_url: Optional[str],
30+
audio_gen_url: Optional[str],
3131
):
3232
self.model_id = id
3333
self.base_url = base_url
34-
self.api_token = api_token or tu.ENV.OPENAI_TOKEN("")
34+
self.api_token = api_token
3535
self.extra_headers = extra_headers
36-
self.emebdding_url = emebdding_url or base_url.replace(
37-
"/chat/completions",
38-
"/embeddings",
39-
)
40-
self.image_gen_url = image_gen_url or base_url.replace(
41-
"/chat/completions",
42-
"/images/generations",
43-
)
44-
self.audio_transcribe_url = audio_transcribe or base_url.replace(
45-
"/chat/completions",
46-
"/audio/transcriptions",
47-
)
48-
self.audio_gen_url = audio_gen_url or base_url.replace(
49-
"/chat/completions",
50-
"/audio/speech",
51-
)
36+
self.emebdding_url = emebdding_url
37+
self.image_gen_url = image_gen_url
38+
self.audio_transcribe_url = audio_transcribe_url
39+
self.audio_gen_url = audio_gen_url
5240

5341
def set_api_token(self, token: str) -> None:
5442
self.api_token = token
@@ -875,7 +863,47 @@ async def text_to_speech_async(
875863
# Other OpenAI compatible models
876864

877865

878-
class Mistral(Openai):
866+
class Openai(OpenAIProtocol):
867+
def __init__(
868+
self,
869+
id: str = "gpt-4o",
870+
base_url: str = "https://api.openai.com/v1/chat/completions",
871+
extra_headers: Optional[Dict[str, str]] = None,
872+
api_token: Optional[str] = None,
873+
emebdding_url: Optional[str] = None,
874+
image_gen_url: Optional[str] = None,
875+
audio_transcribe: Optional[str] = None,
876+
audio_gen_url: Optional[str] = None,
877+
):
878+
super().__init__(
879+
id=id,
880+
base_url=base_url,
881+
api_token=api_token or tu.ENV.OPENAI_TOKEN(""),
882+
extra_headers=extra_headers,
883+
emebdding_url=emebdding_url
884+
or base_url.replace(
885+
"/chat/completions",
886+
"/embeddings",
887+
),
888+
image_gen_url=image_gen_url
889+
or base_url.replace(
890+
"/chat/completions",
891+
"/images/generations",
892+
),
893+
audio_transcribe_url=audio_transcribe
894+
or base_url.replace(
895+
"/chat/completions",
896+
"/audio/transcriptions",
897+
),
898+
audio_gen_url=audio_gen_url
899+
or base_url.replace(
900+
"/chat/completions",
901+
"/audio/speech",
902+
),
903+
)
904+
905+
906+
class Mistral(OpenAIProtocol):
879907
"""
880908
A class to interact with Mistral's Large Language Models (LLMs) via their API. Note this class does not contain the
881909
`embedding` method.
@@ -905,13 +933,17 @@ def __init__(
905933
base_url=base_url,
906934
extra_headers=extra_headers,
907935
api_token=api_token or tu.ENV.MISTRAL_TOKEN(),
936+
emebdding_url=None,
937+
image_gen_url=None,
938+
audio_transcribe_url=None,
939+
audio_gen_url=None,
908940
)
909941

910942
def embedding(*a, **k):
911943
raise NotImplementedError("Mistral does not support embeddings")
912944

913945

914-
class Groq(Openai):
946+
class Groq(OpenAIProtocol):
915947
"""
916948
A class to interact with Groq's Large Language Models (LLMs) via their API. Note this class does not contain the
917949
`embedding` method.
@@ -938,13 +970,17 @@ def __init__(
938970
base_url=base_url,
939971
extra_headers=extra_headers,
940972
api_token=api_token or tu.ENV.GROQ_TOKEN(),
973+
emebdding_url=None,
974+
image_gen_url=None,
975+
audio_transcribe_url=None,
976+
audio_gen_url=None,
941977
)
942978

943979
def embedding(*a, **k):
944980
raise NotImplementedError("Groq does not support embeddings")
945981

946982

947-
class TuneModel(Openai):
983+
class TuneModel(OpenAIProtocol):
948984
"""
949985
A class to interact with Groq's Large Language Models (LLMs) via their API.
950986
@@ -978,6 +1014,9 @@ def __init__(
9781014
extra_headers=extra_headers,
9791015
api_token=api_token or tu.ENV.TUNEAPI_TOKEN(),
9801016
emebdding_url="https://proxy.tune.app/v1/embeddings",
1017+
image_gen_url=None,
1018+
audio_transcribe_url=None,
1019+
audio_gen_url=None,
9811020
)
9821021

9831022
def embedding(

tuneapi/types/chats.py

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -483,7 +483,7 @@ def __init__(
483483
self,
484484
input_tokens: int,
485485
output_tokens: int,
486-
cached_tokens: int,
486+
cached_tokens: Optional[int] = 0,
487487
**kwargs,
488488
):
489489
self.input_tokens = input_tokens
@@ -498,6 +498,20 @@ def __getitem__(self, x):
498498
def __repr__(self) -> str:
499499
return f"<Usage: {self.input_tokens} [Cached: {self.cached_tokens}] -> {self.output_tokens}>"
500500

501+
def __radd__(self, other: "Usage"):
502+
return Usage(
503+
input_tokens=self.input_tokens + other.input_tokens,
504+
output_tokens=self.output_tokens + other.output_tokens,
505+
cached_tokens=self.cached_tokens + other.cached_tokens,
506+
)
507+
508+
def __add__(self, other: "Usage"):
509+
return Usage(
510+
input_tokens=self.input_tokens + other.input_tokens,
511+
output_tokens=self.output_tokens + other.output_tokens,
512+
cached_tokens=self.cached_tokens + other.cached_tokens,
513+
)
514+
501515
def to_json(self, *a, **k) -> str:
502516
return tu.to_json(self.__dict__, *a, **k)
503517

0 commit comments

Comments
 (0)