Commit f07a56f

[8.0.6] standardise tt.ModelInterface
1 parent 0bffa8f commit f07a56f

File tree

7 files changed: +61, -29 lines


docs/changelog.rst

Lines changed: 5 additions & 0 deletions
@@ -7,6 +7,11 @@ minor versions.
 
 All relevant steps to be taken will be mentioned here.
 
+8.0.6
+-----
+
+- Standardise all the implementations of the ``tuneapi.types.chats.ModelInterface``
+
 8.0.5
 -----
 

docs/conf.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 project = "tuneapi"
 copyright = "2024-2025, Frello Technologies"
 author = "Frello Technologies"
-release = "8.0.5"
+release = "8.0.6"
 
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "tuneapi"
-version = "8.0.5"
+version = "8.0.6"
 description = "Tune AI APIs."
 authors = ["Frello Technology Private Limited <[email protected]>"]
 license = "MIT"

tuneapi/apis/model_gemini.py

Lines changed: 10 additions & 7 deletions
@@ -257,13 +257,14 @@ def stream_chat(
         self,
         chats: tt.Thread | str,
         model: Optional[str] = None,
-        max_tokens: int = 4096,
+        max_tokens: int = None,
         temperature: float = 1,
         token: Optional[str] = None,
-        debug: bool = False,
+        timeout=(5, 60),
+        usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
         raw: bool = False,
-        timeout=(5, 60),
         **kwargs,
     ):
         url, headers, data = self._process_input(
@@ -297,11 +298,13 @@ def chat(
         self,
         chats: tt.Thread | str,
         model: Optional[str] = None,
-        max_tokens: int = 4096,
+        max_tokens: int = None,
         temperature: float = 1,
         token: Optional[str] = None,
-        timeout=None,
+        usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
+        timeout=(5, 60),
         **kwargs,
     ) -> Any:
         output = ""
@@ -338,10 +341,10 @@ async def stream_chat_async(
         max_tokens: int = 4096,
         temperature: float = 1,
         token: Optional[str] = None,
-        timeout=(5, 60),
         raw: bool = False,
         debug: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        timeout=(5, 60),
         **kwargs,
     ):
         url, headers, data = self._process_input(
@@ -383,8 +386,8 @@ async def chat_async(
         max_tokens: int = None,
         temperature: float = 1,
         token: Optional[str] = None,
-        timeout=None,
         extra_headers: Optional[Dict[str, str]] = None,
+        timeout=(5, 60),
         **kwargs,
     ) -> Any:
         output = ""

tuneapi/apis/model_openai.py

Lines changed: 13 additions & 3 deletions
@@ -249,13 +249,13 @@ def stream_chat(
         model: Optional[str] = None,
         max_tokens: int = None,
         temperature: float = 1,
-        parallel_tool_calls: bool = False,
         token: Optional[str] = None,
         timeout=(5, 60),
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
         debug: bool = False,
         raw: bool = False,
+        parallel_tool_calls: bool = False,
         **kwargs,
     ):
         headers, data = self._process_input(
@@ -295,10 +295,12 @@ def chat(
         model: Optional[str] = None,
         max_tokens: int = None,
         temperature: float = 1,
-        parallel_tool_calls: bool = False,
         token: Optional[str] = None,
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
+        timeout=(5, 60),
+        parallel_tool_calls: bool = False,
         **kwargs,
     ) -> Any:
         output = ""
@@ -313,6 +315,8 @@ def chat(
             token=token,
             extra_headers=extra_headers,
             raw=False,
+            debug=debug,
+            timeout=timeout,
             **kwargs,
         ):
             if isinstance(x, dict):
@@ -340,11 +344,11 @@ async def stream_chat_async(
         temperature: float = 1,
         parallel_tool_calls: bool = False,
         token: Optional[str] = None,
-        timeout=(5, 60),
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
         debug: bool = False,
         raw: bool = False,
+        timeout=(5, 60),
         **kwargs,
     ):
         headers, data = self._process_input(
@@ -391,6 +395,7 @@ async def chat_async(
         token: Optional[str] = None,
         usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        timeout=(5, 60),
         **kwargs,
     ) -> Any:
         output = ""
@@ -404,6 +409,7 @@ async def chat_async(
             token=token,
             extra_headers=extra_headers,
             raw=False,
+            timeout=timeout,
             **kwargs,
         ):
             if isinstance(x, dict):
@@ -1208,6 +1214,7 @@ def __init__(
         base_url: str = "https://api.mistral.ai/v1/chat/completions",
         extra_headers: Optional[Dict[str, str]] = None,
         api_token: Optional[str] = None,
+        **kwargs,
     ):
         super().__init__(
             id=id,
@@ -1247,6 +1254,7 @@ def __init__(
         base_url: str = "https://api.groq.com/openai/v1/chat/completions",
         extra_headers: Optional[Dict[str, str]] = None,
         api_token: Optional[str] = None,
+        **kwargs,
     ):
         super().__init__(
             id=id,
@@ -1287,6 +1295,7 @@ def __init__(
         org_id: Optional[str] = None,
         extra_headers: Optional[Dict[str, str]] = None,
         api_token: Optional[str] = None,
+        **kwargs,
     ):
         if extra_headers is None:
             extra_headers = {}
@@ -1348,6 +1357,7 @@ def __init__(
         self,
         id: str,
         base_url: str = "http://localhost:11434/v1/chat/completions",
+        **kwargs,
     ):
         super().__init__(
             id=id,
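
The trailing `**kwargs` added to the constructors above (Mistral, Groq, the org_id-based client, and Ollama) means provider-specific options no longer break a shared construction path. A sketch of why that helps; the `ta.Mistral` / `ta.Groq` / `ta.Ollama` names are assumed from the base URLs in the diff and may differ from the actual exports:

```python
from tuneapi import ta

# Assumed class names; adjust to the real exports if they differ.
PROVIDERS = {
    "mistral": ta.Mistral,
    "groq": ta.Groq,
    "ollama": ta.Ollama,
}

def build_model(provider: str, model_id: str, **opts):
    # Every constructor now ends in **kwargs, so options that only some
    # providers understand (e.g. api_token for a local Ollama server) are
    # absorbed instead of raising TypeError, and one factory covers them all.
    return PROVIDERS[provider](id=model_id, **opts)

llm = build_model("ollama", "llama3.1", api_token=None)  # extra kwarg absorbed by **kwargs
```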

tuneapi/apis/turbo.py

Lines changed: 4 additions & 3 deletions
@@ -55,10 +55,11 @@ def distributed_chat(
         TypeError: If model is not an instance of ModelInterface
 
     Example:
-        >>> model = ChatModel(api_token="...")
+        >>> from tuneapi import ta, tt
+        >>> model = ta.Gemini()
         >>> prompts = [
-        ...     Thread([Message("What is 2+2?")]),
-        ...     Thread([Message("What is Python?")])
+        ...     tt.Thread([tt.human("What is 2+2?")]),
+        ...     tt.Thread([tt.human("What is Python?")])
         ... ]
         >>> responses = distributed_chat(model, prompts, max_threads=5)
         >>> for prompt, response in zip(prompts, responses):
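
The docstring example now uses the library's real exports (`ta.Gemini`, `tt.Thread`, `tt.human`) instead of placeholder names. The same example as a runnable script, with the import path of `distributed_chat` assumed from this file's location and a configured Gemini API token:

```python
from tuneapi import ta, tt
from tuneapi.apis.turbo import distributed_chat  # import path assumed from this file

model = ta.Gemini()  # needs a configured Gemini API token
prompts = [
    tt.Thread([tt.human("What is 2+2?")]),
    tt.Thread([tt.human("What is Python?")]),
]

# Fan the prompts out over a small worker pool and collect responses in order.
responses = distributed_chat(model, prompts, max_threads=5)
for prompt, response in zip(prompts, responses):
    print(response)
```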

tuneapi/types/chats.py

Lines changed: 27 additions & 14 deletions
@@ -322,56 +322,67 @@ def set_api_token(self, token: str) -> None:
 
     def stream_chat(
         self,
-        chats: "Thread",
+        chats: Union["Thread", str],
         model: Optional[str] = None,
-        max_tokens: int = 1024,
+        max_tokens: int = None,
         temperature: float = 1,
         token: Optional[str] = None,
-        timeout=(5, 60),
-        raw: bool = False,
-        debug: bool = False,
+        usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
+        raw: bool = False,
+        timeout=(5, 60),
+        **kwargs,
     ):
         """This is the blocking function to stream chat with the model where each token is iteratively generated"""
         raise NotImplementedError("This model has no operation for this.")
 
     def chat(
         self,
-        chats: "Thread",
+        chats: Union["Thread", str],
         model: Optional[str] = None,
-        max_tokens: int = 1024,
+        max_tokens: int = None,
         temperature: float = 1,
+        parallel_tool_calls: bool = False,
         token: Optional[str] = None,
-        timeout=(5, 30),
+        usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
+        timeout=(5, 60),
         **kwargs,
     ) -> str | Dict[str, Any]:
         """This is the blocking function to block chat with the model"""
         raise NotImplementedError("This model has no operation for this.")
 
     async def stream_chat_async(
         self,
-        chats: "Thread",
+        chats: Union["Thread", str],
         model: Optional[str] = None,
-        max_tokens: int = 1024,
+        max_tokens: int = None,
         temperature: float = 1,
         token: Optional[str] = None,
-        timeout=(5, 30),
+        usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
+        raw: bool = False,
+        timeout=(5, 60),
         **kwargs,
     ) -> str | Dict[str, Any]:
         """This is the async function to stream chat with the model where each token is iteratively generated"""
         raise NotImplementedError("This model has no operation for this.")
 
     async def chat_async(
         self,
-        chats: "Thread",
+        chats: Union["Thread", str],
         model: Optional[str] = None,
-        max_tokens: int = 1024,
+        max_tokens: int = None,
         temperature: float = 1,
+        parallel_tool_calls: bool = False,
         token: Optional[str] = None,
-        timeout=(5, 30),
+        usage: bool = False,
         extra_headers: Optional[Dict[str, str]] = None,
+        debug: bool = False,
+        timeout=(5, 60),
         **kwargs,
     ) -> str | Dict[str, Any]:
         """This is the async function to block chat with the model"""
@@ -384,6 +395,7 @@ def distributed_chat(
         max_threads: int = 10,
         retry: int = 3,
         pbar=True,
+        debug=False,
         **kwargs,
     ):
         """This is the blocking function to chat with the model in a distributed manner"""
@@ -396,6 +408,7 @@ async def distributed_chat_async(
         max_threads: int = 10,
         retry: int = 3,
         pbar=True,
+        debug=False,
         **kwargs,
     ):
         """This is the async function to chat with the model in a distributed manner"""
