Skip to content

Commit 95b7477

Browse files
committed
Add new provider - blackboxai
1 parent eb1e30a commit 95b7477

File tree

7 files changed

+256
-2
lines changed

7 files changed

+256
-2
lines changed

docs/README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,8 @@ These are simply the hosts of the LLMs, which include:
6969
6. [WebChatGPT](https://github.com/Simatwa/WebChatGPT) - **OpenAI** *(Session ID required)*
7070
7. [Bard](https://github.com/acheong08/bard) - **Google** *(Session ID required)*
7171
8. [Phind](https://www.phind.com) - *default*
72+
9. [Llama2](https://www.llama2.ai)
73+
10. [Blackboxai](https://www.blackbox.ai)
7274

7375
<details>
7476

src/pytgpt/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
"bard",
1515
"phind",
1616
"llama2",
17+
"blackboxai",
1718
]
1819

1920
gpt4free_providers = [

src/pytgpt/blackboxai/__init__.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
# Package entry point for the Blackboxai provider: re-exports the client
# class and the module-wide requests session it uses.
from .main import BLACKBOXAI
from .main import session

# Short human-readable description of this provider package.
__info__ = "Interact with Blackboxai models"

# Explicit public API for `from pytgpt.blackboxai import *`.
__all__ = ["BLACKBOXAI", "session"]

src/pytgpt/blackboxai/main.py

Lines changed: 216 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,216 @@
1+
import re
2+
import json
3+
import yaml
4+
import requests
5+
from pytgpt.utils import Optimizers
6+
from pytgpt.utils import Conversation
7+
from pytgpt.utils import AwesomePrompts
8+
9+
# Module-wide HTTP session shared by all BLACKBOXAI instances; its headers
# and proxies are (re)configured in BLACKBOXAI.__init__.
session = requests.Session()

# Default for BLACKBOXAI's `model` argument. It is stored on the instance
# but not currently included in the request payload built by `ask`.
default_model = None
12+
13+
14+
class BLACKBOXAI:
    """Client for the Blackbox AI chat endpoint (https://www.blackbox.ai/api/chat)."""

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = default_model,
    ):
        """Instantiates BLACKBOXAI

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to None (no proxies).
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
            model (str, optional): Model name. Defaults to ``default_model`` (None).
        """
        self.max_tokens_to_sample = max_tokens
        self.is_conversation = is_conversation
        self.chat_endpoint = "https://www.blackbox.ai/api/chat"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.model = model
        # Fields mirrored verbatim into the request payload built in `ask`.
        self.previewToken: str = None
        self.userId: str = ""
        self.codeModelMode: bool = True
        self.id: str = ""
        self.agentMode: dict = {}
        self.trendingAgentMode: dict = {}
        self.isMicMode: bool = False

        self.headers = {
            "Content-Type": "application/json",
            "User-Agent": "",
            "Accept": "*/*",
            "Accept-Encoding": "Identity",
        }

        # Materialized as a tuple: a generator expression here would be
        # exhausted after the first `optimizer in ...` membership test,
        # breaking subsequent `ask(optimizer=...)` calls and rendering the
        # error message in `ask` useless.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        # `None` default avoids the shared mutable-default pitfall of `proxies={}`.
        session.proxies = proxies or {}

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "print('How may I help you today?')"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        session.headers.update(self.headers)
        payload = {
            "messages": [
                {"content": conversation_prompt, "role": "user"}
            ],
            "id": self.id,
            "previewToken": self.previewToken,
            "userId": self.userId,
            "codeModelMode": self.codeModelMode,
            "agentMode": self.agentMode,
            "trendingAgentMode": self.trendingAgentMode,
            "isMicMode": self.isMicMode,
        }

        def for_stream():
            # Stream the plain-text body line by line, yielding a dict with
            # the full text accumulated so far on each non-empty line.
            response = session.post(
                self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
            )
            if (
                not response.ok
                or response.headers.get("Content-Type")
                != "text/plain; charset=utf-8"
            ):
                raise Exception(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )
            streaming_text = ""
            for value in response.iter_lines(
                decode_unicode=True,
                chunk_size=self.stream_chunk_size,
                delimiter="\n",
            ):
                try:
                    if bool(value):
                        # Restore the newline stripped by iter_lines only when
                        # streaming, so streamed chunks concatenate cleanly.
                        streaming_text += value + ("\n" if stream else "")

                        resp = dict(text=streaming_text)
                        self.last_response.update(resp)
                        yield value if raw else resp
                except json.decoder.JSONDecodeError:
                    # Defensive: nothing above parses JSON; kept to mirror the
                    # streaming loops of sibling providers in this project.
                    pass
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            # Drain the stream, then return the final accumulated response.
            for _ in for_stream():
                pass
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt,
                    False,
                    optimizer=optimizer,
                    conversationally=conversationally,
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

src/pytgpt/console.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -485,6 +485,22 @@ def __init__(
485485
act=awesome_prompt,
486486
)
487487

488+
elif provider == "blackboxai":
489+
490+
from pytgpt.blackboxai import BLACKBOXAI
491+
492+
self.bot = BLACKBOXAI(
493+
is_conversation=disable_conversation,
494+
max_tokens=max_tokens,
495+
timeout=timeout,
496+
intro=intro,
497+
filepath=filepath,
498+
update_file=update_file,
499+
proxies=proxies,
500+
history_offset=history_offset,
501+
act=awesome_prompt,
502+
)
503+
488504
elif provider in pytgpt.gpt4free_providers:
489505
from pytgpt.gpt4free import GPT4FREE
490506

src/pytgpt/llama2/main.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -146,12 +146,12 @@ def for_stream():
146146
message_load: str = ""
147147
for value in response.iter_lines(
148148
decode_unicode=True,
149-
delimiter="" if raw else "\n",
149+
delimiter="\n",
150150
chunk_size=self.stream_chunk_size,
151151
):
152152
try:
153153
if bool(value.strip()):
154-
message_load += value
154+
message_load += value + ("\n" if stream else "")
155155
resp: dict = dict(text=message_load)
156156
yield value if raw else resp
157157
self.last_response.update(resp)

tests/test_blackboxai.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
import unittest

import tests.base as base

from pytgpt.blackboxai import BLACKBOXAI


class TestBlackboxai(base.llmBase):
    """Runs the shared LLM test-suite from ``tests.base.llmBase`` against
    the Blackboxai provider."""

    def setUp(self):
        """Provide the provider instance and prompt the base suite expects."""
        self.prompt = base.prompt
        self.bot = BLACKBOXAI()


if __name__ == "__main__":
    unittest.main()

0 commit comments

Comments
 (0)