Commit 61af271

Add gpt4all provider - offline

1 parent 4a69baf commit 61af271

File tree

8 files changed: +252 -5 lines changed

docs/CHANGELOG.md

Lines changed: 7 additions & 1 deletion
@@ -360,4 +360,10 @@ For instance:
 
 - Execute python code in responses *(interactive)* - `exec`
 - Execute python code using the system-installed Python interpreter - *default*
-- Other minor fixes.
+- Other minor fixes.
+
+## v0.4.5
+
+**What's new?**
+
+- New model: **GPT4ALL** - supports offline LLMs.

docs/README.md

Lines changed: 3 additions & 0 deletions
@@ -44,6 +44,7 @@ This project enables seamless interaction with over **45 free LLM providers** wi
 
 The name *python-tgpt* draws inspiration from its parent project [tgpt](https://github.com/aandrew-me/tgpt), which operates on [Golang](https://go.dev/). Through this Python adaptation, users can effortlessly engage with a number of free LLMs available, fostering a smoother AI interaction experience.
 
+
 ### Features
 
 - 🗨️ Enhanced conversational chat experience
@@ -57,6 +58,7 @@ The name *python-tgpt* draws inspiration from its parent project [tgpt](https://
 - 🤖 Pass [awesome-chatgpt prompts](https://github.com/f/awesome-chatgpt-prompts) easily
 - 🧠 Multiple LLM providers - **45+**
 - 🎯 Customizable script generation and execution
+- 🔌 Offline support for Large Language Models
 
 ## Providers
 
@@ -71,6 +73,7 @@ These are simply the hosts of the LLMs, which include:
 9. [Phind](https://www.phind.com) - *default*
 10. [Llama2](https://www.llama2.ai)
 11. [Blackboxai](https://www.blackbox.ai)
+12. [gpt4all](https://gpt4all.io) *(Offline)*
 
 <details>
 

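Since the new provider runs entirely offline, no API key is needed, only a model file downloaded beforehand. Below is a minimal usage sketch (not part of the commit); the model path is hypothetical, and `gpt4all==2.2.0` must be installed:

```python
from pytgpt.gpt4all import GPT4ALL

# Hypothetical path to a locally downloaded GGUF model;
# no network access or API key is required.
bot = GPT4ALL(model="/home/user/models/orca-mini-3b.Q4_0.gguf")

print(bot.chat("Hello there"))
```
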
requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -10,3 +10,4 @@ colorama==0.4.6
 g4f>=0.2.1.0
 pyyaml==6.0.1
 matplotlib
+gpt4all==2.2.0

setup.py

Lines changed: 4 additions & 1 deletion
@@ -9,6 +9,7 @@
     "requests==2.28.2",
     "appdirs==1.4.4",
     "pyyaml==6.0.1",
+    "gpt4all==2.2.0",
 ]
 
 cli_reqs = [
@@ -36,7 +37,7 @@
 
 setup(
     name="python-tgpt",
-    version="0.4.4",
+    version="0.4.5",
     license="MIT",
     author="Smartwa",
     maintainer="Smartwa",
@@ -78,6 +79,8 @@
         "openai",
         "bard",
         "gpt4free",
+        "gpt4all-cli",
+        "gptcli",
     ],
     long_description=Path.open("README.md", encoding="utf-8").read(),
     long_description_content_type="text/markdown",

src/pytgpt/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -1,7 +1,7 @@
 from .utils import appdir
 import g4f
 
-__version__ = "0.4.4"
+__version__ = "0.4.5"
 __author__ = "Smartwa"
 __repo__ = "https://github.com/Simatwa/python-tgpt"
 
@@ -14,6 +14,7 @@
     "phind",
     "llama2",
     "blackboxai",
+    "gpt4all",
 ]
 
 gpt4free_providers = [

src/pytgpt/console.py

Lines changed: 22 additions & 2 deletions
@@ -498,6 +498,26 @@ def __init__(
                 act=awesome_prompt,
             )
 
+        elif provider == "gpt4all":
+            from pytgpt.gpt4all import GPT4ALL
+
+            assert (
+                auth
+            ), "Path to LLM (.gguf or .bin) file is required. Use the flag `--key` or `-k`"
+            self.bot = GPT4ALL(
+                model=auth,
+                is_conversation=disable_conversation,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                presence_penalty=top_p,
+                frequency_penalty=top_k,
+                top_p=top_p,
+                intro=intro,
+                filepath=filepath,
+                update_file=update_file,
+                history_offset=history_offset,
+            )
+
         elif provider in pytgpt.gpt4free_providers:
             from pytgpt.gpt4free import GPT4FREE
 
@@ -1037,7 +1057,7 @@ class ChatInteractive:
     "-k",
     "--key",
     envvar="auth_key",
-    help="LLM API access key or auth value",
+    help="LLM API access key or auth value, or path to a local LLM file (for the gpt4all provider).",
 )
 @click.option(
     "-ct",
@@ -1314,7 +1334,7 @@ class ChatGenerate:
     "-k",
     "--key",
    envvar="auth_key",
-    help="LLM API access key or auth value",
+    help="LLM API access key or auth value, or path to a local LLM file (for the gpt4all provider).",
 )
 @click.option(
     "-ct",

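Note how the console wiring above repurposes the `--key`/`-k` value: for the gpt4all provider it is asserted to be a filesystem path to a `.gguf` or `.bin` model and forwarded as `model`, not an API key. A rough library-level sketch of that wiring (the path and option values are hypothetical, not from the commit):

```python
from pytgpt.gpt4all import GPT4ALL

# Hypothetical value a user would pass via --key / -k.
auth = "/home/user/models/mistral-7b.Q4_0.gguf"

bot = GPT4ALL(
    model=auth,            # forwarded from --key / -k
    max_tokens=800,        # mirrors the console's max_tokens option
    temperature=0.7,       # mirrors the console's temperature option
    history_offset=10250,  # mirrors the console's history_offset option
)
```
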
src/pytgpt/gpt4all/__init__.py

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+from .main import GPT4ALL
+
+__info__ = "Interact with offline models"
+
+__all__ = [
+    "GPT4ALL",
+]

src/pytgpt/gpt4all/main.py

Lines changed: 206 additions & 0 deletions
@@ -0,0 +1,206 @@
+from pytgpt.utils import Optimizers
+from pytgpt.utils import Conversation
+from pytgpt.utils import AwesomePrompts
+from pytgpt.base import Provider
+from gpt4all import GPT4All
+from gpt4all.gpt4all import empty_chat_session
+from gpt4all.gpt4all import append_extension_if_missing
+
+
+import logging
+
+my_logger = logging.getLogger("gpt4all")
+my_logger.setLevel(logging.CRITICAL)
+
+
+class GPT4ALL(Provider):
+    def __init__(
+        self,
+        model: str,
+        is_conversation: bool = True,
+        max_tokens: int = 800,
+        temperature: float = 0.7,
+        presence_penalty: int = 0,
+        frequency_penalty: float = 1.18,
+        top_p: float = 0.4,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        history_offset: int = 10250,
+        act: str = None,
+    ):
+        """Instantiates GPT4ALL
+
+        Args:
+            model (str): Path to LLM model (.gguf or .bin).
+            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
+            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 800.
+            temperature (float, optional): Degree of randomness in the generated text. Defaults to 0.7.
+            presence_penalty (int, optional): Penalty for repeating a topic. Defaults to 0.
+            frequency_penalty (float, optional): Penalty for repeating a word. Defaults to 1.18.
+            top_p (float, optional): Sampling threshold during inference time. Defaults to 0.4.
+            intro (str, optional): Conversation introductory prompt. Defaults to None.
+            filepath (str, optional): Path to file containing conversation history. Defaults to None.
+            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
+            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
+            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+        """
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.model = model
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+        self.last_response = {}
+
+        self.__available_optimizers = tuple(
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        def get_model_name_path():
+            import os
+            from pathlib import Path
+
+            initial_model_path = Path(append_extension_if_missing(model))
+            if initial_model_path.exists():
+                if not initial_model_path.is_absolute():
+                    initial_model_path = Path(os.getcwd()) / initial_model_path
+                return os.path.split(initial_model_path.as_posix())
+            else:
+                raise FileNotFoundError(
+                    "File does not exist: " + initial_model_path.as_posix()
+                )
+
+        model_dir, model_name = get_model_name_path()
+
+        self.gpt4all = GPT4All(
+            model_name=model_name,
+            model_path=model_dir,
+            allow_download=False,
+            verbose=False,
+        )
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> dict:
+        """Chat with AI
+
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            raw (bool, optional): Stream back raw response as received. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            dict : {}
+        ```json
+        {
+            "text" : "How may I help you today?"
+        }
+        ```
+        """
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise Exception(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        def for_stream():
+            response = self.gpt4all.generate(
+                prompt=conversation_prompt,
+                max_tokens=self.max_tokens_to_sample,
+                temp=self.temperature,
+                top_p=self.top_p,
+                repeat_penalty=self.frequency_penalty,
+                streaming=True,
+            )
+
+            message_load: str = ""
+            for token in response:
+                message_load += token
+                resp: dict = dict(text=message_load)
+                yield token if raw else resp
+                self.last_response.update(resp)
+
+            self.conversation.update_chat_history(
+                prompt, self.get_message(self.last_response)
+            )
+            self.gpt4all.current_chat_session = empty_chat_session()
+
+        def for_non_stream():
+            for _ in for_stream():
+                pass
+            return self.last_response
+
+        return for_stream() if stream else for_non_stream()
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str:
+        """Generate response `str`
+        Args:
+            prompt (str): Prompt to be sent.
+            stream (bool, optional): Flag for streaming response. Defaults to False.
+            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
+            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
+        Returns:
+            str: Response generated
+        """
+
+        def for_stream():
+            for response in self.ask(
+                prompt, True, optimizer=optimizer, conversationally=conversationally
+            ):
+                yield self.get_message(response)
+
+        def for_non_stream():
+            return self.get_message(
+                self.ask(
+                    prompt,
+                    False,
+                    optimizer=optimizer,
+                    conversationally=conversationally,
+                )
+            )
+
+        return for_stream() if stream else for_non_stream()
+
+    def get_message(self, response: dict) -> str:
+        """Retrieves message only from response
+
+        Args:
+            response (dict): Response generated by `self.ask`
+
+        Returns:
+            str: Message extracted
+        """
+        assert isinstance(response, dict), "Response should be of dict data-type only"
+        return response["text"]

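Per `ask` above, streaming with `raw=True` yields individual tokens, while `chat(stream=True)` yields the accumulated response text so far on each iteration. A minimal streaming sketch, assuming a local model at the hypothetical path below:

```python
from pytgpt.gpt4all import GPT4ALL

bot = GPT4ALL(model="/home/user/models/orca-mini-3b.Q4_0.gguf")  # hypothetical path

# Print tokens as they are generated; raw=True makes `ask` yield bare tokens
# instead of {"text": ...} dicts holding the accumulated message.
for token in bot.ask("Explain what a context window is.", stream=True, raw=True):
    print(token, end="", flush=True)
```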