Skip to content

Commit 532a74a

Browse files
committed
Allow the g4f user to select a provider, show the models available for each provider, and add manuals to the G4F and Using-API tabs
1 parent 1ab8e40 commit 532a74a

File tree

5 files changed

+119
-37
lines changed

5 files changed

+119
-37
lines changed

pyqt_openai/__init__.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -336,9 +336,6 @@ def move_updater():
336336

337337
STT_MODEL = 'whisper-1'
338338

339-
# G4F Models
340-
G4F_MODELS = ['gpt-4o', 'gpt-4o-mini', 'gemini-flash', 'claude-3-5-sonnet', 'llama-3.1-70b']
341-
342339
# Endpoint
343340
# https://platform.openai.com/docs/models/model-endpoint-compatibility
344341
OPENAI_ENDPOINT_DICT = {
@@ -391,6 +388,10 @@ def move_updater():
391388
# This has to be managed separately since some of the arguments are different with usual models
392389
O1_MODELS = ['o1-preview', 'o1-mini']
393390

391+
DEFAULT_LLM = 'gpt-4o'
392+
393+
G4F_PROVIDER_DEFAULT = 'Auto'
394+
394395
# Dictionary that stores the platform and model pairs
395396
PROVIDER_MODEL_DICT = {
396397
'OpenAI': ['gpt-4o', 'gpt-4o-mini']+O1_MODELS,
@@ -493,7 +494,7 @@ def move_updater():
493494
'show_chat_list': True,
494495
'stream': True,
495496
'db': 'conv',
496-
'model': 'gpt-4o',
497+
'model': DEFAULT_LLM,
497498
'show_setting': True,
498499
'use_llama_index': False,
499500
'do_not_ask_again': False,

pyqt_openai/chat_widget/right_sidebar/usingAPIPage.py

Lines changed: 25 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,15 @@
11
from PySide6.QtCore import Qt, Signal
22
from PySide6.QtWidgets import QWidget, QDoubleSpinBox, QSpinBox, QFormLayout, QSizePolicy, QComboBox, QTextEdit, \
3-
QLabel, QVBoxLayout, QCheckBox, QPushButton, QScrollArea, QGroupBox, QHBoxLayout
3+
QLabel, QVBoxLayout, QCheckBox, QPushButton, QScrollArea, QGroupBox, QHBoxLayout, QTextBrowser
44

55
from pyqt_openai import DEFAULT_SHORTCUT_JSON_MODE, OPENAI_TEMPERATURE_RANGE, OPENAI_TEMPERATURE_STEP, \
66
MAX_TOKENS_RANGE, TOP_P_RANGE, TOP_P_STEP, FREQUENCY_PENALTY_RANGE, PRESENCE_PENALTY_STEP, PRESENCE_PENALTY_RANGE, \
77
FREQUENCY_PENALTY_STEP, LLAMAINDEX_URL
88
from pyqt_openai.chat_widget.right_sidebar.apiWidget import ApiWidget
99
from pyqt_openai.config_loader import CONFIG_MANAGER
1010
from pyqt_openai.lang.translations import LangClass
11-
from pyqt_openai.globals import get_chat_model, init_llama, get_openai_chat_model
12-
from pyqt_openai.util.script import getSeparator
11+
from pyqt_openai.globals import init_llama, get_openai_chat_model
12+
from pyqt_openai.util.script import getSeparator, get_chat_model
1313
from pyqt_openai.widgets.linkLabel import LinkLabel
1414

1515

@@ -37,6 +37,26 @@ def __initVal(self):
3737
self.__use_llama_index = CONFIG_MANAGER.get_general_property('use_llama_index')
3838

3939
def __initUi(self):
40+
manualBrowser = QTextBrowser()
41+
manualBrowser.setOpenExternalLinks(True)
42+
manualBrowser.setOpenLinks(True)
43+
44+
# TODO LANGUAGE
45+
manualBrowser.setHtml('''
46+
<h2>Using API</h2>
47+
<h3>Description</h3>
48+
<p>- Fast responses.</p>
49+
<p>- Stable response server.</p>
50+
<p>- Ability to save your AI usage history and statistics.</p>
51+
<p>- Option to add custom LLMs you have created.</p>
52+
<p>- Ability to save conversation history on the server.</p>
53+
<p>- JSON response functionality available (limited to specific LLMs).</p>
54+
<p>- LlamaIndex can be used.</p>
55+
<p>- Various hyperparameters can be assigned.</p>
56+
''')
57+
58+
manualBrowser.setSizePolicy(QSizePolicy.Policy.MinimumExpanding, QSizePolicy.Policy.MinimumExpanding)
59+
4060
systemlbl = QLabel(LangClass.TRANSLATIONS['System'])
4161
systemlbl.setToolTip(LangClass.TRANSLATIONS['Basically system means instructions or rules that the model should follow.'] + '\n' + LangClass.TRANSLATIONS['You can write your own system instructions here.'])
4262

@@ -170,6 +190,8 @@ def __initUi(self):
170190
self.__llamaChkBox.setText(LangClass.TRANSLATIONS['Use LlamaIndex'])
171191

172192
lay = QVBoxLayout()
193+
lay.addWidget(manualBrowser)
194+
lay.addWidget(getSeparator('horizontal'))
173195
lay.addWidget(systemlbl)
174196
lay.addWidget(self.__systemTextEdit)
175197
lay.addWidget(saveSystemBtn)
Lines changed: 37 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
11
from PySide6.QtCore import Qt, Signal
2-
from PySide6.QtWidgets import QWidget, QComboBox, QVBoxLayout, QCheckBox, QHBoxLayout, QLabel
2+
from PySide6.QtWidgets import QWidget, QComboBox, QCheckBox, QFormLayout, QTextBrowser, QSizePolicy
33

4+
from pyqt_openai import G4F_PROVIDER_DEFAULT
45
from pyqt_openai.config_loader import CONFIG_MANAGER
5-
from pyqt_openai.globals import get_chat_model
66
from pyqt_openai.lang.translations import LangClass
7+
from pyqt_openai.util.script import get_g4f_providers, get_g4f_models_by_provider, get_chat_model, get_g4f_models, \
8+
getSeparator
79

810

911
class UsingG4FPage(QWidget):
@@ -20,27 +22,39 @@ def __initVal(self):
2022
self.__model = CONFIG_MANAGER.get_general_property('model')
2123

2224
def __initUi(self):
23-
modelCmbBox = QComboBox()
24-
modelCmbBox.addItems(get_chat_model(is_g4f=True))
25-
modelCmbBox.setCurrentText(self.__model)
26-
modelCmbBox.currentTextChanged.connect(self.__modelChanged)
25+
manualBrowser = QTextBrowser()
26+
manualBrowser.setOpenExternalLinks(True)
27+
manualBrowser.setOpenLinks(True)
2728

28-
lay = QHBoxLayout()
29-
lay.addWidget(QLabel(LangClass.TRANSLATIONS['Model']))
30-
lay.addWidget(modelCmbBox)
31-
lay.setContentsMargins(0, 0, 0, 0)
29+
# TODO LANGUAGE
30+
manualBrowser.setHtml('''
31+
<h2>Using GPT4Free (Free)</h2>
32+
<h3>Description</h3>
33+
<p>- Responses may often be slow or incomplete.</p>
34+
<p>- The response server may be unstable.</p>
35+
''')
36+
manualBrowser.setSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred)
3237

33-
selectModelWidget = QWidget()
34-
selectModelWidget.setLayout(lay)
38+
self.__modelCmbBox = QComboBox()
39+
self.__modelCmbBox.addItems(get_chat_model(is_g4f=True))
40+
self.__modelCmbBox.setCurrentText(self.__model)
41+
self.__modelCmbBox.currentTextChanged.connect(self.__modelChanged)
3542

3643
streamChkBox = QCheckBox()
3744
streamChkBox.setChecked(self.__stream)
3845
streamChkBox.toggled.connect(self.__streamChecked)
3946
streamChkBox.setText(LangClass.TRANSLATIONS['Stream'])
4047

41-
lay = QVBoxLayout()
42-
lay.addWidget(selectModelWidget)
43-
lay.addWidget(streamChkBox)
48+
providerCmbBox = QComboBox()
49+
providerCmbBox.addItems(get_g4f_providers(including_auto=True))
50+
providerCmbBox.currentTextChanged.connect(self.__providerChanged)
51+
52+
lay = QFormLayout()
53+
lay.addRow(manualBrowser)
54+
lay.addRow(getSeparator('horizontal'))
55+
lay.addRow('Model', self.__modelCmbBox)
56+
lay.addRow('Provider', providerCmbBox)
57+
lay.addRow(streamChkBox)
4458
lay.setAlignment(Qt.AlignmentFlag.AlignTop)
4559

4660
self.setLayout(lay)
@@ -51,4 +65,11 @@ def __modelChanged(self, v):
5165

5266
def __streamChecked(self, f):
5367
self.__stream = f
54-
CONFIG_MANAGER.set_general_property('stream', f)
68+
CONFIG_MANAGER.set_general_property('stream', f)
69+
70+
def __providerChanged(self, v):
    """Repopulate the model combo box to match the newly selected provider.

    Selecting the 'Auto' entry restores the full g4f model catalogue;
    any other selection shows only that provider's models.
    """
    self.__modelCmbBox.clear()
    if v == G4F_PROVIDER_DEFAULT:
        items = get_g4f_models()
    else:
        items = get_g4f_models_by_provider(v)
    self.__modelCmbBox.addItems(items)

pyqt_openai/globals.py

Lines changed: 8 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
from g4f.client import Client
1919

2020
from pyqt_openai import STT_MODEL, OPENAI_ENDPOINT_DICT, PROVIDER_MODEL_DICT, DEFAULT_GEMINI_MODEL, LLAMA_REQUEST_URL, \
21-
OPENAI_CHAT_ENDPOINT, O1_MODELS, G4F_MODELS
21+
OPENAI_CHAT_ENDPOINT, O1_MODELS
2222
from pyqt_openai.config_loader import CONFIG_MANAGER
2323
from pyqt_openai.lang.translations import LangClass
2424
from pyqt_openai.models import ChatMessageContainer
@@ -49,7 +49,7 @@ def set_api_key(env_var_name, api_key):
4949
if env_var_name == 'LLAMA_API_KEY':
5050
LLAMA_CLIENT.api_key = api_key
5151

52-
def get_model_endpoint(model):
52+
def get_openai_model_endpoint(model):
5353
for k, v in OPENAI_ENDPOINT_DICT.items():
5454
endpoint_group = list(v)
5555
if model in endpoint_group:
@@ -58,13 +58,6 @@ def get_model_endpoint(model):
5858
def get_openai_chat_model():
5959
return OPENAI_ENDPOINT_DICT[OPENAI_CHAT_ENDPOINT]
6060

61-
def get_chat_model(is_g4f=False):
62-
if is_g4f:
63-
return G4F_MODELS
64-
else:
65-
all_models = [model for models in PROVIDER_MODEL_DICT.values() for model in models]
66-
return all_models
67-
6861
def get_image_url_from_local(image):
6962
"""
7063
Image is bytes, this function converts it to base64 and returns the image url
@@ -260,8 +253,6 @@ def get_api_response(args, get_content_only=True):
260253
else:
261254
return response
262255
elif provider == 'Gemini':
263-
# Change 'content' to 'parts'
264-
# Change role's value from 'assistant' to 'model'
265256
for message in args['messages']:
266257
message['parts'] = message.pop('content')
267258
if message['role'] == 'assistant':
@@ -364,7 +355,9 @@ def run(self):
364355

365356
print(f"Done in {int((time.time() - start_time) * 1000)}ms.")
366357
except Exception as e:
367-
self.errorGenerated.emit(f'<p style="color:red">{e}</p>')
358+
# TODO LANGUAGE
359+
self.errorGenerated.emit(f'<p style="color:red">{e}</p>\n\n'
360+
f'(Are you registered valid OpenAI API Key? This feature requires OpenAI API Key.)\n')
368361

369362
def stop(self):
370363
self.__stop = True
@@ -458,7 +451,9 @@ def run(self):
458451
)
459452
self.stt_finished.emit(transcript.text)
460453
except Exception as e:
461-
self.errorGenerated.emit(f'<p style="color:red">{e}</p>')
454+
# TODO LANGUAGE
455+
self.errorGenerated.emit(f'<p style="color:red">{e}\n\n'
456+
f'(Are you registered valid OpenAI API Key? This feature requires OpenAI API Key.)</p>')
462457
finally:
463458
os.remove(self.filename)
464459

pyqt_openai/util/script.py

Lines changed: 44 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,12 +22,14 @@
2222
from PySide6.QtCore import Qt, QUrl
2323
from PySide6.QtGui import QDesktopServices
2424
from PySide6.QtWidgets import QMessageBox, QFrame
25+
from g4f.Provider import ProviderUtils
26+
from g4f.models import ModelUtils
2527
from jinja2 import Template
2628

2729
from pyqt_openai import MAIN_INDEX, \
2830
PROMPT_NAME_REGEX, PROMPT_MAIN_KEY_NAME, PROMPT_BEGINNING_KEY_NAME, \
2931
PROMPT_END_KEY_NAME, PROMPT_JSON_KEY_NAME, CONTEXT_DELIMITER, THREAD_ORDERBY, DEFAULT_APP_NAME, \
30-
AUTOSTART_REGISTRY_KEY, is_frozen
32+
AUTOSTART_REGISTRY_KEY, is_frozen, G4F_PROVIDER_DEFAULT, PROVIDER_MODEL_DICT
3133
from pyqt_openai.lang.translations import LangClass
3234
from pyqt_openai.models import ImagePromptContainer
3335
from pyqt_openai.globals import DB
@@ -414,3 +416,44 @@ def generate_random_prompt(arr):
414416
else:
415417
random_prompt = ''
416418
return random_prompt
419+
420+
def get_g4f_models():
    """Return every model name known to g4f (the keys of ModelUtils.convert)."""
    return list(ModelUtils.convert.keys())
423+
424+
def get_g4f_providers(including_auto=False):
425+
providers = list(ProviderUtils.convert.keys())
426+
if including_auto:
427+
providers = [G4F_PROVIDER_DEFAULT] + providers
428+
return providers
429+
430+
def get_g4f_models_by_provider(provider):
431+
provider = ProviderUtils.convert[provider]
432+
models = []
433+
if hasattr(provider, 'models'):
434+
models = provider.models if provider.models else []
435+
return models
436+
437+
def get_g4f_providers_by_model(model):
438+
providers = get_g4f_providers()
439+
supported_providers = []
440+
441+
for provider in providers:
442+
provider = ProviderUtils.convert[provider]
443+
444+
if hasattr(provider, 'models'):
445+
models = provider.models if provider.models else models
446+
if model in models:
447+
supported_providers.append(provider)
448+
449+
supported_providers = [provider.get_dict()['name'] for provider in supported_providers]
450+
451+
return supported_providers
452+
453+
454+
def get_chat_model(is_g4f=False):
455+
if is_g4f:
456+
return get_g4f_models()
457+
else:
458+
all_models = [model for models in PROVIDER_MODEL_DICT.values() for model in models]
459+
return all_models

0 commit comments

Comments
 (0)