Skip to content

Commit 2145bc8

Browse files
committed
fixing markdown copy bug and pyomlx client
1 parent d14b72c commit 2145bc8

File tree

7 files changed

+73
-62
lines changed

7 files changed

+73
-62
lines changed

main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ def updateChat(message: Message, controlHandle: ft.Column, ai_response: bool = F
137137
#print(f'message {message} ai_response {ai_response} controlHandle {controlHandle}')
138138
if ai_response:
139139
ai_message_container = ft.Container(width=550)
140-
ai_message_md = ft.Markdown(value="", extension_set="gitHubWeb", code_theme='obsidian', code_style=ft.TextStyle(font_family='Roboto Mono'),selectable=True, on_tap_link=open_url, auto_follow_links=True)
140+
ai_message_md = ft.Markdown(value="", extension_set="gitHubWeb", code_theme='obsidian', code_style=ft.TextStyle(font_family='Roboto Mono'), on_tap_link=open_url, auto_follow_links=True)
141141
ai_message_md_selectable = ft.SelectionArea(content=ai_message_md)
142142
ai_message_container.content = ai_message_md_selectable
143143
controlHandle.controls.append(

mlxClient.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
from openai import OpenAI
2+
from typing import List
23

34
class MlxClient():
45

@@ -13,6 +14,7 @@ def append_history(self, message):
1314
self.messages.append(message)
1415

1516
def chat(self, prompt:str, model: str, temp: float, system:str = 'default') -> str:
17+
print('Entering Chat for Mlx' + model)
1618
message = {}
1719
message['role'] = 'user'
1820
message['content'] = prompt
@@ -21,14 +23,22 @@ def chat(self, prompt:str, model: str, temp: float, system:str = 'default') -> s
2123
try:
2224
#response = requests.post(self.llmHost, data=json.dumps(data), headers={'Content-Type': 'application/json'})
2325
#print(f'response code {response.status_code}')
24-
response = self.client.chat.completions.create(model=f"mlx-community/{model}",
26+
response = self.client.chat.completions.create(model=model,
2527
messages=self.messages)
2628
response = response.choices[0].message.content
29+
print(response)
2730
except Exception as e:
2831
raise ValueError(e)
2932
ai_message = dict({'role' : 'assistant', 'content' : response})
3033
self.messages.append(ai_message)
3134
return response
3235

3336
def chat_stream(self, prompt:str, model: str, temp: float) -> str:
34-
pass
37+
pass
38+
39+
def list(self) -> List[str]:
40+
response = self.client.models.list()
41+
models = []
42+
for m in response:
43+
models.append(m.id)
44+
return models

models.py

Lines changed: 5 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,8 @@
33
from typing_extensions import List
44
import flet as ft
55
import re
6-
from ollama import list
6+
from ollamaOpenAIClient import OllamaOpenAPIClient
7+
from mlxClient import MlxClient
78

89
DEFAULT_OLLAMA_MODEL_REGISTRY = Path("~/.ollama/models/manifests/registry.ollama.ai/library").expanduser()
910
DEFAULT_HF_MLX_MODEL_REGISTRY = Path("~/.cache/huggingface/hub/").expanduser()
@@ -22,23 +23,12 @@
2223

2324
def returnModels() -> List[str]:
    """Return the names of all models the local Ollama server reports."""
    print('entering retModels')
    # Delegate discovery to the OpenAI-compatible Ollama client.
    return OllamaOpenAPIClient().list()
3128

3229
def returnMlxModels() -> List[str]:
    """Return the names of all MLX models served by the local PyOMlX server."""
    # Bug fix: the debug message was copy-pasted from returnModels() and
    # claimed to be entering the wrong function.
    print('entering retMlxModels')
    models = MlxClient().list()
    return models
4333

4434
def retModelOptions(isMlx=False):

ollamaOpenAIClient.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
from openai import OpenAI
from typing import List


class OllamaOpenAPIClient():
    """Chat client for a local Ollama server via its OpenAI-compatible API.

    The running conversation is kept in ``self.messages`` so successive
    ``chat`` calls share history.
    """

    def __init__(self):
        self.messages = []
        # Ollama's OpenAI-compatible endpoint. The api_key is required by the
        # openai client library but ignored by Ollama, so any placeholder works.
        self.client = OpenAI(base_url='http://127.0.0.1:11434/v1', api_key='pyomlx')

    def clear_history(self):
        """Drop all accumulated conversation messages."""
        self.messages.clear()

    def append_history(self, message):
        """Append a pre-built message dict to the conversation history."""
        self.messages.append(message)

    def chat(self, prompt: str, model: str, temp: float, system: str = 'default') -> str:
        """Send *prompt* to *model* and return the assistant's reply text.

        Both the user prompt and the assistant reply are appended to
        ``self.messages``. Raises ``ValueError`` (chained to the underlying
        error) on any API failure.

        NOTE(review): ``system`` is currently unused — confirm whether a
        system message should be injected when it differs from 'default'.
        """
        self.messages.append({'role': 'user', 'content': prompt})
        try:
            # Bug fix: ``temp`` was accepted but never forwarded to the API.
            response = self.client.chat.completions.create(model=model,
                                                           messages=self.messages,
                                                           temperature=temp)
            reply = response.choices[0].message.content
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise ValueError(e) from e
        self.messages.append({'role': 'assistant', 'content': reply})
        return reply

    def chat_stream(self, prompt: str, model: str, temp: float) -> str:
        """Streaming chat — not implemented yet."""
        pass

    def list(self) -> List[str]:
        """Return the ids of all models the server reports."""
        return [m.id for m in self.client.models.list()]


if __name__ == '__main__':
    print(OllamaOpenAPIClient().list())

prompt.py

Lines changed: 4 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,10 @@
1-
from mlxLLM import MlxLLM
21
from search import *
3-
#from mlxLLM_local import MlxLLM_local
42
import requests
5-
from ollamaClient import OllamaClient
63
from mlxClient import MlxClient
4+
from ollamaOpenAIClient import OllamaOpenAPIClient
75

8-
oChatClient = OllamaClient()
9-
oSearchClient = OllamaClient()
6+
oChatClient = OllamaOpenAPIClient()
7+
oSearchClient = OllamaOpenAPIClient()
108

119
mChatClient = MlxClient()
1210
mSearchClient = MlxClient()
@@ -70,39 +68,4 @@ def firePrompt(prompt: str, model: str="dolphin-mistral:latest", temp=0.4, isMlx
7068
except Exception as e:
7169
return f'Generic Error Occured {e}', ""
7270

73-
return response, keywords
74-
75-
76-
77-
78-
79-
# def firePrompt1(prompt: str, model: str="dolphin-mistral:latest", temp=0.4, isMlx=False, chat_mode=False) -> str:
80-
# res = ""
81-
# keywords = ""
82-
# if chat_mode:
83-
# if isMlx:
84-
# llm = MlxLLM(model=model, temp=temp)
85-
# #llm = OllamaClient()
86-
# #llm = MlxLLM_local(model=model, temp=temp)
87-
# else:
88-
# #llm = Ollama(model=model,
89-
# # temperature=temp
90-
# # )
91-
# llm = OllamaClient()
92-
# try:
93-
# #res = llm.invoke(prompt)
94-
# res = llm.chat(model=model, temp=temp, prompt=prompt)
95-
# except requests.exceptions.ConnectionError as e:
96-
# err_str = f'PyOMlX' if isMlx else f'Ollama'
97-
# return f'Unable to connect to {err_str}. Is it running?🤔', ""
98-
# except Exception as e:
99-
# return f'Generic Error Occured {e}', ""
100-
# else:
101-
# try:
102-
# if isMlx:
103-
# res = f'Search not implemented for MlX models. Stay tuned!'
104-
# else:
105-
# res, keywords = retSearchResults(model=model, search_str=prompt, temp=temp)
106-
# except Exception as e:
107-
# return f'__Generic Error Occured__ {e}', ""
108-
# return res, keywords
71+
return response, keywords

requirements.txt

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ duckduckgo_search==6.1.7
22
langchain==0.1.4
33
langchain-community==0.0.16
44
langchain-core==0.1.17
5-
typing_extensions==4.9.0
6-
flet==0.23.2
5+
typing_extensions==4.12.2
6+
flet==0.25.2
7+
flet-core==0.24.1
78
openai==1.58.1

settings.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
import flet as ft
22
from models import *
33

4+
PYOLLAMX_VERSION = '0.0.5'
5+
46
def clearState(e: ft.ControlEvent) -> None:
57
model_dropdown.value = "N/A"
68
e.page.session.set('selected_model', 'N/A')
@@ -73,10 +75,12 @@ def updateModelInfo(e: ft.ControlEvent) -> None:
7375
height=75,
7476
fit=ft.ImageFit.CONTAIN,
7577
)
78+
settings_pyollmx_version_text = ft.Text(value=f'v{PYOLLAMX_VERSION}', style=ft.TextStyle(font_family='CabinSketch-Bold'), size=10)
7679

7780
settings_banner_view = ft.Row([
7881
settings_banner_image,
79-
settings_banner_text
82+
settings_banner_text,
83+
settings_pyollmx_version_text
8084
], alignment=ft.MainAxisAlignment.CENTER, vertical_alignment=ft.CrossAxisAlignment.CENTER)
8185

8286

0 commit comments

Comments
 (0)