Skip to content

Commit 161c837

Browse files
committed
bootstrapping v0.0.7
1 parent 328be75 commit 161c837

File tree

6 files changed

+52
-7
lines changed

6 files changed

+52
-7
lines changed

README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,10 @@ MacOS DMGs are available in [Releases](https://github.com/kspviswa/pyOllaMx/rele
2222

2323
[PyOMlx](https://github.com/kspviswa/PyOMlx) : A Macos App capable of discovering, loading & serving Apple MlX models downloaded from [Apple MLX Community repo in hugging face](https://huggingface.co/mlx-community) 🤗
2424

25+
## Star History
26+
27+
[![Star History Chart](https://api.star-history.com/svg?repos=kspviswa/pyOllaMx&type=Date)](https://star-history.com/#kspviswa/pyOllaMx&Date)
28+
2529
## How to use?
2630

2731
1) Install [Ollama Application](https://ollama.ai/download) & use Ollama CLI to download your desired models

buildflet.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
#!/bin/sh
# Build the PyOllaMx macOS app bundle with Flet.
# Bump --build-version here in lockstep with each release tag (currently v0.0.7).
flet build macos --build-version '0.0.7' --copyright 'Viswa Kumar 2025 ©' --project 'PyOllaMx' --company 'PyOllaMx'

main.py

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -136,10 +136,35 @@ def open_url(e):
136136
def updateChat(message: Message, controlHandle: ft.Column, ai_response: bool = False):
137137
#print(f'message {message} ai_response {ai_response} controlHandle {controlHandle}')
138138
if ai_response:
139+
ai_thinking_text = re.sub(r"</think>.*$", "", message.text, flags=re.DOTALL) if "</think>" in message.text else ""
140+
ai_non_thinking_text = message.text
141+
142+
#print(f'### {ai_thinking_text}')
139143
ai_message_container = ft.Container(width=550)
140144
ai_message_md = ft.Markdown(value="", extension_set="gitHubWeb", code_theme='obsidian', code_style=ft.TextStyle(font_family='Roboto Mono'), on_tap_link=open_url, auto_follow_links=True)
141145
ai_message_md_selectable = ft.SelectionArea(content=ai_message_md)
142-
ai_message_container.content = ai_message_md_selectable
146+
ai_message_thinking_md = ft.ExpansionTile(
147+
title=ft.Text("Thinking tokens 🤔", font_family="RobotoSlab"),
148+
subtitle=ft.Text("Expand to reveal the model's thinking tokens", theme_style=ft.TextThemeStyle.BODY_SMALL, font_family="RobotoSlab"),
149+
affinity=ft.TileAffinity.LEADING,
150+
initially_expanded=False,
151+
collapsed_text_color=ft.Colors.BLUE,
152+
text_color=ft.Colors.BLUE,
153+
controls=[
154+
ft.ListTile(title=ft.Text(
155+
theme_style=ft.TextThemeStyle.BODY_SMALL,
156+
font_family="RobotoSlab",
157+
)),
158+
],
159+
)
160+
if ai_thinking_text :
161+
ai_message_container.content = ft.Column([ai_message_thinking_md,
162+
ai_message_md_selectable,
163+
])
164+
ai_non_thinking_text = ''.join(message.text.split("</think>")[1:])
165+
else:
166+
ai_message_container.content = ai_message_md_selectable
167+
143168
controlHandle.controls.append(
144169
ft.Row([
145170
ft.Image(src=getAILogo(page.session.get('isMlx')),
@@ -151,16 +176,17 @@ def updateChat(message: Message, controlHandle: ft.Column, ai_response: bool = F
151176
width=500, vertical_alignment=ft.CrossAxisAlignment.START,
152177
)
153178
)
179+
ai_message_thinking_md.controls[0].title.value = ai_thinking_text
154180
if enable_streaming.value:
155181
full_r = ""
156-
for chunk in message.text.split(sep=" "):
182+
for chunk in ai_non_thinking_text.split(sep=" "):
157183
full_r += chunk + " "
158184
ai_message_md.value = full_r
159185
# controlHandle.scroll_to(offset=-1, duration=100, curve=ft.AnimationCurve.EASE_IN_OUT)
160186
page.update()
161187
time.sleep(0.05)
162188
else:
163-
ai_message_md.value = message.text
189+
ai_message_md.value = ai_non_thinking_text
164190
else:
165191
controlHandle.controls.append(
166192
ft.Row([

mlxClient.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ def append_history(self, message):
1414
self.messages.append(message)
1515

1616
def chat(self, prompt:str, model: str, temp: float, system:str = 'default') -> str:
17-
print('Entering Chat for Mlx' + model)
17+
#print('Entering Chat for Mlx' + model)
1818
message = {}
1919
message['role'] = 'user'
2020
message['content'] = prompt
@@ -26,7 +26,7 @@ def chat(self, prompt:str, model: str, temp: float, system:str = 'default') -> s
2626
response = self.client.chat.completions.create(model=model,
2727
messages=self.messages)
2828
response = response.choices[0].message.content
29-
print(response)
29+
# print(response)
3030
except Exception as e:
3131
raise ValueError(e)
3232
ai_message = dict({'role' : 'assistant', 'content' : response})

model_hub.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -171,8 +171,9 @@ def download_from_ollama(e: ft.ControlEvent):
171171
ollama_download_pbar,
172172
restart_required_container,
173173
ft.Column([
174+
ft.Text(),
174175
ollama_models_table
175-
], scroll=ft.ScrollMode.ADAPTIVE, height=350),
176+
], scroll=ft.ScrollMode.ADAPTIVE, height=300),
176177
],alignment=ft.MainAxisAlignment.START, horizontal_alignment=ft.CrossAxisAlignment.STRETCH, spacing=10)
177178

178179
coming_soon_view = ft.Column([

pyomlx_test.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
"""Smoke test: load an MLX-community model locally and generate one chat reply.

Requires the third-party `mlx-lm` package (Apple Silicon / MLX only); run it
directly with `python pyomlx_test.py` to sanity-check that model loading and
generation work outside the PyOllaMx app.
"""
from mlx_lm import load, generate


def main() -> None:
    """Load the model, apply its chat template if present, and print a reply."""
    model, tokenizer = load("mlx-community/Phi-4-mini-instruct-8bit")

    prompt = "Hi how are you?"

    # Instruct-tuned models expect role markers; wrap the raw prompt in the
    # tokenizer's chat template when one is defined, otherwise send it as-is.
    if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
        messages = [{"role": "user", "content": prompt}]
        prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

    response = generate(model, tokenizer, prompt=prompt, verbose=True)
    print(response)


if __name__ == "__main__":
    # Guard so importing this module never triggers a model download/generation.
    main()

0 commit comments

Comments (0)