-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathassistant.py
More file actions
682 lines (548 loc) · 28.1 KB
/
assistant.py
File metadata and controls
682 lines (548 loc) · 28.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
import subprocess
import os
import requests
import json
from datetime import datetime, time
import re
import random
import time as time_lib

# =========================================================
# CRITICAL FIX: Robust Safely Handled Imports & State
# =========================================================
# Optional third-party modules default to None so later code can test
# "module is None" instead of risking a NameError when an import fails.
sr = None
pyjokes = None
torch = None
torchaudio = None
numpy = None
whisper = None

# Availability flags, flipped to True by the guarded imports below.
SPEECH_RECOGNITION_AVAILABLE = False
PYJOKES_AVAILABLE = False
WHISPER_AVAILABLE = False

# Global state for input mode: 'S' = speech, 'W' = written (keyboard).
CURRENT_MODE = 'W'  # Start in Written mode for ease of testing

# --- Speech Recognition Component ---
try:
    import speech_recognition as sr
    SPEECH_RECOGNITION_AVAILABLE = True
except ImportError:
    print("Warning: Failed to import SpeechRecognition. Legacy STT unavailable.")

# --- Pyjokes Component ---
try:
    import pyjokes
    PYJOKES_AVAILABLE = True
except ImportError:
    print("Warning: Failed to import pyjokes. Joke command unavailable.")

# --- Whisper and PyTorch Components ---
try:
    import torch
    import torchaudio
    import numpy
    import whisper
    WHISPER_AVAILABLE = True
except ImportError:
    print("Warning: Failed to import Whisper components. Voice command functionality may be limited.")

# ==============================
# 1. WHISPER CONFIGURATION
# ==============================
# Lazily populated by listen_whisper(); set to False if loading fails.
WHISPER_MODEL = None

# ============================================
# +++ 2. OLLAMA CONFIGURATION +++
# ============================================
OLLAMA_API_URL = "http://localhost:11434/api/chat"
# NOTE(review): an earlier comment claimed this was switched to a custom
# model, but the value is the stock "llama3" — confirm which is intended.
OLLAMA_MODEL = "llama3"
# NOTE: The full prompt is now managed in the Modelfile, but we keep the structure here for history fallbacks.
OLLAMA_SYSTEM_PROMPT = """
You are Ishu, a helpful and friendly local AI assistant created by Shubham Jana, specializing in providing emotional support, telling jokes/stories, and helping with B.Tech studies.
Your primary function is to manage the user's daily schedule/routine, which is stored in a JSON file.
When a user's request requires using a tool, you MUST respond ONLY with a JSON object
in the following format: {"tool_call": {"name": "function_name", "arguments": {"arg1": "value", "arg2": "value"}}}.
Do NOT include any other text, markdown, or conversational phrases when making a tool call.
The available tools and their required arguments are:
- get_routine(): Retrieves the user's entire daily routine from the file.
- get_task_by_time(query_time: str [optional]): Finds the activity at a **single point in time (HH:MM)**, not a time range. This function is for "what are I doing AT 11:30" or "what is my next task."
- add_routine_entry(start: str, end: str, activity: str): Adds a new entry to the routine file. Both start and end must be strict HH:MM.
- remove_routine_entry(activity_keyword: str): Removes an entry matching a keyword from the routine file.
If the request is NOT a tool call (e.g., asking a general question, asking for a joke, or when provided with tool results),
answer the question directly and concisely as Ishu.
"""
# ============================================

# --- File Paths (simple relative paths; data files live next to assistant.py) ---
ROUTINE_FILE_PATH = "routine.json"
FAVORITES_FILE_PATH = "favorites.json"
# ========== Helper functions ==========
def speak(text, blocking=False):
    """
    Speak *text* aloud and always echo it to the console.

    On macOS the local 'say' command is used via subprocess; *blocking*
    controls whether we wait for the speech to finish. ARM Linux
    (Raspberry Pi) has a placeholder branch; everywhere else we only print.

    Fix: os.uname() exists only on POSIX systems, so it is now called at
    most once, behind an os.name guard — the old elif branch raised
    AttributeError on Windows.
    """
    print(f"Ishu says: {text}")
    uname = os.uname() if os.name == "posix" else None
    is_mac = uname is not None and uname.sysname == "Darwin"
    if is_mac:
        try:
            # List form (shell=False) avoids any quoting/injection issues.
            command = ['say', text]
            if blocking:
                # Waits for the speech to finish (blocking)
                subprocess.run(command)
            else:
                # Starts the speech and moves on (non-blocking)
                subprocess.Popen(command)
        except FileNotFoundError:
            print("Warning: Mac 'say' command not found. Speech failed.")
            print("TTS currently configured for macOS 'say' command. Speech unavailable.")
    # --- RASPBERRY PI/LINUX TTS Placeholder ---
    elif uname is not None and uname.sysname == "Linux" and ("arm" in uname.machine or "aarch64" in uname.machine):
        # NOTE: Placeholder for Pi-compatible TTS engine (e.g., Piper/PicoTTS)
        print("Pi/Linux environment detected. TTS engine (Piper/Pico) needs to be configured and uncommented.")
    # Non-Mac/Pi environments (including Windows): text output only.
    else:
        print("TTS currently configured for macOS 'say' command. Speech unavailable.")
def listen_whisper():
    """Records audio from the default microphone and uses Whisper for
    high-accuracy transcription.

    Returns the transcribed text ("" on timeout/error), or an explanatory
    message string when the required modules/model are unavailable.
    NOTE(review): callers cannot distinguish that error message from a
    genuine utterance — verify this is intended.
    """
    global WHISPER_MODEL
    # Both stacks are needed: SpeechRecognition captures audio, Whisper transcribes.
    if not WHISPER_AVAILABLE or not SPEECH_RECOGNITION_AVAILABLE:
        return "Required speech modules (Whisper/SpeechRecognition) failed to load."
    # Lazy one-time model load; False (not None) marks a permanent failure
    # so we don't retry the load on every call.
    if WHISPER_MODEL is None:
        try:
            print("Lazily loading Whisper model...")
            WHISPER_MODEL = whisper.load_model("base")
        except Exception as e:
            print(f"Error loading Whisper model: {e}")
            WHISPER_MODEL = False  # Mark as failed
            return "Whisper model failed to load during runtime."
    # Proceed with listening
    r = sr.Recognizer()
    temp_audio_file = "temp_audio.wav"
    with sr.Microphone() as source:
        print("Whisper Listening...")
        r.adjust_for_ambient_noise(source)
        try:
            audio = r.listen(source, timeout=5, phrase_time_limit=15)
        except sr.WaitTimeoutError:
            print("No speech detected within the timeout period.")
            return ""
        try:
            # Save the recorded audio to a temporary file
            with open(temp_audio_file, "wb") as f:
                f.write(audio.get_wav_data())
            # 2. Use Whisper to transcribe the file
            if WHISPER_MODEL:
                print("Transcribing with Whisper...")
                # fp16=False keeps CPU-only machines from warning/failing.
                result = WHISPER_MODEL.transcribe(temp_audio_file, fp16=False)
                text = result["text"].strip()
                print(f"User said: {text}")
                return text
            else:
                print("Whisper model not loaded.")
                return ""
        except Exception as e:
            print(f"Whisper/Audio error; {e}")
            return ""
        finally:
            # Always clean up the temp WAV, even on error or early return.
            if os.path.exists(temp_audio_file):
                os.remove(temp_audio_file)
def listen_written():
    """Read one command from the keyboard, normalised to lower case."""
    return input("Write your command: ").lower()
def select_initial_mode():
    """Prompt until the user picks a startup input mode.

    Sets the module-level CURRENT_MODE to 'S' (speech) or 'W' (written)
    and returns the chosen letter.
    """
    global CURRENT_MODE
    while True:
        print("\nChoose initial input mode: (S)peech or (W)ritten")
        choice = input("Enter S or W: ").upper().strip()
        if choice not in ('S', 'W'):
            print("Invalid input. Please enter S or W.")
            continue
        CURRENT_MODE = choice
        return choice
def load_json(filename, default):
    """Load JSON from *filename*, returning *default* on any expected failure.

    Best-effort by design: a missing, unreadable, or malformed file falls
    back to *default* instead of raising, so the data files are optional.
    The exception clause is narrowed from a bare Exception to
    (OSError, ValueError) — json.JSONDecodeError and UnicodeDecodeError
    are ValueError subclasses — so genuine programming errors still surface.
    """
    try:
        if os.path.exists(filename):
            with open(filename, "r") as f:
                return json.load(f)
    except (OSError, ValueError):
        pass
    return default
def save_json(filename, obj):
    """Serialise *obj* as pretty-printed JSON to *filename* (best effort).

    Creates the parent directory if needed; failures are reported to the
    console rather than raised, matching load_json's tolerant behaviour.
    """
    try:
        parent = os.path.dirname(filename) or '.'
        os.makedirs(parent, exist_ok=True)
        with open(filename, "w") as handle:
            json.dump(obj, handle, indent=4)
    except Exception as e:
        print(f"Error saving JSON: {e}")
def ollama_response(prompt, history=None):
    """
    Send *prompt* (plus optional conversation *history*) to the local
    Ollama chat API and return the assistant's message dict.

    The returned dict always has "role" and "content" keys, including on
    every error path, so callers never need to special-case failures.
    Hallucinated follow-on turns ("User:"/"Assistant:" lines) are stripped
    from successful responses.

    Fix: *history* is copied before use — the old code appended the user
    prompt directly into the caller's list.
    """
    print("Ollama thinking...")
    # Build the messages list for the API call.
    if history:
        messages = list(history)  # copy: never mutate the caller's history
    else:
        messages = [{"role": "system", "content": OLLAMA_SYSTEM_PROMPT}]
    # Only append the new user prompt if it's not already the last message.
    if not messages or messages[-1].get('content') != prompt:
        messages.append({"role": "user", "content": prompt})
    payload = {
        "model": OLLAMA_MODEL,
        "messages": messages,
        "stream": False,
    }
    try:
        # No timeout on purpose: local LLM generations can take a long time.
        response = requests.post(OLLAMA_API_URL, json=payload)
        if response.status_code == 200:
            data = response.json()
            message = data.get("message", {"role": "assistant", "content": "Sorry, the LLM returned an empty response."})
            # Post-process: the model sometimes continues the dialogue by
            # itself. Keep only the text before the first line that starts
            # with "User:" or "Assistant:" (any capitalisation/newline style).
            content = message.get("content", "")
            match = re.search(r'(\n|\r\n|\r)\s*(User:|Assistant:)', content, re.IGNORECASE)
            if match:
                clean_content = content[:match.start()].strip()
            else:
                clean_content = content.strip()
            message["content"] = clean_content
            return message
        else:
            return {"role": "assistant", "content": f"Ollama API Error (Code {response.status_code}). Check your model name ({OLLAMA_MODEL}). Response text: {response.text[:100]}..."}
    except requests.exceptions.ConnectionError:
        return {"role": "assistant", "content": f"I can't connect to the local LLM. Please make sure Ollama is running on http://localhost:11434 and the model ('{OLLAMA_MODEL}') is created."}
    except Exception as e:
        print(f"Unexpected Ollama error: {e}")
        return {"role": "assistant", "content": "An unexpected error occurred while processing the LLM request."}
# ========== Routine Features with Robust Time Logic (TOOL FUNCTIONS) ==========
def parse_time(timestr):
    """Convert an 'HH:MM'-ish string to a datetime.time.

    Characters other than digits and ':' (e.g. a stray 'AM'/'PM' appended
    by the LLM) are stripped first. Raises ValueError when the cleaned
    string does not reduce to exactly one hour:minute pair.
    """
    cleaned = re.sub(r'[^0-9:]', '', timestr).strip()
    hour_part, minute_part = cleaned.split(":")
    return time(hour=int(hour_part), minute=int(minute_part))
def get_routine():
    """Return the whole routine as a JSON array string, sorted by start time.

    Returns a plain-text message (not JSON) when no routine exists yet;
    callers detect real data by the leading '['.
    """
    routine = load_json(ROUTINE_FILE_PATH, [])
    if not routine:
        return "You have not set your daily routine yet."
    ordered = sorted(routine, key=lambda entry: parse_time(entry['start']))
    return json.dumps(ordered)
def get_task_by_time(query_time=None):
    """
    Look up the routine activity at a single point in time.

    Args:
        query_time: Optional "HH:MM" string; defaults to the current
            system time when omitted.

    Returns:
        A JSON string whose "status" is "found" (a slot covers the queried
        time), "next_found" (user is free; the next — or, wrapping past
        midnight, the first — slot is reported), "not_found", or "error"
        (no routine set / bad time format).
    """
    routine = load_json(ROUTINE_FILE_PATH, [])
    if not routine:
        return json.dumps({"status": "error", "message": "No daily routine is set."})
    # 1. Use current system time if not specified
    if query_time is None:
        now_dt = datetime.now()
        query_time = now_dt.strftime('%H:%M')
    else:
        # Validate time format
        try:
            # Clean query time for validation (LLM sometimes adds extra characters)
            clean_query_time = re.sub(r'[^0-9:]', '', query_time).strip()
            datetime.strptime(clean_query_time, '%H:%M')
            now_dt = datetime.combine(datetime.today().date(), parse_time(clean_query_time))
            query_time = clean_query_time
        except ValueError:
            return json.dumps({"status": "error", "message": "Invalid time format. Please use HH:MM."})
    qt = now_dt.time()
    # 2. Check for a task in progress at the queried time (current task)
    for slot in routine:
        start = parse_time(slot['start'])
        end = parse_time(slot['end'])
        if start < end:
            in_range = start <= qt < end
        else:  # wraps over midnight (e.g. 23:00-01:00)
            in_range = qt >= start or qt < end
        if in_range:
            return json.dumps({"status": "found", "time": query_time, "start": slot['start'], "end": slot['end'], "activity": slot['activity']})
    # 3. Check for the next upcoming task
    routine.sort(key=lambda x: parse_time(x['start']))
    next_task = None
    # Find the next task starting at or after the queried time
    for slot in routine:
        start_time = parse_time(slot['start'])
        if start_time >= qt:
            next_task = slot
            break
    if next_task:
        return json.dumps({"status": "next_found", "time": query_time, "start": next_task['start'], "end": next_task['end'], "activity": next_task['activity']})
    # 4. Wrap-around: past the day's last slot, so the "next" task is
    # tomorrow's first entry (routine is already sorted by start time).
    if routine:
        wrap_around_task = routine[0]
        return json.dumps({"status": "next_found", "time": query_time, "start": wrap_around_task['start'], "end": wrap_around_task['end'], "activity": wrap_around_task['activity']})
    # Unreachable in practice (empty routine returns "error" above), kept
    # as a defensive fallback.
    return json.dumps({"status": "not_found", "time": query_time, "message": "No activity found for the current or upcoming time."})
def add_routine_entry(start, end, activity):
    """Validate, insert and persist a new routine slot.

    Returns a JSON status string on success, or a plain-text ERROR string
    (relayed back to the LLM) when start/end are not valid HH:MM times.
    """
    routine = load_json(ROUTINE_FILE_PATH, [])
    try:
        # Strip LLM decorations (AM/PM etc.) before validating.
        clean_start = re.sub(r'[^0-9:]', '', start).strip()
        clean_end = re.sub(r'[^0-9:]', '', end).strip()
        parse_time(clean_start)
        parse_time(clean_end)
    except Exception:
        return f"ERROR: Invalid time format received. LLM requested start: '{start}', end: '{end}'. Please use strict HH:MM format."
    routine.append({
        "start": clean_start,
        "end": clean_end,
        "activity": activity.strip(),
    })
    routine.sort(key=lambda entry: parse_time(entry['start']))
    save_json(ROUTINE_FILE_PATH, routine)
    return json.dumps({"status": "success", "message": f"Added {activity} from {clean_start} to {clean_end}."})
def remove_routine_entry(activity_keyword):
    """Delete every routine entry whose activity contains *activity_keyword*.

    Case-insensitive substring match; returns a JSON status string with
    the number of entries removed, or "not_found" if nothing matched.
    """
    routine = load_json(ROUTINE_FILE_PATH, [])
    keyword = activity_keyword.lower()
    kept = [entry for entry in routine if keyword not in entry['activity'].lower()]
    removed = len(routine) - len(kept)
    if removed == 0:
        return json.dumps({"status": "not_found", "keyword": activity_keyword})
    save_json(ROUTINE_FILE_PATH, kept)
    return json.dumps({"status": "success", "removed_count": removed, "keyword": activity_keyword})
# ========== Other Assistant Features (Included for functionality) ==========
def get_favorite():
    """Report the user's stored favourite colour, if one has been saved."""
    stored = load_json(FAVORITES_FILE_PATH, {})
    if "color" not in stored:
        return "It's a tricky question, you don't have any favorite color."
    return f"Your favorite color is {stored['color']}."
def set_favorite_color(color):
    """Persist *color* as the user's favourite colour and confirm it."""
    stored = load_json(FAVORITES_FILE_PATH, {})
    stored.update(color=color)
    save_json(FAVORITES_FILE_PATH, stored)
    return f"Got it! I'll remember your favorite color is {color}."
def tell_joke():
    """Return a joke from pyjokes, or a canned fallback when unavailable.

    The fallback covers both a failed pyjokes import (module is None) and
    a runtime error inside pyjokes itself.
    """
    fallback = "Why do programmers prefer dark mode? Because light attracts bugs."
    if pyjokes is None:
        return fallback
    try:
        return pyjokes.get_joke()
    except Exception as e:
        print(f"Error fetching joke from pyjokes: {e}")
        return fallback
def tell_story(topic=""):
    """Ask the local LLM for a short, student-friendly story.

    When *topic* is given the story is about it; otherwise a default
    prompt about a young coder named Ishu is used.
    """
    if topic:
        prompt = f"Tell me a short, imaginative story about {topic}. Make the story suitable for a student and end with a gentle lesson."
    else:
        prompt = "Tell me a short, imaginative story (about 100 words) focusing on the adventures of a young coder named Ishu. Make the story suitable for a student and end with a gentle lesson."
    reply = ollama_response(prompt)
    return reply.get("content", "I'm having trouble thinking of a good story right now.")
def get_weather(city, api_key):
    """Fetch the current weather for *city* from OpenWeatherMap.

    Returns a human-readable sentence; never raises — HTTP failures and
    network/parse errors are reported as apology strings instead.

    Fix: a 10-second timeout is added so a hung API call cannot freeze
    the assistant's main loop (a timeout surfaces as the generic error
    message via the except clause).
    """
    try:
        url = f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}&units=metric"
        r = requests.get(url, timeout=10)
        if r.status_code == 200:
            data = r.json()
            temp = data["main"]["temp"]
            cond = data["weather"][0]["description"]
            return f"The weather in {city} is {cond} with a temperature of {temp}°C."
        else:
            return "Sorry, I couldn't get the weather. Is the city name correct?"
    except Exception:
        return "Sorry, there was an error fetching the weather."
# ========== End of Other Assistant Features ==========
# Dispatch table: tool names (as emitted in the LLM's tool_call JSON)
# mapped to the local Python implementations defined above.
TOOL_MAPPER = {
    "get_routine": get_routine,
    "get_task_by_time": get_task_by_time,
    "add_routine_entry": add_routine_entry,
    "remove_routine_entry": remove_routine_entry,
}
# ========== Main Loop with Manual Tool Execution Logic ==========
def main():
    """
    Top-level REPL: read a command (speech or keyboard), intercept a few
    local commands (mode switch, exit, routine queries) without the LLM,
    and route everything else through Ollama with manual tool execution.
    """
    global CURRENT_MODE
    # NOTE: You must replace this with your actual OpenWeatherMap API key.
    # NOTE(review): WEATHER_API_KEY is never used in this loop — the
    # get_weather tool is not wired up here; verify whether that's intended.
    WEATHER_API_KEY = "YOUR_OPENWEATHERMAP_API_KEY"
    speak("Hello! I'm Ishu.")
    select_initial_mode()
    speak(f"Starting in {'Speech' if CURRENT_MODE == 'S' else 'Written'} mode. Say or type 'change mode' to switch.", blocking=True)
    # Conversation history seeded with the tool-calling system prompt.
    # NOTE(review): this list grows without bound over a long session.
    chat_history = [
        {"role": "system", "content": OLLAMA_SYSTEM_PROMPT},
    ]
    while True:
        query = ""
        if CURRENT_MODE == 'S':
            speak("Listening...", blocking=True)
            query = listen_whisper().lower()
            if not query:
                speak("Sorry, I didn't catch that. Can you repeat?", blocking=True)
                continue
        else:  # CURRENT_MODE == 'W'
            query = listen_written()
        print(f"User said: {query}")
        # --- COMMAND HANDLING LOGIC ---
        if "change mode" in query:
            new_mode = 'W' if CURRENT_MODE == 'S' else 'S'
            CURRENT_MODE = new_mode
            speak(f"Mode switched to {'Speech' if CURRENT_MODE == 'S' else 'Written'} mode.", blocking=True)
            continue
        elif "thank you" in query:
            speak("Mention not! Have a great day!", blocking=True)
            break
        elif "exit" in query or "quit" in query or "goodbye" in query or "stop listening" in query:
            speak("Goodbye! Have a great day!", blocking=True)
            break
        # --- Local Query Interception (NOW/NEXT Distinction) ---
        user_input_lower = query.lower()
        # Define phrases for NOW (simple current task)
        time_query_now = [
            "what should i do now",
            "what is my current task"
        ]
        # Define phrases for NEXT (current task + next task)
        time_query_next = [
            "what should i do next",
            "what's my next task",
            "what is my next task"
        ]
        routine_query_phrases = ["what is my routine", "show my routine", "daily schedule"]
        is_local_tool_query = False
        tool_to_call = None
        is_next_task_query = False
        if any(phrase in user_input_lower for phrase in routine_query_phrases):
            is_local_tool_query = True
            tool_to_call = "get_routine"
        elif any(phrase in user_input_lower for phrase in time_query_next):
            is_local_tool_query = True
            tool_to_call = "get_task_by_time"
            is_next_task_query = True  # This will trigger the dual-task response
        elif any(phrase in user_input_lower for phrase in time_query_now):
            is_local_tool_query = True
            tool_to_call = "get_task_by_time"
        if is_local_tool_query:
            # Execute the function locally and bypass Ollama.
            print(f"Executing Local Tool: {tool_to_call}()")
            # --- Local Output Handling ---
            try:
                output = ""
                # 1. Handle get_routine()
                if tool_to_call == "get_routine":
                    response_json_string = get_routine()
                    # A '[' prefix means real JSON; otherwise it's the
                    # plain-text "no routine yet" message.
                    if response_json_string.startswith('['):
                        task_list = json.loads(response_json_string)
                        # Format for clear console output
                        header = "| Start | End | Activity |\n|---|---|---|"
                        output_list = [f"| {t['start']} | {t['end']} | {t['activity']} |" for t in task_list]
                        output = f"## Your Full Daily Routine 🗓️\n{header}\n" + "\n".join(output_list)
                    else:
                        output = response_json_string
                # 2. Handle get_task_by_time()
                elif tool_to_call == "get_task_by_time":
                    # First call to find the current/next task based on current time
                    response_json_string = get_task_by_time()
                    task_data = json.loads(response_json_string)
                    output = ""
                    if task_data.get("status") == "found":
                        current_activity = task_data.get('activity')
                        current_end_time = task_data.get('end')
                        # --- LOGIC FOR "WHAT SHOULD I DO NEXT" ---
                        if is_next_task_query:
                            # Search for the next one using the current task's end time
                            next_task_json_string = get_task_by_time(query_time=current_end_time)
                            next_task_data = json.loads(next_task_json_string)
                            if next_task_data.get("status") in ["found", "next_found"]:
                                next_activity = next_task_data.get('activity')
                                next_start_time = next_task_data.get('start')
                                output = (
                                    f"Your current task is **{current_activity}** (Ends at {current_end_time}). "
                                    f"Your *next* scheduled task is **{next_activity}** starting at {next_start_time}."
                                )
                            else:
                                output = f"You are currently doing **{current_activity}** (Ends at {current_end_time}). There is no further scheduled task after this."
                        # LOGIC for "WHAT SHOULD I DO NOW"
                        else:
                            output = f"Right now, you should be doing: **{current_activity}** (Ends at {current_end_time})."
                    elif task_data.get("status") == "next_found":
                        # If no task is found, but the next one is found (user is free)
                        output = f"You are currently free! Your next scheduled activity is **{task_data.get('activity')}** starting at {task_data.get('start')}."
                    else:
                        output = "No scheduled activity found for the current or upcoming time. Enjoy the free time!"
                speak(output, blocking=False)
                print(f"Ishu says: {output}")
                continue
            except json.JSONDecodeError:
                # NOTE(review): despite the message, control does NOT reach
                # the Ollama branch below (it's the else of this if); the
                # loop simply restarts. Verify whether a real fallthrough
                # was intended.
                print(f"Error processing local tool output for {tool_to_call}. Falling through to Ollama.")
                pass
            # --- End of Local Output Handling ---
        # --- End of Local Query Interception ---
        # *** Default Command to Ollama LLM (Manual Tool Execution) ***
        else:
            # 1. Start the conversation with the user's query
            # CRITICAL: Always append the current query to history for the LLM's first pass
            current_messages = chat_history + [{"role": "user", "content": query}]
            response_message = ollama_response(query, history=current_messages)
            response_content = response_message.get("content", "")
            # 2. Add the LLM's initial response to history
            chat_history.append(response_message)
            tool_calls = []
            # --- Robust Multi-Tool JSON Parsing ---
            # Use regex to find all JSON blocks (handling chatty and batched responses)
            # NOTE(review): the non-greedy {.*?} cannot span nested braces,
            # so {"tool_call": {...}} may be truncated — confirm against
            # real model output.
            json_matches = re.findall(r'(\s*\{.*?\}\s*)', response_content, re.DOTALL)
            for match in json_matches:
                try:
                    parsed_json = json.loads(match.strip())
                    if "tool_call" in parsed_json:
                        tool_calls.append(parsed_json["tool_call"])
                except json.JSONDecodeError:
                    # Ignore invalid JSON blocks
                    pass
            if tool_calls:
                # --- Multi-Tool Execution Loop ---
                executed_tools_summary = []
                for i, tool_call in enumerate(tool_calls):
                    func_name = tool_call.get("name")
                    func_args = tool_call.get("arguments", {})
                    if func_name in TOOL_MAPPER:
                        print(f"Executing Tool {i+1}/{len(tool_calls)}: {func_name} with args: {func_args}")
                        try:
                            if func_args is None:
                                func_args = {}
                            tool_output = TOOL_MAPPER[func_name](**func_args)
                            executed_tools_summary.append(f"Tool {i+1} ({func_name}) Success: {tool_output[:50]}...")
                        except Exception as e:
                            tool_output = f"ERROR executing {func_name}: {e}"
                            executed_tools_summary.append(f"Tool {i+1} ({func_name}) FAILED.")
                        # Add the Tool's output (as a function result) to history
                        # We temporarily add the SYSTEM prompt here to reinforce tool use discipline
                        chat_history.append({"role": "system", "content": OLLAMA_SYSTEM_PROMPT})
                        chat_history.append({
                            "role": "tool",
                            "content": tool_output,
                        })
                    else:
                        executed_tools_summary.append(f"Tool {i+1} FAILED: Tool '{func_name}' is not implemented.")
                # 3. Final Call to LLM for Conversational Summary
                print(f"--- Execution Complete. Calling LLM for final answer. ---")
                # --- CRITICAL FIX: Clean the history before the final call ---
                # Remove the strict SYSTEM prompt to allow conversational mode
                history_for_final_call = [msg for msg in chat_history if msg['role'] != 'system']
                # Use a specific, strong prompt for the final answer
                final_response_message = ollama_response(
                    "Based ONLY on the tool results in the last messages, summarize the actions taken (added/removed tasks) and answer the user's original query in a friendly, conversational way.",
                    history=history_for_final_call  # Use the cleaned history!
                )
                # 4. Add final LLM response to history and speak
                chat_history.append(final_response_message)
                # We ONLY speak the final, cleaned-up response from the LLM.
                speak(final_response_message["content"], blocking=True)
            # 5. Handle standard LLM conversation (No tool call returned)
            elif response_content:
                # LLM spoke directly (joke, story, general question). Just speak the content.
                speak(response_message["content"], blocking=True)
            else:
                speak("I received an empty response from the LLM. Please check your Ollama configuration or model.", blocking=True)
# Script entry point: only start the assistant when run directly.
if __name__ == "__main__":
    main()