diff --git a/examples/chatbot.py b/examples/chatbot.py
new file mode 100755
index 0000000..58ac2a2
--- /dev/null
+++ b/examples/chatbot.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+"""Interactive LLM chatbot example: stream replies while keeping chat history."""
+
+import readline  # Side-effect import: enables line editing/history for input()
+
+import lmstudio as lm
+
+model = lm.llm()
+chat = lm.Chat("You are a task focused AI assistant")
+
+while True:
+    try:
+        user_input = input("You (leave blank to exit): ")
+    except EOFError:
+        print()
+        break
+    if not user_input:
+        break
+    chat.add_user_message(user_input)
+    prediction_stream = model.respond_stream(
+        chat,
+        on_message=chat.append,
+    )
+    print("Bot: ", end="", flush=True)
+    for fragment in prediction_stream:
+        print(fragment.content, end="", flush=True)
+    print()
diff --git a/examples/terminal-sim.py b/examples/terminal-sim.py
new file mode 100755
index 0000000..2eedd17
--- /dev/null
+++ b/examples/terminal-sim.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+"""Simulated terminal example: the LLM completes accumulated console history."""
+
+import readline  # Side-effect import: enables line editing/history for input()
+
+import lmstudio as lm
+
+model = lm.llm()
+console_history = []
+
+while True:
+    try:
+        user_command = input("$ ")
+    except EOFError:
+        print()
+        break
+    if user_command.strip() == "exit":
+        break
+    console_history.append(f"$ {user_command}")
+    history_prompt = "\n".join(console_history)
+    prediction_stream = model.complete_stream(
+        history_prompt,
+        config={ "stopStrings": ["$"] },
+    )
+    for fragment in prediction_stream:
+        print(fragment.content, end="", flush=True)
+    print()
+    console_history.append(prediction_stream.result().content)