#!/usr/bin/env python3

import argparse
import json
import http.client
import sys

def list_models(hostname, port):
    """List available models from the Ollama server."""
    endpoint = "/api/tags"  # Correct endpoint for listing local models
    connection = http.client.HTTPConnection(hostname, port)
    try:
        connection.request("GET", endpoint)
        response = connection.getresponse()

        if response.status != 200:
            print(f"Error: HTTP {response.status} - {response.reason}")
            return

        data = json.loads(response.read().decode("utf-8"))
        models = data.get("models", [])

        print("Available Models:\n")
        for model in models:
            name = model.get("name", "Unknown")
            size = model.get("size", 0)
            modified_at = model.get("modified_at", "Unknown")
            details = model.get("details", {})
            parameter_size = details.get("parameter_size", "Unknown")
            quantization = details.get("quantization_level", "Unknown")
            format_ = details.get("format", "Unknown")

            # Display model information
            print(f"Name: {name}")
            print(f" Size: {size / 1_000_000_000:.2f} GB")  # Convert bytes to GB
            print(f" Modified At: {modified_at}")
            print(f" Parameters: {parameter_size}")
            print(f" Quantization Level: {quantization}")
            print(f" Format: {format_}")
            print("-" * 40)

    finally:
        connection.close()

def stream_ollama_response(hostname, port, model, prompt, system_prompt, temperature, max_tokens):
    """Stream responses from the Ollama server."""
    endpoint = "/api/generate"
    headers = {"Content-Type": "application/json"}
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "system": system_prompt,
        "stream": True,  # stream the reply as newline-delimited JSON objects (Ollama's default)
        # Sampling parameters belong in the "options" object; Ollama calls the
        # token limit "num_predict" rather than "max_tokens".
        "options": {
            "temperature": temperature,
            "num_predict": max_tokens
        }
    })

    connection = http.client.HTTPConnection(hostname, port)
    try:
        connection.request("POST", endpoint, body=payload, headers=headers)
        response = connection.getresponse()

        if response.status != 200:
            print(f"Error: HTTP {response.status} - {response.reason}")
            return

        concatenated_response = ""
        # The streaming endpoint emits one JSON object per line, so read the body line by line.
        for line in response:
            line = line.decode("utf-8").strip()
            if line:
                try:
                    response_data = json.loads(line)
                    word = response_data.get("response", "")
                    concatenated_response += word
                    print(word, end="", flush=True)
                except json.JSONDecodeError:
                    print(f"\nInvalid JSON: {line}")

        print("\n\nFinal Response:")
        print(concatenated_response)

    finally:
        connection.close()

def main():
    parser = argparse.ArgumentParser(description="Interact with Ollama server.")
    parser.add_argument("--ollama-hostname", required=True, help="Hostname of the Ollama server")
    parser.add_argument("--ollama-port", type=int, required=True, help="Port number of the Ollama server")

    # Mutually exclusive arguments
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--list-models", action="store_true",
                       help="List all available models on the Ollama server")
    group.add_argument("--model", help="Specify the model to use")

    # Other arguments
    parser.add_argument("--prompt", help="Prompt to send to the Ollama server")
    parser.add_argument("--system-prompt", default="You are a helpful assistant.",
                        help="System prompt to define the model's behavior")
    parser.add_argument("--temperature", type=float, default=0.7,
                        help="Temperature for response randomness")
    parser.add_argument("--max-tokens", type=int, default=100,
                        help="Maximum number of tokens to generate")

    args = parser.parse_args()

    # If --list-models is specified, list models and exit
    if args.list_models:
        list_models(args.ollama_hostname, args.ollama_port)
        return

    # Validate other required arguments
    if not args.prompt:
        print("Error: --prompt is required when --list-models is not specified.")
        sys.exit(1)

    if not args.model:
        print("Error: --model is required when --list-models is not specified.")
        sys.exit(1)

    # Stream response from the server
    stream_ollama_response(
        args.ollama_hostname,
        args.ollama_port,
        args.model,
        args.prompt,
        args.system_prompt,
        args.temperature,
        args.max_tokens
    )


if __name__ == "__main__":
    main()
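# Example invocations (illustrative only: the script filename, hostname, port,
# and model name below are assumptions, not part of this commit; 11434 is
# Ollama's default port):
#
#   python3 ollama_client.py --ollama-hostname localhost --ollama-port 11434 --list-models
#
#   python3 ollama_client.py --ollama-hostname localhost --ollama-port 11434 \
#       --model llama3 --prompt "Why is the sky blue?" --max-tokens 200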