diff --git a/pyproject.toml b/pyproject.toml
index d74dfc1..62a3143 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,6 +11,7 @@ dependencies = [
     "httpx>=0.28.1",
     "langchain-azure-ai>=0.1.4",
     "langchain-community>=0.3.27",
+    "langchain-ollama>=0.3.6",
     "langchain-openai>=0.3.28",
     "langchain-text-splitters>=0.3.9",
     "langgraph>=0.6.2",
diff --git a/scripts/ollama_operator.py b/scripts/ollama_operator.py
new file mode 100644
index 0000000..fb12280
--- /dev/null
+++ b/scripts/ollama_operator.py
@@ -0,0 +1,57 @@
+import logging
+
+import typer
+from dotenv import load_dotenv
+
+from template_langgraph.llms.ollamas import OllamaWrapper
+from template_langgraph.loggers import get_logger
+
+# Initialize the Typer application
+app = typer.Typer(
+    add_completion=False,
+    help="Ollama operator CLI",
+)
+
+# Set up logging
+logger = get_logger(__name__)
+
+
+@app.command()
+def run(
+    query: str = typer.Option(
+        "What is the weather like today?",
+        "--query",
+        "-q",
+        help="Query to run against the Ollama model",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    logger.info("Running...")
+    chat_model = OllamaWrapper().chat_model
+    response = chat_model.invoke(
+        input=query,
+    )
+    logger.debug(
+        response.model_dump_json(
+            indent=2,
+            exclude_none=True,
+        )
+    )
+    logger.info(f"Output: {response.content}")
+
+
+if __name__ == "__main__":
+    load_dotenv(
+        override=True,
+        verbose=True,
+    )
+    app()
diff --git a/template_langgraph/llms/ollamas.py b/template_langgraph/llms/ollamas.py
new file mode 100644
index 0000000..7616d42
--- /dev/null
+++ b/template_langgraph/llms/ollamas.py
@@ -0,0 +1,31 @@
+from functools import lru_cache
+
+from langchain_ollama import ChatOllama
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class Settings(BaseSettings):
+    ollama_model_chat: str = "phi3:latest"
+
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        env_ignore_empty=True,
+        extra="ignore",
+    )
+
+
+@lru_cache
+def get_ollama_settings() -> Settings:
+    return Settings()
+
+
+class OllamaWrapper:
+    def __init__(self, settings: Settings | None = None):
+        if settings is None:
+            settings = get_ollama_settings()
+
+        self.chat_model = ChatOllama(
+            model=settings.ollama_model_chat,
+            temperature=0.0,
+            streaming=True,
+        )
diff --git a/uv.lock b/uv.lock
index 98ed8ae..175a02c 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1884,6 +1884,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/6e/7d/9f75023c478e3b854d67da31d721e39f0eb30ae969ec6e755430cb1c0fb5/langchain_core-0.3.72-py3-none-any.whl", hash = "sha256:9fa15d390600eb6b6544397a7aa84be9564939b6adf7a2b091179ea30405b240", size = 442806, upload-time = "2025-07-24T00:40:06.994Z" },
 ]
 
+[[package]]
+name = "langchain-ollama"
+version = "0.3.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "langchain-core" },
+    { name = "ollama" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/82/67/93429a78d6fd40e2addf27e881db37e7f0076d712ffe9759ca0d5e10910e/langchain_ollama-0.3.6.tar.gz", hash = "sha256:4270c4b30b3f3d10850cb9a1183b8c77d616195e0d9717ac745ef7f7f6cc2b6e", size = 30479, upload-time = "2025-07-22T17:26:59.605Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/f3/c5/1e559f5b43d62850ea2b44097afc944f38894eac00e7feef3b42f0428916/langchain_ollama-0.3.6-py3-none-any.whl", hash = "sha256:b339bd3fcf913b8d606ad426ef39e7122695532507fcd85aa96271b3f33dc3df", size = 24535, upload-time = "2025-07-22T17:26:58.556Z" },
+]
+
 [[package]]
 name = "langchain-openai"
 version = "0.3.28"
@@ -2661,6 +2674,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/78/e3/6690b3f85a05506733c7e90b577e4762517404ea78bab2ca3a5cb1aeb78d/numpy-2.3.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619", size = 12977811, upload-time = "2025-07-24T21:29:18.234Z" },
 ]
 
+[[package]]
+name = "ollama"
+version = "0.5.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "httpx" },
+    { name = "pydantic" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/21/31/381d5429fffb088b55c0ace6d4c441dc355cf801620d7288813f77e9193a/ollama-0.5.2.tar.gz", hash = "sha256:7a575a90416a816231f216dbd10c3480b107218a90388c061fdf20d7ab7fe990", size = 43092, upload-time = "2025-08-05T23:02:08.404Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/76/10/d8a4e0468394e1093d32421bacd1e87452aa96b60a55f5f70766af5406f8/ollama-0.5.2-py3-none-any.whl", hash = "sha256:48ee9aed1c8f4cf2e4237b6d4cc36c328f1abc40da4aa6edf52698f757bc4164", size = 13423, upload-time = "2025-08-05T23:02:07.191Z" },
+]
+
 [[package]]
 name = "openai"
 version = "1.98.0"
@@ -4109,6 +4135,7 @@ dependencies = [
     { name = "httpx" },
     { name = "langchain-azure-ai" },
    { name = "langchain-community" },
+    { name = "langchain-ollama" },
     { name = "langchain-openai" },
     { name = "langchain-text-splitters" },
     { name = "langgraph" },
@@ -4145,6 +4172,7 @@ requires-dist = [
     { name = "httpx", specifier = ">=0.28.1" },
     { name = "langchain-azure-ai", specifier = ">=0.1.4" },
     { name = "langchain-community", specifier = ">=0.3.27" },
+    { name = "langchain-ollama", specifier = ">=0.3.6" },
     { name = "langchain-openai", specifier = ">=0.3.28" },
     { name = "langchain-text-splitters", specifier = ">=0.3.9" },
     { name = "langgraph", specifier = ">=0.6.2" },