Merged
1 change: 1 addition & 0 deletions pyproject.toml
@@ -11,6 +11,7 @@ dependencies = [
     "httpx>=0.28.1",
     "langchain-azure-ai>=0.1.4",
     "langchain-community>=0.3.27",
+    "langchain-ollama>=0.3.6",
     "langchain-openai>=0.3.28",
     "langchain-text-splitters>=0.3.9",
     "langgraph>=0.6.2",
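With the dependency added, the environment needs a sync to pull the new package. A minimal sketch, assuming the project is managed with uv (the uv.lock changes at the bottom of this diff suggest so):

    uv sync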
57 changes: 57 additions & 0 deletions scripts/ollama_operator.py
@@ -0,0 +1,57 @@
import logging

import typer
from dotenv import load_dotenv

from template_langgraph.llms.ollamas import OllamaWrapper
from template_langgraph.loggers import get_logger

# Initialize the Typer application
app = typer.Typer(
    add_completion=False,
    help="Ollama operator CLI",
)

# Set up logging
logger = get_logger(__name__)


@app.command()
def run(
    query: str = typer.Option(
        "What is the weather like today?",
        "--query",
        "-q",
        help="Query to run against the Ollama model",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
):
    # Raise the log level when verbose output is requested
    if verbose:
        logger.setLevel(logging.DEBUG)

    logger.info("Running...")
    chat_model = OllamaWrapper().chat_model
    response = chat_model.invoke(
        input=query,
    )
    logger.debug(
        response.model_dump_json(
            indent=2,
            exclude_none=True,
        )
    )
    logger.info(f"Output: {response.content}")


if __name__ == "__main__":
    load_dotenv(
        override=True,
        verbose=True,
    )
    app()
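A possible invocation of the new CLI, assuming it is run from the repository root (uv run is one option given the lockfile; plain python works once the dependencies are installed):

    uv run python scripts/ollama_operator.py --query "What is 2 + 2?" --verbose

The short forms -q and -v map to the same typer.Option declarations above.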
31 changes: 31 additions & 0 deletions template_langgraph/llms/ollamas.py
@@ -0,0 +1,31 @@
from functools import lru_cache

from langchain_ollama import ChatOllama
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    # Field name maps to the OLLAMA_MODEL_CHAT environment variable
    ollama_model_chat: str = "phi3:latest"

    model_config = SettingsConfigDict(
        env_file=".env",
        env_ignore_empty=True,
        extra="ignore",
    )


@lru_cache
def get_ollama_settings() -> Settings:
    # Cached so the environment/.env is parsed only once per process
    return Settings()


class OllamaWrapper:
    def __init__(self, settings: Settings | None = None):
        if settings is None:
            settings = get_ollama_settings()

        self.chat_model = ChatOllama(
            model=settings.ollama_model_chat,
            temperature=0.0,
            streaming=True,
        )
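Because Settings is a pydantic-settings model, the chat model can be swapped without code changes: field names map to environment variables case-insensitively, so OLLAMA_MODEL_CHAT in the environment or in .env overrides the phi3:latest default. A minimal usage sketch, assuming a local Ollama server with the named model already pulled (llama3:latest here is a hypothetical override):

    # .env (hypothetical override of the default model)
    OLLAMA_MODEL_CHAT=llama3:latest

    # Python: use the wrapper directly
    from template_langgraph.llms.ollamas import OllamaWrapper

    chat_model = OllamaWrapper().chat_model
    print(chat_model.invoke("Say hello in one sentence.").content)

Note that get_ollama_settings() is wrapped in lru_cache, so settings are read once per process; environment changes made afterwards will not be picked up by new OllamaWrapper instances.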
28 changes: 28 additions & 0 deletions uv.lock

(Generated lockfile; diff not rendered.)