Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .env.template
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ AZURE_OPENAI_MODEL_CHAT="gpt-5"
AZURE_OPENAI_MODEL_EMBEDDING="text-embedding-3-small"
AZURE_OPENAI_MODEL_REASONING="o4-mini"

## Ollama Settings
OLLAMA_MODEL_CHAT="gemma3:270m"

# ---------
# Tools
# ---------
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/labeler.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ jobs:
permissions:
contents: read
pull-requests: write
issues: write
runs-on: ubuntu-latest
steps:
- name: labeler action
Expand Down
88 changes: 78 additions & 10 deletions scripts/ollama_operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,23 @@

import typer
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage
from pydantic import BaseModel, Field

from template_langgraph.llms.ollamas import OllamaWrapper
from template_langgraph.loggers import get_logger


class Profile(BaseModel):
    """Structured profile extracted from free-form user text.

    Used as the target schema for ``chat_model.with_structured_output``,
    so field descriptions guide the model's extraction.
    """

    # Given and family names of the user.
    first_name: str = Field(..., description="First name of the user")
    last_name: str = Field(..., description="Last name of the user")
    # Age in years.
    age: int = Field(..., description="Age of the user")
    # Free-text origin; the description tells the model what granularity is acceptable.
    origin: str = Field(..., description="Origin of the user, e.g., country or city")


# Initialize the Typer application
app = typer.Typer(
add_completion=False,
Expand All @@ -17,9 +30,9 @@


@app.command()
def chat(
    query: str = typer.Option(
        "Explain the concept of Fourier transform.",
        "--query",
        "-q",
        help="Query to run against the Ollama model",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
    stream: bool = typer.Option(
        False,
        "--stream",
        "-s",
        help="Enable streaming output",
    ),
):
    """Send a single query to the Ollama chat model and log the reply.

    With ``--stream`` the response is printed token-by-token as chunks
    arrive; otherwise the full response is fetched in one call.
    """
    # Raise log verbosity only when explicitly requested.
    if verbose:
        logger.setLevel(logging.DEBUG)

    logger.info("Running...")
    chat_model = OllamaWrapper().chat_model

    if stream:
        # Accumulate chunk contents so the final output can also be logged.
        response = ""
        for chunk in chat_model.stream(
            input=[
                HumanMessage(content=query),
            ],
        ):
            # Print incrementally; flush so tokens appear immediately.
            print(
                chunk.content,
                end="",
                flush=True,
            )
            response += str(chunk.content)
        logger.info(f"Output: {response}")
    else:
        response = chat_model.invoke(
            input=[
                HumanMessage(content=query),
            ],
        )
        # Full message dump only at DEBUG level to keep INFO output concise.
        logger.debug(
            response.model_dump_json(
                indent=2,
                exclude_none=True,
            )
        )
        logger.info(f"Output: {response.content}")


@app.command()
def structured_output(
    query: str = typer.Option(
        "I'm Taro Okamoto from Japan. 30 years old.",
        "--query",
        "-q",
        help="Query to run against the Ollama model",
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose output",
    ),
):
    """Extract a ``Profile`` from the query via structured output.

    Binds the ``Profile`` schema to the chat model with
    ``with_structured_output`` so the reply is parsed into the model
    rather than returned as free text.
    """
    # Raise log verbosity only when explicitly requested.
    if verbose:
        logger.setLevel(logging.DEBUG)

    logger.info("Running...")
    chat_model = OllamaWrapper().chat_model
    profile = chat_model.with_structured_output(
        schema=Profile,
    ).invoke(
        input=[
            HumanMessage(content=query),
        ],
    )
    # NOTE: the stray `logger.info(f"Output: {response.content}")` line from the
    # source (a deleted diff line) referenced an undefined `response` and is removed.
    logger.info(f"Output: {profile.model_dump_json(indent=2, exclude_none=True)}")


if __name__ == "__main__":
Expand Down
2 changes: 1 addition & 1 deletion template_langgraph/llms/ollamas.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@


class Settings(BaseSettings):
ollama_model_chat: str = "phi3:latest"
ollama_model_chat: str = "gemma3:270m"

model_config = SettingsConfigDict(
env_file=".env",
Expand Down