
Commit 3d68c84

Merge pull request #42 from ks6088ts-labs/feature/issue-41_ollama
add ollama chat cli
2 parents 6e92fc4 + b4f62a9

File tree

4 files changed: +117 -0 lines changed

pyproject.toml
scripts/ollama_operator.py
template_langgraph/llms/ollamas.py
uv.lock

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -11,6 +11,7 @@ dependencies = [
     "httpx>=0.28.1",
     "langchain-azure-ai>=0.1.4",
     "langchain-community>=0.3.27",
+    "langchain-ollama>=0.3.6",
     "langchain-openai>=0.3.28",
     "langchain-text-splitters>=0.3.9",
     "langgraph>=0.6.2",

scripts/ollama_operator.py

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
+import logging
+
+import typer
+from dotenv import load_dotenv
+
+from template_langgraph.llms.ollamas import OllamaWrapper
+from template_langgraph.loggers import get_logger
+
+# Initialize the Typer application
+app = typer.Typer(
+    add_completion=False,
+    help="Ollama operator CLI",
+)
+
+# Set up logging
+logger = get_logger(__name__)
+
+
+@app.command()
+def run(
+    query: str = typer.Option(
+        "What is the weather like today?",
+        "--query",
+        "-q",
+        help="Query to run against the Ollama model",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    logger.info("Running...")
+    chat_model = OllamaWrapper().chat_model
+    response = chat_model.invoke(
+        input=query,
+    )
+    logger.debug(
+        response.model_dump_json(
+            indent=2,
+            exclude_none=True,
+        )
+    )
+    logger.info(f"Output: {response.content}")
+
+
+if __name__ == "__main__":
+    load_dotenv(
+        override=True,
+        verbose=True,
+    )
+    app()
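
Because run is the only registered command, Typer exposes it at the top level, so the CLI can be invoked as `python scripts/ollama_operator.py --query "..." [--verbose]`. A minimal in-process smoke test might look like this (a sketch assuming the repository root is on the import path and a local Ollama server is running; not part of the commit):

    from typer.testing import CliRunner

    from scripts.ollama_operator import app

    runner = CliRunner()
    result = runner.invoke(app, ["--query", "Reply with one word.", "-v"])
    assert result.exit_code == 0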

template_langgraph/llms/ollamas.py

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+from functools import lru_cache
+
+from langchain_ollama import ChatOllama
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class Settings(BaseSettings):
+    ollama_model_chat: str = "phi3:latest"
+
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        env_ignore_empty=True,
+        extra="ignore",
+    )
+
+
+@lru_cache
+def get_ollama_settings() -> Settings:
+    return Settings()
+
+
+class OllamaWrapper:
+    def __init__(self, settings: Settings | None = None):
+        if settings is None:
+            settings = get_ollama_settings()
+
+        self.chat_model = ChatOllama(
+            model=settings.ollama_model_chat,
+            temperature=0.0,
+            streaming=True,
+        )
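
get_ollama_settings is cached with lru_cache, so the .env file is read only once per process; passing a Settings instance to OllamaWrapper bypasses that cache, and the model name can also be overridden via the OLLAMA_MODEL_CHAT environment variable. A minimal usage sketch (assuming a local Ollama daemon with phi3:latest pulled; not part of the commit):

    from template_langgraph.llms.ollamas import OllamaWrapper, Settings

    # Explicit settings override the cached, .env-driven defaults.
    wrapper = OllamaWrapper(settings=Settings(ollama_model_chat="phi3:latest"))
    for chunk in wrapper.chat_model.stream("Name three prime numbers."):
        print(chunk.content, end="", flush=True)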

uv.lock

Lines changed: 28 additions & 0 deletions
Some generated files (the uv.lock changes) are not rendered by default.

0 commit comments
