Skip to content

Commit 885da46

Browse files
authored
Merge pull request #88 from ks6088ts-labs/feature/issue-87_gemma3-270m
run gemma3:270m
2 parents 91aebef + 5b8d6b7 commit 885da46

File tree

4 files changed

+83
-11
lines changed

4 files changed

+83
-11
lines changed

.env.template

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@ AZURE_OPENAI_MODEL_CHAT="gpt-5"
2020
AZURE_OPENAI_MODEL_EMBEDDING="text-embedding-3-small"
2121
AZURE_OPENAI_MODEL_REASONING="o4-mini"
2222

23+
## Ollama Settings
24+
OLLAMA_MODEL_CHAT="gemma3:270m"
25+
2326
# ---------
2427
# Tools
2528
# ---------

.github/workflows/labeler.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ jobs:
88
permissions:
99
contents: read
1010
pull-requests: write
11+
issues: write
1112
runs-on: ubuntu-latest
1213
steps:
1314
- name: labeler action

scripts/ollama_operator.py

Lines changed: 78 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,23 @@
22

33
import typer
44
from dotenv import load_dotenv
5+
from langchain_core.messages import HumanMessage
6+
from pydantic import BaseModel, Field
57

68
from template_langgraph.llms.ollamas import OllamaWrapper
79
from template_langgraph.loggers import get_logger
810

11+
12+
class Profile(BaseModel):
13+
first_name: str = Field(..., description="First name of the user")
14+
last_name: str = Field(..., description="Last name of the user")
15+
age: int = Field(..., description="Age of the user")
16+
origin: str = Field(
17+
...,
18+
description="Origin of the user, e.g., country or city",
19+
)
20+
21+
922
# Initialize the Typer application
1023
app = typer.Typer(
1124
add_completion=False,
@@ -17,9 +30,9 @@
1730

1831

1932
@app.command()
20-
def run(
33+
def chat(
2134
query: str = typer.Option(
22-
"What is the weather like today?",
35+
"Explain the concept of Fourier transform.",
2336
"--query",
2437
"-q",
2538
help="Query to run against the Ollama model",
@@ -30,23 +43,78 @@ def run(
3043
"-v",
3144
help="Enable verbose output",
3245
),
46+
stream: bool = typer.Option(
47+
False,
48+
"--stream",
49+
"-s",
50+
help="Enable streaming output",
51+
),
3352
):
3453
# Set up logging
3554
if verbose:
3655
logger.setLevel(logging.DEBUG)
3756

3857
logger.info("Running...")
3958
chat_model = OllamaWrapper().chat_model
40-
response = chat_model.invoke(
41-
input=query,
42-
)
43-
logger.debug(
44-
response.model_dump_json(
45-
indent=2,
46-
exclude_none=True,
59+
60+
if stream:
61+
response = ""
62+
for chunk in chat_model.stream(
63+
input=[
64+
HumanMessage(content=query),
65+
],
66+
):
67+
print(
68+
chunk.content,
69+
end="",
70+
flush=True,
71+
)
72+
response += str(chunk.content)
73+
logger.info(f"Output: {response}")
74+
else:
75+
response = chat_model.invoke(
76+
input=[
77+
HumanMessage(content=query),
78+
],
4779
)
80+
logger.debug(
81+
response.model_dump_json(
82+
indent=2,
83+
exclude_none=True,
84+
)
85+
)
86+
logger.info(f"Output: {response.content}")
87+
88+
89+
@app.command()
90+
def structured_output(
91+
query: str = typer.Option(
92+
"I'm Taro Okamoto from Japan. 30 years old.",
93+
"--query",
94+
"-q",
95+
help="Query to run against the Ollama model",
96+
),
97+
verbose: bool = typer.Option(
98+
False,
99+
"--verbose",
100+
"-v",
101+
help="Enable verbose output",
102+
),
103+
):
104+
# Set up logging
105+
if verbose:
106+
logger.setLevel(logging.DEBUG)
107+
108+
logger.info("Running...")
109+
chat_model = OllamaWrapper().chat_model
110+
profile = chat_model.with_structured_output(
111+
schema=Profile,
112+
).invoke(
113+
input=[
114+
HumanMessage(content=query),
115+
],
48116
)
49-
logger.info(f"Output: {response.content}")
117+
logger.info(f"Output: {profile.model_dump_json(indent=2, exclude_none=True)}")
50118

51119

52120
if __name__ == "__main__":

template_langgraph/llms/ollamas.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66

77
class Settings(BaseSettings):
8-
ollama_model_chat: str = "phi3:latest"
8+
ollama_model_chat: str = "gemma3:270m"
99

1010
model_config = SettingsConfigDict(
1111
env_file=".env",

0 commit comments

Comments (0)