@@ -2,10 +2,23 @@
 
 import typer
 from dotenv import load_dotenv
+from langchain_core.messages import HumanMessage
+from pydantic import BaseModel, Field
 
 from template_langgraph.llms.ollamas import OllamaWrapper
 from template_langgraph.loggers import get_logger
 
+
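+# Schema for the structured_output command below; the model fills in these fields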
+class Profile(BaseModel):
+    first_name: str = Field(..., description="First name of the user")
+    last_name: str = Field(..., description="Last name of the user")
+    age: int = Field(..., description="Age of the user")
+    origin: str = Field(
+        ...,
+        description="Origin of the user, e.g., country or city",
+    )
+
+
 # Initialize the Typer application
 app = typer.Typer(
     add_completion=False,
@@ -17,9 +30,9 @@
 
 
 @app.command()
-def run(
+def chat(
     query: str = typer.Option(
-        "What is the weather like today?",
+        "Explain the concept of Fourier transform.",
         "--query",
         "-q",
         help="Query to run against the Ollama model",
@@ -30,23 +43,78 @@ def run(
         "-v",
         help="Enable verbose output",
     ),
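+    # New option: when set, the reply is streamed to stdout chunk by chunk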
+    stream: bool = typer.Option(
+        False,
+        "--stream",
+        "-s",
+        help="Enable streaming output",
+    ),
 ):
     # Set up logging
     if verbose:
         logger.setLevel(logging.DEBUG)
 
     logger.info("Running...")
     chat_model = OllamaWrapper().chat_model
-    response = chat_model.invoke(
-        input=query,
-    )
-    logger.debug(
-        response.model_dump_json(
-            indent=2,
-            exclude_none=True,
+
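+    # Stream mode prints each chunk as it arrives and accumulates the full reply;
+    # otherwise fall back to a single blocking invoke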
+    if stream:
+        response = ""
+        for chunk in chat_model.stream(
+            input=[
+                HumanMessage(content=query),
+            ],
+        ):
+            print(
+                chunk.content,
+                end="",
+                flush=True,
+            )
+            response += str(chunk.content)
+        logger.info(f"Output: {response}")
+    else:
+        response = chat_model.invoke(
+            input=[
+                HumanMessage(content=query),
+            ],
         )
+        logger.debug(
+            response.model_dump_json(
+                indent=2,
+                exclude_none=True,
+            )
+        )
+        logger.info(f"Output: {response.content}")
+
+
+@app.command()
+def structured_output(
+    query: str = typer.Option(
+        "I'm Taro Okamoto from Japan. 30 years old.",
+        "--query",
+        "-q",
+        help="Query to run against the Ollama model",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    # Set up logging
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    logger.info("Running...")
+    chat_model = OllamaWrapper().chat_model
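+    # with_structured_output(schema=Profile) has the model return a parsed Profile instance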
+    profile = chat_model.with_structured_output(
+        schema=Profile,
+    ).invoke(
+        input=[
+            HumanMessage(content=query),
+        ],
     )
-    logger.info(f"Output: {response.content}")
+    logger.info(f"Output: {profile.model_dump_json(indent=2, exclude_none=True)}")
 
 
 if __name__ == "__main__":