 from os import getenv
 
 from dotenv import load_dotenv
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_openai import AzureChatOpenAI
 from openai import AzureOpenAI
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 
 def init_args() -> argparse.Namespace:
@@ -14,18 +16,19 @@ def init_args() -> argparse.Namespace:
     )
     parser.add_argument("-s", "--system", default="Extract the event information.")
     parser.add_argument("-u", "--user", default="Alice and Bob are going to a science fair on Friday.")
+    parser.add_argument("-t", "--type", default="openai", choices=["openai", "langchain"])
     parser.add_argument("-v", "--verbose", action="store_true")
     return parser.parse_args()
 
 
 class ResponseFormat(BaseModel):
     # FIXME: Update the ResponseFormat class to match your actual response format
-    name: str
-    date: str
-    participants: list[str]
+    name: str = Field(description="The name of the event.")
+    date: str = Field(description="The date of the event.")
+    participants: list[str] = Field(description="The participants of the event.")
 
 
-def print_structured_output(
+def print_structured_output_openai(
     system: str,
     user: str,
 ):
@@ -60,6 +63,43 @@ def print_structured_output(
     print(completion.model_dump_json(indent=2))
 
 
+def print_structured_output_langchain(
+    system: str,
+    user: str,
+):
+    """
+    How to use:
+        Support for structured outputs was first added in API version 2024-08-01-preview.
+        It is available in the latest preview APIs as well as the latest GA API: 2024-10-21.
+
+    Install dependencies:
+        $ pip install openai python-dotenv pydantic langchain-openai
+
+    References:
+    - https://python.langchain.com/docs/how_to/structured_output/
+    """
+    llm = AzureChatOpenAI(
+        temperature=0,
+        api_key=getenv("AZURE_OPENAI_API_KEY"),
+        api_version=getenv("AZURE_OPENAI_API_VERSION"),
+        azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT"),
+        model=getenv("AZURE_OPENAI_GPT_MODEL"),
+    )
+    structured_llm = llm.with_structured_output(ResponseFormat)
+    response: ResponseFormat = structured_llm.invoke(
+        [
+            SystemMessage(
+                content=system,
+            ),
+            HumanMessage(
+                content=user,
+            ),
+        ]
+    )
+    print(response)
+    print(f"Name: {response.name}, Date: {response.date}, Participants: {response.participants}")
+
+
 if __name__ == "__main__":
     args = init_args()
 
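Configuration note (not part of the diff): the new AzureChatOpenAI block above pulls its settings from four environment variables via getenv(), loaded from .env by load_dotenv(). Below is a minimal sketch for checking that they are set before running the script; the variable names come from the diff, while the example values in the comments are placeholders, not real endpoints or deployments.

# Sketch only, not part of the PR. The variable names below come from the getenv()
# calls in the new print_structured_output_langchain(); the example values in the
# comments are placeholders.
from os import getenv

from dotenv import load_dotenv

REQUIRED_VARS = [
    "AZURE_OPENAI_API_KEY",      # .env: AZURE_OPENAI_API_KEY=<your-key>
    "AZURE_OPENAI_API_VERSION",  # e.g. 2024-10-21, the GA version named in the docstring
    "AZURE_OPENAI_ENDPOINT",     # e.g. https://<your-resource>.openai.azure.com/
    "AZURE_OPENAI_GPT_MODEL",    # the name of your chat model deployment
]

if __name__ == "__main__":
    load_dotenv()  # reads .env from the working directory, as the script does
    missing = [name for name in REQUIRED_VARS if not getenv(name)]
    if missing:
        raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
    print("All Azure OpenAI settings are present.")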
@@ -71,9 +111,18 @@ def print_structured_output(
     load_dotenv()
 
     try:
-        print_structured_output(
-            system=args.system,
-            user=args.user,
-        )
+        if args.type == "openai":
+            print_structured_output_openai(
+                system=args.system,
+                user=args.user,
+            )
+        elif args.type == "langchain":
+            print_structured_output_langchain(
+                system=args.system,
+                user=args.user,
+            )
+        else:
+            raise ValueError(f"Invalid type: {args.type}")
+
     except Exception as e:
         logging.error(e)
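A side note on the Field(description=...) additions: the structured-output helpers on both paths build a JSON schema from the Pydantic model, so those descriptions should reach the model as per-property documentation. A quick way to inspect that schema (assumes Pydantic v2, which provides model_json_schema()):

# Sketch only: print the JSON schema generated from ResponseFormat.
# The description strings added in this PR appear on each property.
import json

from pydantic import BaseModel, Field


class ResponseFormat(BaseModel):
    name: str = Field(description="The name of the event.")
    date: str = Field(description="The date of the event.")
    participants: list[str] = Field(description="The participants of the event.")


if __name__ == "__main__":
    print(json.dumps(ResponseFormat.model_json_schema(), indent=2))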