# https://python.langchain.com/docs/tutorials/chatbot

from os import environ
from typing import Sequence

from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    trim_messages,
)
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_google_vertexai import ChatVertexAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import Annotated, TypedDict

from opentelemetry import trace


def main() -> None:
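    # Gemini chat model served through Vertex AI; the project is read from the
    # GOOGLE_CLOUD_PROJECT environment variable and credentials come from the
    # standard Google application-default-credentials lookup.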
    model = ChatVertexAI(
        model="gemini-1.5-flash",
        project=environ.get("GOOGLE_CLOUD_PROJECT"),
    )

    # # Define a new graph
    # workflow = StateGraph(state_schema=MessagesState)

    # # Define the function that calls the model
    # def call_model(state: MessagesState):
    #     response = model.invoke(state["messages"])
    #     return {"messages": response}

    # # Define the (single) node in the graph
    # workflow.add_edge(START, "model")
    # workflow.add_node("model", call_model)

    # # Add memory
    # memory = MemorySaver()
    # app = workflow.compile(checkpointer=memory)

    # config = {"configurable": {"thread_id": "abc123"}}

    # query = "Hi! I'm Bob."

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][
    #     -1
    # ].pretty_print()  # output contains all messages in state

    # query = "What's my name?"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # config = {"configurable": {"thread_id": "abc234"}}

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # config = {"configurable": {"thread_id": "abc123"}}

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # prompt_template = ChatPromptTemplate.from_messages(
    #     [
    #         (
    #             "system",
    #             "You talk like a pirate. Answer all questions to the best of your ability.",
    #         ),
    #         MessagesPlaceholder(variable_name="messages"),
    #     ]
    # )

    # workflow = StateGraph(state_schema=MessagesState)

    # def call_model(state: MessagesState):
    #     # highlight-start
    #     prompt = prompt_template.invoke(state)
    #     response = model.invoke(prompt)
    #     # highlight-end
    #     return {"messages": response}

    # workflow.add_edge(START, "model")
    # workflow.add_node("model", call_model)

    # memory = MemorySaver()
    # app = workflow.compile(checkpointer=memory)

    # config = {"configurable": {"thread_id": "abc345"}}
    # query = "Hi! I'm Jim."

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

    # query = "What is my name?"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke({"messages": input_messages}, config)
    # output["messages"][-1].pretty_print()

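    # Prompt template for the active chatbot: a system message parameterized by
    # {language}, followed by the running conversation.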
    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are a helpful assistant. Answer all questions to the best of your ability in {language}.",
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )

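    # Custom graph state: the message history (merged with the add_messages
    # reducer, so new messages are appended rather than overwritten) plus the
    # target reply language.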
    # highlight-next-line
    class State(TypedDict):
        # highlight-next-line
        messages: Annotated[Sequence[BaseMessage], add_messages]
        # highlight-next-line
        language: str

    # workflow = StateGraph(state_schema=State)

    # def call_model(state: State):
    #     prompt = prompt_template.invoke(state)
    #     response = model.invoke(prompt)
    #     return {"messages": [response]}

    # workflow.add_edge(START, "model")
    # workflow.add_node("model", call_model)

    # memory = MemorySaver()
    # app = workflow.compile(checkpointer=memory)

    # config = {"configurable": {"thread_id": "abc456"}}
    # query = "Hi! I'm Bob."
    # language = "Spanish"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke(
    #     # highlight-next-line
    #     {"messages": input_messages, "language": language},
    #     config,
    # )
    # output["messages"][-1].pretty_print()

    # query = "What is my name?"

    # input_messages = [HumanMessage(query)]
    # output = app.invoke(
    #     {"messages": input_messages},
    #     config,
    # )
    # output["messages"][-1].pretty_print()

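    # Trimmer that keeps only the most recent messages fitting in max_tokens,
    # counting tokens with the model itself, always retaining the system
    # message, and starting the kept window on a human message.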
    trimmer = trim_messages(
        max_tokens=65,
        strategy="last",
        token_counter=model,
        include_system=True,
        allow_partial=False,
        start_on="human",
    )

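    # Canned conversation history used to demonstrate trimming.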
    messages = [
        SystemMessage(content="you're a good assistant"),
        HumanMessage(content="hi! I'm bob"),
        AIMessage(content="hi!"),
        HumanMessage(content="I like vanilla ice cream"),
        AIMessage(content="nice"),
        HumanMessage(content="whats 2 + 2"),
        AIMessage(content="4"),
        HumanMessage(content="thanks"),
        AIMessage(content="no problem!"),
        HumanMessage(content="having fun?"),
        AIMessage(content="yes!"),
    ]

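    # Demo call: shows which messages survive trimming. The result is
    # discarded here; call_model below re-trims the state on every turn.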
    trimmer.invoke(messages)

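    # Single-node graph (START -> "model"): the node trims the history,
    # renders the prompt, and calls the model.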
    workflow = StateGraph(state_schema=State)

    def call_model(state: State):
        # highlight-start
        trimmed_messages = trimmer.invoke(state["messages"])
        prompt = prompt_template.invoke(
            {"messages": trimmed_messages, "language": state["language"]}
        )
        response = model.invoke(prompt)
        # highlight-end
        return {"messages": [response]}

    workflow.add_edge(START, "model")
    workflow.add_node("model", call_model)

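    # In-memory checkpointer: conversation state persists per thread_id for
    # the lifetime of the process.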
    memory = MemorySaver()
    app = workflow.compile(checkpointer=memory)

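    # The name appears only in the oldest messages, which the trimmer drops,
    # so the model should not be able to answer this.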
    config = {"configurable": {"thread_id": "abc567"}}
    query = "What is my name?"
    language = "English"

    # highlight-next-line
    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

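    # The math exchange is recent enough to survive trimming, so the model
    # should be able to refer back to it.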
    config = {"configurable": {"thread_id": "abc678"}}
    query = "What math problem did I ask?"
    language = "English"

    input_messages = messages + [HumanMessage(query)]
    output = app.invoke(
        {"messages": input_messages, "language": language},
        config,
    )
    output["messages"][-1].pretty_print()

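    # Stream the response as it is generated: stream_mode="messages" yields
    # (message_chunk, metadata) tuples per token.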
    config = {"configurable": {"thread_id": "abc789"}}
    query = "Hi I'm Todd, please tell me a joke."
    language = "English"

    input_messages = [HumanMessage(query)]
    # highlight-next-line
    for chunk, metadata in app.stream(
        {"messages": input_messages, "language": language},
        config,
        # highlight-next-line
        stream_mode="messages",
    ):
        if isinstance(chunk, AIMessage):  # Filter to just model responses
            print(chunk.content, end="|")


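# Run the demo under a root span. Note that without a TracerProvider and
# exporter configured elsewhere (none is set up in this script), the span
# is a no-op.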
with trace.get_tracer(__name__).start_as_current_span("demo-root-span"):
    main()