28 changes: 18 additions & 10 deletions libs/langchain_v1/langchain/agents/factory.py
@@ -545,7 +545,7 @@ def create_agent( # noqa: PLR0915
     model: str | BaseChatModel,
     tools: Sequence[BaseTool | Callable | dict[str, Any]] | None = None,
     *,
-    system_prompt: str | None = None,
+    system_prompt: str | SystemMessage | None = None,
     middleware: Sequence[AgentMiddleware[StateT_co, ContextT]] = (),
     response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
     state_schema: type[AgentState[ResponseT]] | None = None,
@@ -591,9 +591,9 @@ def create_agent( # noqa: PLR0915
             docs for more information.
         system_prompt: An optional system prompt for the LLM.

-            Prompts are converted to a
-            [`SystemMessage`][langchain.messages.SystemMessage] and added to the
-            beginning of the message list.
+            Can be a `str` (which will be converted to a `SystemMessage`) or a
+            `SystemMessage` instance directly. The system message is added to the
+            beginning of the message list when calling the model.
         middleware: A sequence of middleware instances to apply to the agent.

             Middleware can intercept and modify agent behavior at various stages.
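For orientation, a minimal sketch of what the widened signature accepts. The `langchain.agents` import path and the `openai:gpt-4o-mini` model id are assumptions for illustration; `langchain.messages.SystemMessage` is the path the docstring itself references:

```python
from langchain.agents import create_agent
from langchain.messages import SystemMessage

# A plain string is coerced to a SystemMessage internally, as before.
agent = create_agent("openai:gpt-4o-mini", system_prompt="You are terse.")

# New in this change: a SystemMessage instance is passed through unchanged.
agent = create_agent(
    "openai:gpt-4o-mini",
    system_prompt=SystemMessage(content="You are terse."),
)
```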
@@ -688,6 +688,14 @@ def check_weather(location: str) -> str:
     if isinstance(model, str):
         model = init_chat_model(model)

+    # Convert system_prompt to SystemMessage if needed
+    system_message: SystemMessage | None = None
+    if system_prompt is not None:
+        if isinstance(system_prompt, SystemMessage):
+            system_message = system_prompt
+        else:
+            system_message = SystemMessage(content=system_prompt)
+
     # Handle tools being None or empty
     if tools is None:
         tools = []
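The coercion is simple enough to state standalone. A sketch equivalent to the new block above (the helper name is hypothetical; the factory inlines this logic rather than naming it):

```python
from langchain_core.messages import SystemMessage


def coerce_system_message(
    system_prompt: str | SystemMessage | None,
) -> SystemMessage | None:
    """Pass a SystemMessage through unchanged; wrap a str; keep None as None."""
    if system_prompt is None:
        return None
    if isinstance(system_prompt, SystemMessage):
        return system_prompt
    return SystemMessage(content=system_prompt)


assert coerce_system_message(None) is None
assert coerce_system_message("be brief").content == "be brief"
```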
@@ -1091,8 +1099,8 @@ def _execute_model_sync(request: ModelRequest) -> ModelResponse:
         # Get the bound model (with auto-detection if needed)
         model_, effective_response_format = _get_bound_model(request)
         messages = request.messages
-        if request.system_prompt:
-            messages = [SystemMessage(request.system_prompt), *messages]
+        if request.system_message:
+            messages = [request.system_message, *messages]

         output = model_.invoke(messages)

@@ -1111,7 +1119,7 @@ def model_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
         request = ModelRequest(
             model=model,
             tools=default_tools,
-            system_prompt=system_prompt,
+            system_message=system_message,
             response_format=initial_response_format,
             messages=state["messages"],
             tool_choice=None,
@@ -1144,8 +1152,8 @@ async def _execute_model_async(request: ModelRequest) -> ModelResponse:
         # Get the bound model (with auto-detection if needed)
         model_, effective_response_format = _get_bound_model(request)
         messages = request.messages
-        if request.system_prompt:
-            messages = [SystemMessage(request.system_prompt), *messages]
+        if request.system_message:
+            messages = [request.system_message, *messages]

         output = await model_.ainvoke(messages)

@@ -1164,7 +1172,7 @@ async def amodel_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
         request = ModelRequest(
             model=model,
             tools=default_tools,
-            system_prompt=system_prompt,
+            system_message=system_message,
             response_format=initial_response_format,
             messages=state["messages"],
             tool_choice=None,
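The net effect for middleware authors: `ModelRequest` now carries `system_message: SystemMessage | None` instead of a `system_prompt` string, and overrides go through `request.override(system_message=...)`. A sketch of a custom middleware written against the new field (the class name is hypothetical and the base-class import path is an assumption; the todo middleware further down follows the same pattern):

```python
from langchain.agents.middleware import AgentMiddleware  # import path assumed
from langchain_core.messages import SystemMessage


class EnglishOnlyMiddleware(AgentMiddleware):
    """Prepend a fixed instruction to whatever system message the request carries."""

    def wrap_model_call(self, request, handler):
        prefix = "Always answer in English."
        if request.system_message:
            content = prefix + "\n\n" + request.system_message.content
        else:
            content = prefix
        # Hand the model a rewritten request; state messages are untouched.
        return handler(request.override(system_message=SystemMessage(content=content)))
```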
@@ -18,7 +18,6 @@
     AIMessage,
     AnyMessage,
     BaseMessage,
-    SystemMessage,
     ToolMessage,
 )
 from langchain_core.messages.utils import count_tokens_approximately
@@ -230,9 +229,7 @@ def wrap_model_call(
             def count_tokens(messages: Sequence[BaseMessage]) -> int:
                 return count_tokens_approximately(messages)
         else:
-            system_msg = (
-                [SystemMessage(content=request.system_prompt)] if request.system_prompt else []
-            )
+            system_msg = [request.system_message] if request.system_message else []

             def count_tokens(messages: Sequence[BaseMessage]) -> int:
                 return request.model.get_num_tokens_from_messages(
@@ -259,9 +256,7 @@ async def awrap_model_call(
             def count_tokens(messages: Sequence[BaseMessage]) -> int:
                 return count_tokens_approximately(messages)
         else:
-            system_msg = (
-                [SystemMessage(content=request.system_prompt)] if request.system_prompt else []
-            )
+            system_msg = [request.system_message] if request.system_message else []

             def count_tokens(messages: Sequence[BaseMessage]) -> int:
                 return request.model.get_num_tokens_from_messages(
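Note that `system_msg` is either `[]` or a one-element list, so it splices cleanly whether or not a system message exists. The `get_num_tokens_from_messages(` call is truncated in the view above; assuming its elided argument splices the optional system message in front of the candidate window (consistent with the list shape), the branch amounts to this fragment, with `request` in scope inside `wrap_model_call`:

```python
from collections.abc import Sequence

from langchain_core.messages import BaseMessage

# [] or [SystemMessage], so *-splicing works in both cases.
system_msg = [request.system_message] if request.system_message else []


def count_tokens(messages: Sequence[BaseMessage]) -> int:
    # Assumption: the truncated call counts the optional system
    # message together with the candidate message window.
    return request.model.get_num_tokens_from_messages([*system_msg, *messages])
```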
24 changes: 13 additions & 11 deletions libs/langchain_v1/langchain/agents/middleware/todo.py
@@ -8,7 +8,7 @@
 if TYPE_CHECKING:
     from collections.abc import Awaitable, Callable

-from langchain_core.messages import ToolMessage
+from langchain_core.messages import SystemMessage, ToolMessage
 from langchain_core.tools import tool
 from langgraph.types import Command
 from typing_extensions import NotRequired, TypedDict
@@ -193,23 +193,25 @@ def wrap_model_call(
         request: ModelRequest,
         handler: Callable[[ModelRequest], ModelResponse],
     ) -> ModelCallResult:
-        """Update the system prompt to include the todo system prompt."""
-        new_system_prompt = (
-            request.system_prompt + "\n\n" + self.system_prompt
-            if request.system_prompt
+        """Update the system message to include the todo system prompt."""
+        new_system_content = (
+            request.system_message.content + "\n\n" + self.system_prompt
+            if request.system_message
             else self.system_prompt
         )
-        return handler(request.override(system_prompt=new_system_prompt))
+        new_system_message = SystemMessage(content=new_system_content)
+        return handler(request.override(system_message=new_system_message))

     async def awrap_model_call(
         self,
         request: ModelRequest,
         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
     ) -> ModelCallResult:
-        """Update the system prompt to include the todo system prompt (async version)."""
-        new_system_prompt = (
-            request.system_prompt + "\n\n" + self.system_prompt
-            if request.system_prompt
+        """Update the system message to include the todo system prompt (async version)."""
+        new_system_content = (
+            request.system_message.content + "\n\n" + self.system_prompt
+            if request.system_message
+            else self.system_prompt
         )
-        return await handler(request.override(system_prompt=new_system_prompt))
+        new_system_message = SystemMessage(content=new_system_content)
+        return await handler(request.override(system_message=new_system_message))
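End to end, the todo middleware now composes with either prompt form. A usage sketch; the `TodoListMiddleware` class name and import path are assumptions based on this file, and the model id is for illustration:

```python
from langchain.agents import create_agent
from langchain.agents.middleware.todo import TodoListMiddleware  # name/path assumed

agent = create_agent(
    "openai:gpt-4o-mini",
    system_prompt="You are a careful planner.",  # str or SystemMessage both work
    middleware=[TodoListMiddleware()],  # appends its todo prompt via override
)
```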