diff --git a/pipeline/preprocessors/link_map.py b/pipeline/preprocessors/link_map.py
index 009b264233..bdaba8d5d3 100644
--- a/pipeline/preprocessors/link_map.py
+++ b/pipeline/preprocessors/link_map.py
@@ -386,6 +386,7 @@ class LinkMap(TypedDict):
"ClearToolUsesEdit": "classes/langchain.index.ClearToolUsesEdit.html",
"ContextEdit": "interfaces/langchain.index.ContextEdit.html",
"toolRetryMiddleware": "functions/langchain.index.toolRetryMiddleware.html",
+ "openAIModerationMiddleware": "functions/langchain.index.openAIModerationMiddleware.html",
},
},
]
diff --git a/src/oss/langchain/middleware/built-in.mdx b/src/oss/langchain/middleware/built-in.mdx
index 0c438f3a55..f09c1d535c 100644
--- a/src/oss/langchain/middleware/built-in.mdx
+++ b/src/oss/langchain/middleware/built-in.mdx
@@ -2092,9 +2092,25 @@ const agent = createAgent({
:::
:::js
+
+ Whether to enable prompt caching. Can be overridden at runtime via `middleware_context`.
+
+
Time to live for cached content. Valid values: `'5m'` or `'1h'`
+
+
+ Minimum number of messages required before caching is applied. Caching is skipped if the total message count (including system prompt) is below this threshold.
+
+
+
+ Behavior when using non-Anthropic models. Options:
+
+ - `'ignore'` - Ignore the unsupported model and continue without caching
+ - `'warn'` (default) - Warn the user and continue without caching
+ - `'raise'` - Throw an error and stop execution
+
:::
@@ -2147,6 +2163,7 @@ Please be a helpful assistant.
`;
+// Basic usage with default settings
const agent = createAgent({
model: "claude-sonnet-4-5-20250929",
prompt: LONG_PROMPT,
@@ -2163,6 +2180,40 @@ await agent.invoke({
const result = await agent.invoke({
messages: [new HumanMessage("What's my name?")]
});
+
+// Custom configuration for longer conversations
+const cachingMiddleware = anthropicPromptCachingMiddleware({
+ ttl: "1h", // Cache for 1 hour instead of default 5 minutes
+ minMessagesToCache: 5 // Only cache after 5 messages
+});
+
+const agentWithCustomConfig = createAgent({
+ model: "anthropic:claude-3-5-sonnet",
+ prompt: LONG_PROMPT,
+ middleware: [cachingMiddleware],
+});
+
+// Conditional caching based on runtime context
+const conditionalAgent = createAgent({
+ model: "anthropic:claude-3-5-sonnet",
+ prompt: LONG_PROMPT,
+ middleware: [
+ anthropicPromptCachingMiddleware({
+ enableCaching: true,
+ ttl: "5m"
+ })
+ ],
+});
+
+// Disable caching for specific requests
+await conditionalAgent.invoke(
+ { messages: [new HumanMessage("Process this without caching")] },
+ {
+ configurable: {
+ middleware_context: { enableCaching: false }
+ }
+ }
+);
```
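+
+The unsupported-model behavior described in the parameter list above can also be set explicitly when the same agent may run against non-Anthropic models. A minimal sketch, assuming the option is exposed as `unsupportedModelBehavior` (the exact option name is not shown on this page):
+
+```typescript
+// Skip caching silently when the configured model is not an Anthropic model
+const tolerantCaching = anthropicPromptCachingMiddleware({
+  ttl: "5m",
+  unsupportedModelBehavior: "ignore", // assumed option name; 'warn' is the documented default
+});
+
+const multiModelAgent = createAgent({
+  model: "gpt-4o",
+  prompt: LONG_PROMPT,
+  middleware: [tolerantCaching],
+});
+```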
:::
@@ -2579,6 +2630,27 @@ agent = create_agent(
```
:::
+:::js
+**API reference:** @[`openAIModerationMiddleware`]
+
+```typescript
+import { createAgent, openAIModerationMiddleware } from "langchain";
+
+const agent = createAgent({
+ model: "gpt-4o",
+ tools: [searchTool, databaseTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model: "gpt-4o", // Required: OpenAI model for moderation
+ checkInput: true,
+ checkOutput: true,
+ exitBehavior: "end",
+ }),
+ ],
+});
+```
+:::
+
:::python
@@ -2625,6 +2697,46 @@ agent = create_agent(
:::
+:::js
+
+  OpenAI model to use for moderation. Accepts either a model identifier string (e.g., `'openai:gpt-4o'`) or a `BaseChatModel` instance; the model must be an OpenAI model that supports moderation.
+
+
+
+ Moderation model to use. Options: `'omni-moderation-latest'`, `'omni-moderation-2024-09-26'`, `'text-moderation-latest'`, `'text-moderation-stable'`
+
+
+
+ Whether to check user input messages before the model is called
+
+
+
+ Whether to check model output messages after the model is called
+
+
+
+ Whether to check tool result messages before the model is called
+
+
+
+ How to handle violations when content is flagged. Options:
+
+ - `'end'` (default) - End agent execution immediately with a violation message
+ - `'error'` - Throw `OpenAIModerationError` exception
+ - `'replace'` - Replace the flagged content with the violation message and continue
+
+
+
+ Custom template for violation messages. Supports template variables:
+
+ - `{categories}` - Comma-separated list of flagged categories
+ - `{category_scores}` - JSON string of category scores
+ - `{original_content}` - The original flagged content
+
+ Default: `"I'm sorry, but I can't comply with that request. It was flagged for {categories}."`
+
+:::
+
@@ -2695,4 +2807,90 @@ agent_replace = create_agent(
```
:::
+:::js
+```typescript
+import { createAgent, openAIModerationMiddleware } from "langchain";
+import { ChatOpenAI } from "@langchain/openai";
+
+const model = "gpt-4o";
+
+// Basic moderation using model string
+const agent = createAgent({
+ model,
+ tools: [searchTool, customerDataTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model,
+ checkInput: true,
+ checkOutput: true,
+ }),
+ ],
+});
+
+// Using a model instance instead of a model string
+
+const moderationModel = new ChatOpenAI({ model: "gpt-4o-mini" });
+
+const agentWithModelInstance = createAgent({
+ model: "gpt-4o",
+ tools: [searchTool, customerDataTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model: moderationModel,
+ checkInput: true,
+ checkOutput: true,
+ }),
+ ],
+});
+
+// Strict moderation with a custom violation message
+const agentStrict = createAgent({
+ model,
+ tools: [searchTool, customerDataTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model,
+ moderationModel: "omni-moderation-latest",
+ checkInput: true,
+ checkOutput: true,
+ checkToolResults: true,
+ exitBehavior: "error",
+ violationMessage: "Content policy violation detected: {categories}. Please rephrase your request.",
+ }),
+ ],
+});
+
+// Moderation with replacement behavior
+const model = "gpt-4o"
+const agentReplace = createAgent({
+ model,
+ tools: [searchTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model,
+ checkInput: true,
+ exitBehavior: "replace",
+ violationMessage: "[Content removed due to safety policies]",
+ }),
+ ],
+});
+
+// Custom violation message with all template variables
+const model = "gpt-4o"
+const agentCustomMessage = createAgent({
+ model,
+ tools: [searchTool],
+ middleware: [
+ openAIModerationMiddleware({
+ model,
+ checkInput: true,
+ checkOutput: true,
+ violationMessage: "Flagged categories: {categories}. Scores: {category_scores}. Original: {original_content}",
+ }),
+ ],
+});
+```
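+
+When `exitBehavior: "error"` is used, the thrown error can be caught around the invocation. A minimal sketch, assuming `OpenAIModerationError` is importable from the same package (the exact export path is not shown on this page):
+
+```typescript
+import { HumanMessage } from "@langchain/core/messages";
+import { OpenAIModerationError } from "langchain"; // assumed export location
+
+try {
+  await agentStrict.invoke({
+    messages: [new HumanMessage("A request that may be flagged")],
+  });
+} catch (error) {
+  if (error instanceof OpenAIModerationError) {
+    // Content was flagged: surface a friendly message instead of crashing
+    console.warn("Blocked by moderation:", error.message);
+  } else {
+    throw error;
+  }
+}
+```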
+:::
+