diff --git a/pipeline/preprocessors/link_map.py b/pipeline/preprocessors/link_map.py
index bbc101e6a5..a449ab73dc 100644
--- a/pipeline/preprocessors/link_map.py
+++ b/pipeline/preprocessors/link_map.py
@@ -284,9 +284,12 @@ class LinkMap(TypedDict):
"AssistantsClient": "langsmith/deployment/sdk/#langgraph_sdk.client.AssistantsClient",
"AssistantsClient.create": "langsmith/deployment/sdk/#langgraph_sdk.client.AssistantsClient.create",
"AssistantsClient.update": "langsmith/deployment/sdk/#langgraph_sdk.client.AssistantsClient.update",
+ "LangGraphSDK": "langsmith/deployment/sdk/",
"ThreadsClient": "langsmith/deployment/sdk/#langgraph_sdk.client.ThreadsClient",
"ThreadsClient.create": "langsmith/deployment/sdk/#langgraph_sdk.client.ThreadsClient.create",
"ThreadsClient.copy": "langsmith/deployment/sdk/#langgraph_sdk.client.ThreadsClient.copy",
+ "ThreadsClient.get": "langsmith/deployment/sdk/#langgraph_sdk.client.ThreadsClient.get",
+ "ThreadsClient.get_state": "langsmith/deployment/sdk/#langgraph_sdk.client.ThreadsClient.get_state",
"ThreadsClient.search": "langsmith/deployment/sdk/#langgraph_sdk.client.ThreadsClient.search",
"ThreadsClient.get_history": "langsmith/deployment/sdk/#langgraph_sdk.client.ThreadsClient.get_history",
"RunsClient": "langsmith/deployment/sdk/#langgraph_sdk.client.RunsClient",
@@ -370,10 +373,14 @@ class LinkMap(TypedDict):
"ToolNode": "classes/langchain.index.ToolNode.html",
"UsageMetadata": "types/_langchain_core.messages.UsageMetadata.html",
# LangSmith Deployment SDK - JS
+ "LangGraphSDK": "langgraph-sdk/",
"ThreadsClient": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html",
"ThreadsClient.create": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html#create",
"ThreadsClient.copy": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html#copy",
+ "ThreadsClient.get": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html#get",
+ "ThreadsClient.get_state": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html#getstate",
"ThreadsClient.search": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html#search",
+ "ThreadsClient.get_history": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html#gethistory",
"ThreadsClient.getHistory": "classes/_langchain_langgraph-sdk.client.ThreadsClient.html#gethistory",
"AssistantsClient": "classes/_langchain_langgraph-sdk.client.AssistantsClient.html",
"AssistantsClient.create": "classes/_langchain_langgraph-sdk.client.AssistantsClient.html#create",
@@ -386,6 +393,36 @@ class LinkMap(TypedDict):
"toolRetryMiddleware": "functions/langchain.index.toolRetryMiddleware.html",
},
},
+ {
+ # LangSmith Deployment REST API - Python scope
+ "host": "https://langchain-ai.github.io/langgraph/cloud/reference/api/",
+ "scope": "python",
+ "links": {
+ "AssistantsAPI": "api_ref.html#tag/assistants",
+ "ThreadsAPI": "api_ref.html#tag/threads",
+ "ThreadsAPI.create": "api_ref.html#tag/threads/post/threads",
+ "ThreadsAPI.copy": "api_ref.html#tag/threads/post/threads/{thread_id}/copy",
+ "ThreadsAPI.get": "api_ref.html#tag/threads/get/threads/{thread_id}",
+ "ThreadsAPI.get_state": "api_ref.html#tag/threads/get/threads/{thread_id}/state",
+ "ThreadsAPI.search": "api_ref.html#tag/threads/post/threads/search",
+ "ThreadsAPI.get_history": "api_ref.html#tag/threads/post/threads/{thread_id}/history",
+ },
+ },
+ {
+ # LangSmith Deployment REST API - JS scope
+ "host": "https://langchain-ai.github.io/langgraph/cloud/reference/api/",
+ "scope": "js",
+ "links": {
+ "AssistantsAPI": "api_ref.html#tag/assistants",
+ "ThreadsAPI": "api_ref.html#tag/threads",
+ "ThreadsAPI.create": "api_ref.html#tag/threads/post/threads",
+ "ThreadsAPI.copy": "api_ref.html#tag/threads/post/threads/{thread_id}/copy",
+ "ThreadsAPI.get": "api_ref.html#tag/threads/get/threads/{thread_id}",
+ "ThreadsAPI.get_state": "api_ref.html#tag/threads/get/threads/{thread_id}/state",
+ "ThreadsAPI.search": "api_ref.html#tag/threads/post/threads/search",
+ "ThreadsAPI.get_history": "api_ref.html#tag/threads/post/threads/{thread_id}/history",
+ },
+ },
]
diff --git a/src/langsmith/assistants.mdx b/src/langsmith/assistants.mdx
index 008aafba8b..6e621a82f8 100644
--- a/src/langsmith/assistants.mdx
+++ b/src/langsmith/assistants.mdx
@@ -3,36 +3,179 @@ title: Assistants
sidebarTitle: Overview
---
-**Assistants** allow you to manage configurations (like prompts, LLM selection, tools) separately from your graph's core logic, enabling rapid changes that don't alter the graph architecture. It is a way to create multiple specialized versions of the same graph architecture, each optimized for different use cases through configuration variations rather than structural changes.
+_Assistants_ allow you to manage configurations (like prompts, LLM selection, tools) separately from your graph's core logic, enabling rapid changes that don't alter the graph architecture. It is a way to create multiple specialized versions of the same graph architecture, each optimized for different [use cases](#when-to-use-assistants) through configuration variations rather than structural changes.
For example, imagine a general-purpose writing agent built on a common graph architecture. While the structure remains the same, different writing styles—such as blog posts and tweets—require tailored configurations to optimize performance. To support these variations, you can create multiple assistants (e.g., one for blogs and another for tweets) that share the underlying graph but differ in model selection and system prompt.

-The LangGraph API provides several endpoints for creating and managing assistants and their versions. See the [API reference](https://langchain-ai.github.io/langgraph/cloud/reference/api/api_ref/#tag/assistants) for more details.
+The LangGraph API provides several endpoints for creating and managing assistants and their versions. See the @[API reference][AssistantsAPI] for more details.
-Assistants are a [LangSmith](/langsmith/home) concept. They are not available in the open source LangGraph library.
+Assistants are a [LangSmith Deployment](/langsmith/deployments) concept. They are not available in the open source LangGraph library.
-## Configuration
+## When to use assistants
+
+Assistants are ideal when you need to deploy the same graph logic with different configurations. Common use cases include:
+
+- **User-level personalization**
+ - Customize model selection, system prompts, or tool availability per user.
+ - Store user preferences and apply them automatically to each interaction.
+ - Enable users to choose between different AI personalities or expertise levels.
+
+- **Customer or organization-specific configurations**
+ - Maintain separate configurations for different customers or organizations.
+ - Customize behavior for each client without deploying separate infrastructure.
+ - Isolate configuration changes to specific customers.
+
+```mermaid
+graph TD
+    A["Graph: agent<br/>(deployed)"]
+    A --> B["Customer A Assistant<br/>━━━━━━━━━━━━━<br/>Model: GPT-4<br/>Tone: Legal<br/>Tools: Custom"]
+    A --> C["Customer B Assistant<br/>━━━━━━━━━━━━━<br/>Model: Claude<br/>Tone: Casual<br/>Tools: Standard"]
+    A --> D["Customer C Assistant<br/>━━━━━━━━━━━━━<br/>Model: GPT-3.5<br/>Tone: Formal<br/>Tools: Limited"]
+
+ style A fill:#4A90E2,stroke:#2E5C8A,stroke-width:3px,color:#fff
+ style B fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+ style C fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+ style D fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+```
+
+- **Environment-specific configurations**
+ - Use different models or settings for development, staging, and production.
+ - Test configuration changes in staging before promoting to production.
+ - Reduce costs in non-production environments with smaller models.
+
+- **A/B testing and experimentation**
+ - Compare different prompts, models, or parameter settings.
+ - Roll out configuration changes gradually to a subset of users.
+ - Measure performance differences between configuration variants.
+
+- **Specialized task variants**
+ - Create domain-specific versions of a general-purpose agent.
+ - Optimize configurations for different languages, regions, or industries.
+ - Maintain consistent graph logic while varying the execution details.
+
+```mermaid
+graph TD
+    A["Graph: writing-agent<br/>(deployed)"]
+    A --> B["Blog Assistant<br/>━━━━━━━━━━━━━<br/>Model: GPT-4<br/>Tone: Formal<br/>Style: Long-form<br/>Tools: SEO optimization"]
+    A --> C["Tweet Assistant<br/>━━━━━━━━━━━━━<br/>Model: GPT-4-mini<br/>Tone: Casual<br/>Style: 280-char limit<br/>Tools: Hashtag suggestions"]
+    A --> D["Email Assistant<br/>━━━━━━━━━━━━━<br/>Model: GPT-4<br/>Tone: Professional<br/>Style: Medium length<br/>Tools: Templates"]
+
+ style A fill:#4A90E2,stroke:#2E5C8A,stroke-width:3px,color:#fff
+ style B fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+ style C fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+ style D fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+```
+
+## How assistants work with deployments
+
+When you deploy a graph with LangSmith Deployment, [Agent Server](/langsmith/agent-server) automatically creates a **default assistant** tied to that graph's default configuration. You can then create additional assistants for the same graph, each with its own configuration.
+
+Assistants have several key features:
+
+- **[Managed via API and UI](/langsmith/configuration-cloud)**: Create, list, update, version, and select assistants using the Agent Server/LangGraph SDKs or the [LangSmith UI](https://smith.langchain.com).
+- **One graph, multiple assistants**: A single deployed graph can support multiple assistants, each with different configurations (e.g., prompts, models, tools).
+- **[Versioned](#versioning) configurations**: Each assistant maintains its own configuration history through versioning. Editing an assistant creates a new version, and you can promote or roll back to any version.
+- **[Configuration](#configuration) updates without graph changes**: Update prompts, model selection, and other settings through assistant configurations, enabling rapid iteration without modifying or redeploying your graph code.
+
+
+When invoking a run, you can specify either:
+- A **graph name** (e.g., `"agent"`): Uses the default assistant for that graph
+- An **assistant ID** (UUID): Uses a specific assistant configuration
+
+This flexibility allows you to quickly test with default settings or precisely control which configuration is used.
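+As a minimal sketch of the two options (assuming the Python SDK, a deployed graph named `agent`, and a hypothetical assistant UUID):
+
+```python
+from langgraph_sdk import get_client
+
+client = get_client(url=<DEPLOYMENT_URL>)
+thread = await client.threads.create()
+
+# Option 1: graph name, which runs the default assistant for "agent"
+await client.runs.wait(
+    thread["thread_id"],
+    "agent",
+    input={"messages": [{"role": "user", "content": "hi"}]},
+)
+
+# Option 2: assistant ID (UUID), which runs a specific stored configuration
+await client.runs.wait(
+    thread["thread_id"],
+    "62e209ca-9154-432a-b9e9-2d75c7a9219b",  # hypothetical assistant ID
+    input={"messages": [{"role": "user", "content": "hi"}]},
+)
+```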
+
+
+### Configuration
Assistants build on the LangGraph open source concept of [configuration](/oss/langgraph/graph-api#runtime-context).
-While configuration is available in the open source LangGraph library, assistants are only present in [LangSmith](/langsmith/home). This is due to the fact that assistants are tightly coupled to your deployed graph. Upon deployment, Agent Server will automatically create a default assistant for each graph using the graph's default configuration settings.
+While configuration is available in the open source LangGraph library, assistants are only present in [LangSmith Deployment](/langsmith/deployments) because they are tightly coupled to your deployed graph. Upon deployment, [Agent Server](/langsmith/agent-server) will automatically create a default assistant for each graph using the graph's default configuration settings.
In practice, an assistant is just an _instance_ of a graph with a specific configuration. Therefore, multiple assistants can reference the same graph but can contain different configurations (e.g. prompts, models, tools). The LangSmith Deployment API provides several endpoints for creating and managing assistants. See the [API reference](https://langchain-ai.github.io/langgraph/cloud/reference/api/api_ref/) and [this how-to](/langsmith/configuration-cloud) for more details on how to create assistants.
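+As an illustrative sketch, two assistants can share one deployed graph (the `writing-agent` graph name and context fields here are hypothetical, and `client` comes from the SDK's `get_client`):
+
+```python
+# Same graph, two configurations
+blog_assistant = await client.assistants.create(
+    "writing-agent",  # hypothetical graph name
+    context={"model_name": "openai", "tone": "formal"},
+    name="Blog Assistant",
+)
+tweet_assistant = await client.assistants.create(
+    "writing-agent",
+    context={"model_name": "openai", "tone": "casual"},
+    name="Tweet Assistant",
+)
+```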
-## Versioning
+### Versioning
+
+Assistants support versioning to track changes over time. Once you've created an assistant, subsequent edits will automatically create new versions.
+
+- Each update creates a new version of the assistant.
+- You can promote any version to be the active version.
+- Rolling back to a previous version is as simple as setting it as active.
+- All versions remain available for reference and rollback.
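+As a minimal sketch of this flow (assuming an existing `assistant_id` and a `client` from the Python SDK):
+
+```python
+# Each update creates a new version and makes it the active one
+await client.assistants.update(
+    assistant_id,
+    context={"model_name": "openai", "system_prompt": "Be concise."},
+)
+
+# Roll back by making version 1 active again
+await client.assistants.set_latest(assistant_id, 1)
+```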
+
+
+When updating an assistant, you must provide the entire configuration payload. The update endpoint creates new versions from scratch and does not merge with previous versions. Make sure to include all configuration fields you want to retain.
+
+
+For more details on how to manage assistant versions, refer to the [Manage assistants guide](/langsmith/configuration-cloud#create-a-new-version-for-your-assistant).
+
+### Execution
+
+A _run_ is an invocation of an assistant. When you execute a run, you specify which assistant to use (either by graph name for the default assistant or by assistant ID for a specific configuration).
+
+```mermaid
+flowchart TB
+ subgraph deploy[Deployment]
+        G[Graph Code<br/>━━━━━━━━━<br/>Deployed logic]
+ end
+
+ subgraph config[Configuration]
+        A1[Assistant 1<br/>GPT-4, Formal]
+        A2[Assistant 2<br/>Claude, Casual]
+ end
+
+ subgraph state[State]
+        T1[Thread 1<br/>User A]
+        T2[Thread 2<br/>User B]
+ end
+
+ subgraph runs[Runs]
+ A1T1["Run: A1 + T1"]
+ A1T2["Run: A1 + T2"]
+ A2T1["Run: A2 + T1"]
+ end
+
+ A1 -.-> T1
+ A1 -.-> T2
+ A2 -.-> T1
+
+ A1T1 --> G
+ A1T2 --> G
+ A2T1 --> G
+
+ style G fill:#4A90E2,stroke:#2E5C8A,stroke-width:2px,color:#fff
+ style A1 fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+ style A2 fill:#E8F4F8,stroke:#4A90E2,stroke-width:2px
+ style T1 fill:#FFF4E6,stroke:#F59E0B,stroke-width:2px
+ style T2 fill:#FFF4E6,stroke:#F59E0B,stroke-width:2px
+ style A1T1 fill:#F0FDF4,stroke:#10B981,stroke-width:2px
+ style A1T2 fill:#F0FDF4,stroke:#10B981,stroke-width:2px
+ style A2T1 fill:#F0FDF4,stroke:#10B981,stroke-width:2px
+```
+
+This diagram shows how a **run** combines an assistant with a thread to execute the graph:
+
+- **Graph** (blue): The deployed code containing your agent's logic
+- **Assistants** (light blue): Configuration options (model, prompts, tools)
+- **Threads** (orange): State containers for conversation history
+- **Runs** (green): Executions that pair an assistant + thread
-Assistants support versioning to track changes over time.
-Once you've created an assistant, subsequent edits to that assistant will create new versions. See [this how-to](/langsmith/configuration-cloud#create-a-new-version-for-your-assistant) for more details on how to manage assistant versions.
+**Example combinations:**
+- **Run: A1 + T1**: Assistant 1 configuration applied to User A's conversation
+- **Run: A1 + T2**: Same assistant serving User B (different conversation)
+- **Run: A2 + T1**: Different assistant applied to User A's conversation (configuration switch)
-## Execution
+When executing a run:
-A **run** is an invocation of an assistant. Each run may have its own input, configuration, and metadata, which may affect execution and output of the underlying graph. A run can optionally be executed on a [thread](/oss/langgraph/persistence#threads).
+- Each run may have its own input, configuration overrides, and metadata.
+- Runs can be stateless (no thread) or stateful (executed on a [thread](/oss/langgraph/persistence#threads) for conversation persistence).
+- Multiple runs can use the same assistant configuration.
+- The assistant's configuration affects how the underlying graph executes.
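+A minimal sketch of the two modes (assuming the Python SDK and a deployed graph named `agent`):
+
+```python
+# Stateful: the run executes on a thread, so its output persists in thread state
+thread = await client.threads.create()
+await client.runs.wait(
+    thread["thread_id"],
+    "agent",
+    input={"messages": [{"role": "user", "content": "hi"}]},
+)
+
+# Stateless: pass None instead of a thread ID; nothing is persisted
+await client.runs.wait(
+    None,
+    "agent",
+    input={"messages": [{"role": "user", "content": "hi"}]},
+)
+```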
-LangSmith API provides several endpoints for creating and managing runs. See the [API reference](https://langchain-ai.github.io/langgraph/cloud/reference/api/api_ref/) for more details.
+The LangSmith API provides several endpoints for creating and managing runs. For more details, refer to the [API reference](https://langchain-ai.github.io/langgraph/cloud/reference/api/api_ref/).
## Video guide
diff --git a/src/langsmith/configuration-cloud.mdx b/src/langsmith/configuration-cloud.mdx
index 20b9582651..888466b2cf 100644
--- a/src/langsmith/configuration-cloud.mdx
+++ b/src/langsmith/configuration-cloud.mdx
@@ -2,331 +2,422 @@
title: Manage assistants
sidebarTitle: Manage assistants
---
-In this guide we will show how to create, configure, and manage an [assistant](/langsmith/assistants).
-First, as a brief refresher on the concept of context, consider the following simple `call_model` node and context schema.
-Observe that this node tries to read and use the `model_name` as defined by the `context` object's `model_name` field.
+This page describes how to create, configure, and manage [assistants](/langsmith/assistants). Assistants allow you to customize your [deployed](/langsmith/deployments) graph's behavior through configuration—such as model selection, prompts, and tool availability—without changing the underlying graph code.
-
-
- ```python
- class ContextSchema(TypedDict):
- model_name: str
-
- builder = StateGraph(AgentState, context_schema=ContextSchema)
-
- def call_model(state, runtime: Runtime[ContextSchema]):
- messages = state["messages"]
- model = _get_model(runtime.context.get("model_name", "anthropic"))
- response = model.invoke(messages)
- # We return a list, because this will get added to the existing list
- return {"messages": [response]}
- ```
-
-
- ```js
- import { Annotation } from "@langchain/langgraph";
-
- const ContextSchema = Annotation.Root({
- model_name: Annotation,
- system_prompt:
- });
-
- const builder = new StateGraph(AgentState, ContextSchema)
-
- function callModel(state: State, runtime: Runtime[ContextSchema]) {
- const messages = state.messages;
- const model = _getModel(runtime.context.model_name ?? "anthropic");
- const response = model.invoke(messages);
- // We return a list, because this will get added to the existing list
- return { messages: [response] };
- }
- ```
-
-
+You can work with the [SDK](https://reference.langchain.com/python/langsmith/deployment/sdk/) or in the [LangSmith UI](https://smith.langchain.com).
-For more information on configurations, [see here](/langsmith/configuration-cloud#configuration).
+## Understand assistant configuration
-## Create an assistant
+Assistants store _context_ values that customize graph behavior at runtime. You define a context schema in your graph code, then provide specific context values when creating an assistant via the @[`context` parameter][AssistantsClient.create].
+
+Consider this example of a `call_model` node that reads the `model_name` from the context:
+
+<CodeGroup>
+```python Python
+class ContextSchema(TypedDict):
+ model_name: str
+
+builder = StateGraph(AgentState, context_schema=ContextSchema)
+
+def call_model(state, runtime: Runtime[ContextSchema]):
+ messages = state["messages"]
+ model = _get_model(runtime.context.get("model_name", "anthropic"))
+ response = model.invoke(messages)
+ return {"messages": [response]}
+```
+
+```javascript JavaScript
+import { Annotation } from "@langchain/langgraph";
+
+const ContextSchema = Annotation.Root({
+  model_name: Annotation<string>,
+  system_prompt: Annotation<string>,
+});
+
+const builder = new StateGraph(AgentState, ContextSchema)
+
+function callModel(state: State, runtime: Runtime[ContextSchema]) {
+ const messages = state.messages;
+ const model = _getModel(runtime.context.model_name ?? "anthropic");
+ const response = model.invoke(messages);
+ return { messages: [response] };
+}
+```
+</CodeGroup>
-### LangGraph SDK
+When you create an assistant, you provide specific values for these configuration fields. The assistant stores this configuration and applies it whenever the graph runs.
-To create an assistant, use the [LangGraph SDK](/langsmith/sdk) `create` method. See the @[Python][AssistantsClient.create] and [JS](https://reference.langchain.com/javascript/classes/_langchain_langgraph-sdk.client.AssistantsClient.html#create) SDK reference docs for more information.
+For more information on configuration in [LangGraph](/oss/langgraph/overview), refer to the [runtime context documentation](/oss/langgraph/graph-api#runtime-context).
-This example uses the same context schema as above, and creates an assistant with `model_name` set to `openai`.
+**Select SDK or UI for your workflow:**
-
- ```python
- from langgraph_sdk import get_client
-
- client = get_client(url=)
- openai_assistant = await client.assistants.create(
- # "agent" is the name of a graph we deployed
- "agent", context={"model_name": "openai"}, name="Open AI Assistant"
- )
-
- print(openai_assistant)
- ```
-
-
- ```js
- import { Client } from "@langchain/langgraph-sdk";
-
- const client = new Client({ apiUrl: });
- const openAIAssistant = await client.assistants.create({
- graphId: 'agent',
- name: "Open AI Assistant",
- context: { "model_name": "openai" },
- });
-
- console.log(openAIAssistant);
- ```
-
-
- ```bash
- curl --request POST \
- --url /assistants \
- --header 'Content-Type: application/json' \
- --data '{"graph_id":"agent", "context":{"model_name":"openai"}, "name": "Open AI Assistant"}'
- ```
-
-
+
+
+## Create an assistant
-Output:
+Use the @[AssistantsClient.create][AssistantsClient.create] method to create a new assistant. This method requires:
+- **Graph ID**: The name of the deployed graph this assistant will use (e.g., `"agent"`).
+- **Context**: Configuration values matching your graph's context schema.
+- **Name**: A descriptive name for the assistant.
+The following example creates an assistant with `model_name` set to `openai`:
+
+<CodeGroup>
+```python Python
+from langgraph_sdk import get_client
+
+# Initialize the client with your deployment URL
+client = get_client(url=<DEPLOYMENT_URL>)
+
+# Create an assistant for the "agent" graph
+# The first parameter is the graph ID (also called graph name)
+openai_assistant = await client.assistants.create(
+ "agent", # Graph ID of the deployed graph
+ context={"model_name": "openai"},
+ name="Open AI Assistant"
+)
+
+print(openai_assistant)
+# Output includes the assistant_id (UUID) that uniquely identifies this assistant
```
+
+```javascript JavaScript
+import { Client } from "@langchain/langgraph-sdk";
+
+// Initialize the client with your deployment URL
+const client = new Client({ apiUrl: <DEPLOYMENT_URL> });
+
+// Create an assistant for the "agent" graph
+const openAIAssistant = await client.assistants.create({
+ graphId: 'agent', // Graph ID of the deployed graph
+ name: "Open AI Assistant",
+ context: { "model_name": "openai" },
+});
+
+console.log(openAIAssistant);
+// Output includes the assistant_id (UUID) that uniquely identifies this assistant
+```
+
+```bash cURL
+curl --request POST \
+    --url <DEPLOYMENT_URL>/assistants \
+ --header 'Content-Type: application/json' \
+ --data '{"graph_id":"agent", "context":{"model_name":"openai"}, "name": "Open AI Assistant"}'
+```
+</CodeGroup>
+
+**Response:**
+
+The API returns an assistant object containing:
+- `assistant_id`: A UUID that uniquely identifies this assistant
+- `graph_id`: The graph this assistant is configured for
+- `context`: The configuration values you provided
+- `name`, `metadata`, timestamps, and other fields
+
+```json
{
-"assistant_id": "62e209ca-9154-432a-b9e9-2d75c7a9219b",
-"graph_id": "agent",
-"name": "Open AI Assistant"
-"context": {
-"model_name": "openai"
-}
-"metadata": {}
-"created_at": "2024-08-31T03:09:10.230718+00:00",
-"updated_at": "2024-08-31T03:09:10.230718+00:00",
+ "assistant_id": "62e209ca-9154-432a-b9e9-2d75c7a9219b",
+ "graph_id": "agent",
+ "name": "Open AI Assistant",
+ "context": {
+ "model_name": "openai"
+ },
+ "metadata": {},
+ "created_at": "2024-08-31T03:09:10.230718+00:00",
+ "updated_at": "2024-08-31T03:09:10.230718+00:00"
}
```
-### LangSmith UI
-
-You can also create assistants from the LangSmith UI.
+The `assistant_id` (a UUID like `"62e209ca-9154-432a-b9e9-2d75c7a9219b"`) uniquely identifies this assistant configuration. You'll use this ID when running your graph to specify which configuration to apply.
-Inside your deployment, select the "Assistants" tab. This will load a table of all of the assistants in your deployment, across all graphs.
+
+**Graph ID vs Assistant ID**
-To create a new assistant, select the "+ New assistant" button. This will open a form where you can specify the graph this assistant is for, as well as provide a name, description, and the desired configuration for the assistant based on the configuration schema for that graph.
+When creating an assistant, you specify a **graph ID** (graph name like `"agent"`). This returns an **assistant ID** (UUID like `"62e209ca..."`). You can use either when running your graph:
+- **Graph ID** (e.g., `"agent"`): Uses the default assistant for that graph
+- **Assistant ID** (UUID): Uses the specific assistant configuration
-To confirm, click "Create assistant". This will take you to [Studio](/langsmith/studio) where you can test the assistant. If you go back to the "Assistants" tab in the deployment, you will see the newly created assistant in the table.
+See [Use an assistant](#use-an-assistant) for examples.
+
## Use an assistant
-### LangGraph SDK
+To use an assistant, pass its `assistant_id` when creating a run. The example below uses the assistant we created above:
+
+<CodeGroup>
+```python Python
+# Create a thread for the conversation
+thread = await client.threads.create()
+
+# Prepare the input
+input = {"messages": [{"role": "user", "content": "who made you?"}]}
+
+# Run the graph using the assistant's configuration
+# Pass the assistant_id (UUID) as the second parameter
+async for event in client.runs.stream(
+ thread["thread_id"],
+ openai_assistant["assistant_id"], # Assistant ID (UUID)
+ input=input,
+ stream_mode="updates",
+):
+ print(f"Receiving event of type: {event.event}")
+ print(event.data)
+ print("\n\n")
+```
-We have now created an assistant called "Open AI Assistant" that has `model_name` defined as `openai`. We can now use this assistant with this configuration:
+```javascript JavaScript
+// Create a thread for the conversation
+const thread = await client.threads.create();
+
+// Prepare the input
+const input = { "messages": [{ "role": "user", "content": "who made you?" }] };
+
+// Run the graph using the assistant's configuration
+// Pass the assistant_id (UUID) as the second parameter
+const streamResponse = client.runs.stream(
+ thread["thread_id"],
+ openAIAssistant["assistant_id"], // Assistant ID (UUID)
+ {
+ input,
+ streamMode: "updates"
+ }
+);
+
+for await (const event of streamResponse) {
+ console.log(`Receiving event of type: ${event.event}`);
+ console.log(event.data);
+ console.log("\n\n");
+}
+```
-
-
- ```python
- thread = await client.threads.create()
- input = {"messages": [{"role": "user", "content": "who made you?"}]}
- async for event in client.runs.stream(
- thread["thread_id"],
- # this is where we specify the assistant id to use
- openai_assistant["assistant_id"],
- input=input,
- stream_mode="updates",
- ):
- print(f"Receiving event of type: {event.event}")
- print(event.data)
- print("\n\n")
- ```
-
-
- ```js
- const thread = await client.threads.create();
- const input = { "messages": [{ "role": "user", "content": "who made you?" }] };
-
- const streamResponse = client.runs.stream(
- thread["thread_id"],
- // this is where we specify the assistant id to use
- openAIAssistant["assistant_id"],
- {
- input,
- streamMode: "updates"
- }
- );
-
- for await (const event of streamResponse) {
- console.log(`Receiving event of type: ${event.event}`);
- console.log(event.data);
- console.log("\n\n");
- }
- ```
-
-
- ```bash
- thread_id=$(curl --request POST \
- --url /threads \
- --header 'Content-Type: application/json' \
- --data '{}' | jq -r '.thread_id') && \
- curl --request POST \
- --url "/threads/${thread_id}/runs/stream" \
- --header 'Content-Type: application/json' \
- --data '{
- "assistant_id": ,
- "input": {
- "messages": [
- {
- "role": "user",
- "content": "who made you?"
- }
- ]
- },
- "stream_mode": [
- "updates"
+```bash cURL
+# First, create a thread
+thread_id=$(curl --request POST \
+    --url <DEPLOYMENT_URL>/threads \
+ --header 'Content-Type: application/json' \
+ --data '{}' | jq -r '.thread_id')
+
+# Run the graph with the assistant ID (UUID)
+curl --request POST \
+    --url "<DEPLOYMENT_URL>/threads/${thread_id}/runs/stream" \
+ --header 'Content-Type: application/json' \
+ --data '{
+        "assistant_id": "<ASSISTANT_ID>",
+ "input": {
+ "messages": [
+ {
+ "role": "user",
+ "content": "who made you?"
+ }
]
- }' | \
- sed 's/\r$//' | \
- awk '
- /^event:/ {
- if (data_content != "") {
- print data_content "\n"
- }
- sub(/^event: /, "Receiving event of type: ", $0)
- printf "%s...\n", $0
- data_content = ""
- }
- /^data:/ {
- sub(/^data: /, "", $0)
- data_content = $0
+ },
+ "stream_mode": ["updates"]
+ }' | \
+ sed 's/\r$//' | \
+ awk '
+ /^event:/ {
+ if (data_content != "") {
+ print data_content "\n"
}
- END {
- if (data_content != "") {
- print data_content "\n\n"
- }
+ sub(/^event: /, "Receiving event of type: ", $0)
+ printf "%s...\n", $0
+ data_content = ""
+ }
+ /^data:/ {
+ sub(/^data: /, "", $0)
+ data_content = $0
+ }
+ END {
+ if (data_content != "") {
+ print data_content "\n\n"
}
- '
- ```
-
-
+ }
+'
+```
+</CodeGroup>
-Output:
+**Response:**
+
+The stream returns events as the graph executes with your assistant's configuration:
```
Receiving event of type: metadata
{'run_id': '1ef6746e-5893-67b1-978a-0f1cd4060e16'}
-
-
Receiving event of type: updates
-{'agent': {'messages': [{'content': 'I was created by OpenAI, a research organization focused on developing and advancing artificial intelligence technology.', 'additional_kwargs': {}, 'response_metadata': {'finish_reason': 'stop', 'model_name': 'gpt-4o-2024-05-13', 'system_fingerprint': 'fp_157b3831f5'}, 'type': 'ai', 'name': None, 'id': 'run-e1a6b25c-8416-41f2-9981-f9cfe043f414', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]}}
+{'agent': {'messages': [{'content': 'I was created by OpenAI...', ...}]}}
```
-### LangSmith UI
+
+**Using graph ID vs assistant ID**
+
+You can pass either a **graph ID** or **assistant ID** when running your graph:
-Inside your deployment, select the "Assistants" tab. For the assistant you would like to use, click the **Studio** button. This will open Studio with the selected assistant. When you submit an input (either in Graph or Chat mode), the selected assistant and its configuration will be used.
+```python
+# Option 1: Use graph ID to get the default assistant
+client.runs.stream(thread_id, "agent", input=input)
+
+# Option 2: Use assistant ID (UUID) for a specific configuration
+client.runs.stream(thread_id, "62e209ca-9154-432a-b9e9-2d75c7a9219b", input=input)
+```
+
## Create a new version for your assistant
-### LangGraph SDK
+Use the @[AssistantsClient.update][AssistantsClient.update] method to create a new version of an assistant.
-To edit the assistant, use the `update` method. This will create a new version of the assistant with the provided edits. See the @[Python][AssistantsClient.update] and [JS](https://reference.langchain.com/javascript/classes/_langchain_langgraph-sdk.client.AssistantsClient.html#update) SDK reference docs for more information.
+
+**Updates require full configuration**
-
-**Note**
-You must pass in the ENTIRE context (and metadata if you are using it). The update endpoint creates new versions completely from scratch and does not rely on previous versions.
-
+You must provide the **entire** configuration when updating. The update endpoint creates new versions from scratch and does not merge with previous versions. Include all configuration fields you want to retain.
+
-For example, to update your assistant's system prompt:
+For example, to add a system prompt to the assistant:
+
+<CodeGroup>
+```python Python
+# Update the assistant with a new configuration
+# IMPORTANT: Include ALL configuration fields, not just the ones you're changing
+openai_assistant_v2 = await client.assistants.update(
+ openai_assistant["assistant_id"], # Assistant ID (UUID)
+ context={
+ "model_name": "openai", # Must include existing fields
+ "system_prompt": "You are an unhelpful assistant!", # New field
+ },
+)
+
+# This creates version 2 and sets it as the active version
+# Future runs using this assistant_id will use version 2
+```
-
-
- ```python
- openai_assistant_v2 = await client.assistants.update(
- openai_assistant["assistant_id"],
- context={
- "model_name": "openai",
- "system_prompt": "You are an unhelpful assistant!",
- },
- )
- ```
-
-
- ```js
- const openaiAssistantV2 = await client.assistants.update(
- openai_assistant["assistant_id"],
- {
- context: {
- model_name: 'openai',
- system_prompt: 'You are an unhelpful assistant!',
- },
+```javascript JavaScript
+// Update the assistant with a new configuration
+// IMPORTANT: Include ALL configuration fields, not just the ones you're changing
+const openaiAssistantV2 = await client.assistants.update(
+ openAIAssistant["assistant_id"], // Assistant ID (UUID)
+ {
+ context: {
+ model_name: 'openai', // Must include existing fields
+ system_prompt: 'You are an unhelpful assistant!', // New field
},
- );
- ```
-
-
- ```bash
- curl --request PATCH \
- --url /assistants/ \
- --header 'Content-Type: application/json' \
- --data '{
- "context": {"model_name": "openai", "system_prompt": "You are an unhelpful assistant!"}
- }'
- ```
-
-
+ },
+);
-This will create a new version of the assistant with the updated parameters and set this as the active version of your assistant. If you now run your graph and pass in this assistant id, it will use this latest version.
+// This creates version 2 and sets it as the active version
+// Future runs using this assistant_id will use version 2
+```
-### LangSmith UI
+```bash cURL
+curl --request PATCH \
+--url <DEPLOYMENT_URL>/assistants/<ASSISTANT_ID> \
+--header 'Content-Type: application/json' \
+--data '{
+"context": {"model_name": "openai", "system_prompt": "You are an unhelpful assistant!"}
+}'
+```
+</CodeGroup>
-You can also edit assistants from the LangSmith UI.
+The update creates a new version and automatically sets it as active. All future runs using this assistant ID will use the new configuration.
-Inside your deployment, select the "Assistants" tab. This will load a table of all of the assistants in your deployment, across all graphs.
+## Use a previous assistant version
-To edit an existing assistant, select the "Edit" button for the specified assistant. This will open a form where you can edit the assistant's name, description, and configuration.
+Use the `set_latest` (Python) / `setLatest` (JS) method to change which version is active:
-Additionally, if using Studio, you can edit the assistants and create new versions via the "Manage Assistants" button.
+<CodeGroup>
+```python Python
+# Roll back to version 1 of the assistant
+await client.assistants.set_latest(
+ openai_assistant['assistant_id'], # Assistant ID (UUID)
+ 1 # Version number
+)
-## Use a previous assistant version
+# All future runs using this assistant_id will now use version 1
+```
-### LangGraph SDK
+```javascript JavaScript
+// Roll back to version 1 of the assistant
+await client.assistants.setLatest(
+ openaiAssistant['assistant_id'], // Assistant ID (UUID)
+ 1 // Version number
+);
-You can also change the active version of your assistant. To do so, use the `setLatest` method.
+// All future runs using this assistant_id will now use version 1
+```
-In the example above, to rollback to the first version of the assistant:
+```bash cURL
+curl --request POST \
+--url <DEPLOYMENT_URL>/assistants/<ASSISTANT_ID>/latest \
+--header 'Content-Type: application/json' \
+--data '{
+"version": 1
+}'
+```
+</CodeGroup>
-
-
- ```python
- await client.assistants.set_latest(openai_assistant['assistant_id'], 1)
- ```
-
-
- ```js
- await client.assistants.setLatest(openaiAssistant['assistant_id'], 1);
- ```
-
-
- ```bash
- curl --request POST \
- --url /assistants//latest \
- --header 'Content-Type: application/json' \
- --data '{
- "version": 1
- }'
- ```
-
-
+After changing the active version, all runs using this assistant ID will use the specified version's configuration.
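+As a quick check (a sketch assuming an existing `assistant_id`), you can fetch the assistant and confirm its active version:
+
+```python
+assistant = await client.assistants.get(assistant_id)
+print(assistant["version"])  # e.g. 1 after the rollback above
+```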
+
+
+
+
+## Create an assistant
+
+You can create assistants from the [LangSmith UI](https://smith.langchain.com):
+
+1. Navigate to your deployment and select the **Assistants** tab.
+1. Click **+ New assistant**.
+1. In the form that opens:
+ - Select the graph this assistant is for.
+ - Provide a name and description.
+ - Configure the assistant using the configuration schema for that graph.
+1. Click **Create assistant**.
+
+This will take you to [Studio](/langsmith/studio) where you can test the assistant. Return to the **Assistants** tab to see your newly created assistant in the table.
+
+## Use an assistant
-If you now run your graph and pass in this assistant id, it will use the first version of the assistant.
+To use an assistant in the LangSmith UI:
-### LangSmith UI
+1. Navigate to your deployment and select the **Assistants** tab.
+1. Find the assistant you want to use.
+1. Click **Studio** for that assistant.
-If using Studio, to set the active version of your assistant, click the "Manage Assistants" button and locate the assistant you would like to use. Select the assistant and the version, and then click the "Active" toggle. This will update the assistant to make the selected version active.
+This opens [Studio](/langsmith/studio) with the selected assistant. When you submit an input (in **Graph** or **Chat** mode), the assistant's configuration will be applied to the run.
+
+## Create a new version for your assistant
+
+To update an assistant and create a new version from the UI:
+
+**From the Assistants tab:**
+1. Navigate to your deployment and select the **Assistants** tab.
+1. Find the assistant you want to edit.
+1. Click **Edit**.
+1. Modify the assistant's name, description, or configuration.
+1. Save your changes.
+
+**From Studio:**
+1. Open Studio for the assistant.
+1. Click **Manage Assistants**.
+1. Edit the assistant's configuration.
+1. Save your changes.
+
+Either method creates a new version and sets it as the active version.
+
+## Use a previous assistant version
+
+To set a previous version as active from Studio:
+
+1. Open Studio for the assistant.
+2. Click **Manage Assistants**.
+3. Locate the assistant and select the version you want to use.
+4. Toggle the **Active** switch for that version.
+
+This updates the assistant to use the selected version for all future runs.
-**Deleting Assistants**
-Deleting as assistant will delete ALL of its versions. There is currently no way to delete a single version, but by pointing your assistant to the correct version you can skip any versions that you don't wish to use.
+Deleting an assistant will delete **all** of its versions. There is currently no way to delete a single version. To skip a version, simply set a different version as active.
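+A minimal sketch of deletion with the Python SDK (assuming an existing `assistant_id`):
+
+```python
+# Removes the assistant and every one of its versions
+await client.assistants.delete(assistant_id)
+```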
+
+
+
diff --git a/src/langsmith/use-threads.mdx b/src/langsmith/use-threads.mdx
index 8624ae5a8e..6621b8ac9b 100644
--- a/src/langsmith/use-threads.mdx
+++ b/src/langsmith/use-threads.mdx
@@ -2,46 +2,104 @@
title: Use threads
sidebarTitle: Use threads
---
-In this guide, we will show how to create, view, and inspect [threads](/oss/langgraph/persistence#threads).
+
+This guide shows you how to create, view, and inspect _threads_. Threads work with [assistants](/langsmith/assistants) to enable [stateful](/oss/langgraph/persistence) execution of your [deployed graphs](/langsmith/deployments).
+
+## Understand threads
+
+A thread is a persistent conversation container that maintains state across multiple runs. Each time you execute a run on a thread, the graph processes the input with the thread's current state and updates that state with new information.
+
+Threads enable stateful interactions by preserving conversation history and context between runs. Without threads, each run would be stateless, with no memory of previous interactions. Threads are particularly useful for:
+
+- Multi-turn conversations where the assistant needs to remember what was discussed.
+- Long-running tasks that require maintaining context across multiple steps.
+- User-specific state management where each user has their own conversation history.
+
+The diagram illustrates how a thread maintains state across two runs. The second run has access to the messages from the first run, allowing the assistant to understand that the context of "What about tomorrow?" refers to the weather query from the first run:
+
+```mermaid
+sequenceDiagram
+ participant User
+ participant Thread
+ participant Assistant
+ participant Graph
+
+    Note over Thread: Thread ID: abc-123<br/>Persistent conversation
+
+ User->>Thread: Run 1: "What's the weather?"
+ Thread->>Assistant: Use Assistant Config
+ Assistant->>Graph: Execute with context
+    Graph-->>Thread: Update State<br/>{messages: [user_msg, ai_response]}
+ Thread-->>User: Response
+
+ Note over Thread: State persisted ✓
+
+ User->>Thread: Run 2: "What about tomorrow?"
+    Note over Thread: Previous messages<br/>still in state
+ Thread->>Assistant: Use Assistant Config
+ Assistant->>Graph: Execute with full history
+    Graph-->>Thread: Update State<br/>{messages: [...prev, new_msgs]}
+ Thread-->>User: Response with context
+```
+
+- A thread maintains a persistent conversation with a unique thread ID.
+- Each run applies the assistant's configuration to the graph execution.
+- State is updated after each run and persists for subsequent runs.
+- Later runs have access to the full conversation history.
+
+
+- **[Assistants](/langsmith/assistants)** define the configuration (model, prompts, tools) for how your graph executes. When creating a run, you can specify either a **graph name** (e.g., `"agent"`) to use the default assistant, or an **assistant ID** (UUID) to use a specific configuration.
+- **Threads** maintain the state and conversation history.
+- **Runs** combine an assistant and thread to execute your graph with a specific configuration and state.
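+A minimal sketch of this relationship (assuming the Python SDK, a `client` from `get_client`, and a deployed graph named `agent`):
+
+```python
+thread = await client.threads.create()
+
+# Run 1 establishes context on the thread
+await client.runs.wait(
+    thread["thread_id"],
+    "agent",
+    input={"messages": [{"role": "user", "content": "What's the weather?"}]},
+)
+
+# Run 2 executes on the same thread, so the graph sees the earlier messages
+await client.runs.wait(
+    thread["thread_id"],
+    "agent",
+    input={"messages": [{"role": "user", "content": "What about tomorrow?"}]},
+)
+```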
+
## Create a thread
-To run your graph and the state persisted, you must first create a thread.
+To run your graph with state persistence, you must first create a thread:
+
+
+
### Empty thread
-To create a new thread, use the [LangGraph SDK](/langsmith/sdk) `create` method. See the @[Python][ThreadsClient.create] and [JS](https://reference.langchain.com/javascript/classes/_langchain_langgraph-sdk.client.ThreadsClient.html#create) SDK reference docs for more information.
+To create a new thread, use one of:
-
-
- ```python
- from langgraph_sdk import get_client
-
- client = get_client(url=)
- thread = await client.threads.create()
-
- print(thread)
- ```
-
-
- ```js
- import { Client } from "@langchain/langgraph-sdk";
-
- const client = new Client({ apiUrl: });
- const thread = await client.threads.create();
-
- console.log(thread);
- ```
-
-
- ```bash
- curl --request POST \
- --url /threads \
- --header 'Content-Type: application/json' \
- --data '{}'
- ```
-
-
+<CodeGroup>
+```python Python
+from langgraph_sdk import get_client
+
+# Initialize the client with your deployment URL
+client = get_client(url=<DEPLOYMENT_URL>)
+
+# Create an empty thread
+# This creates a new thread with no initial state
+thread = await client.threads.create()
+
+print(thread)
+```
+
+```javascript JavaScript
+import { Client } from "@langchain/langgraph-sdk";
+
+// Initialize the client with your deployment URL
+const client = new Client({ apiUrl: <DEPLOYMENT_URL> });
+
+// Create an empty thread
+// This creates a new thread with no initial state
+const thread = await client.threads.create();
+
+console.log(thread);
+```
+
+```bash cURL
+curl --request POST \
+    --url <DEPLOYMENT_URL>/threads \
+ --header 'Content-Type: application/json' \
+ --data '{}'
+```
+</CodeGroup>
+
+For more information, refer to the @[Python][ThreadsClient.create] and @[JS][ThreadsClient.create] SDK docs, or the @[REST API][ThreadsAPI.create] reference.
Output:
@@ -58,144 +116,157 @@ Output:
### Copy thread
-Alternatively, if you already have a thread in your application whose state you wish to copy, you can use the `copy` method. This will create an independent thread whose history is identical to the original thread at the time of the operation. See the @[Python][ThreadsClient.copy] and [JS](https://reference.langchain.com/javascript/classes/_langchain_langgraph-sdk.client.ThreadsClient.html#copy) SDK reference docs for more information.
+Alternatively, if you already have a thread in your application whose state you wish to copy, you can use the `copy` method. This will create an independent thread whose history is identical to the original thread at the time of the operation:
-
-
- ```python
- copied_thread = await client.threads.copy()
- ```
-
-
- ```js
- const copiedThread = await client.threads.copy();
- ```
-
-
- ```bash
- curl --request POST --url /threads//copy \
- --header 'Content-Type: application/json'
- ```
-
-
+<CodeGroup>
+```python Python
+# Copy an existing thread
+# The new thread will have the same state as the original at the time of copying
+copied_thread = await client.threads.copy(<THREAD_ID>)
+```
+
+```javascript JavaScript
+// Copy an existing thread
+// The new thread will have the same state as the original at the time of copying
+const copiedThread = await client.threads.copy(<THREAD_ID>);
+```
+
+```bash cURL
+curl --request POST --url <DEPLOYMENT_URL>/threads/<THREAD_ID>/copy \
+--header 'Content-Type: application/json'
+```
+</CodeGroup>
+
+For more information, refer to the @[Python][ThreadsClient.copy] and @[JS][ThreadsClient.copy] SDK docs, or the @[REST API][ThreadsAPI.copy] reference.
### Prepopulated State
-Finally, you can create a thread with an arbitrary pre-defined state by providing a list of `supersteps` into the `create` method. The `supersteps` describe a list of a sequence of state updates. For example:
+You can create a thread with an arbitrary pre-defined state by passing a list of `supersteps` to the `create` method. The `supersteps` describe a sequence of state updates that establish the initial state of the thread. This is useful when you want to:
-
-
- ```python
- from langgraph_sdk import get_client
-
- client = get_client(url=)
- thread = await client.threads.create(
- graph_id="agent",
- supersteps=[
+- Create a thread with existing conversation history.
+- Migrate conversations from another system.
+- Set up test scenarios with specific initial states.
+- Resume conversations from a previous session.
+
+For more information on checkpoints and state management, refer to the [LangGraph persistence documentation](/oss/langgraph/persistence).
+
+<CodeGroup>
+```python Python
+from langgraph_sdk import get_client
+
+# Initialize the client
+client = get_client(url=<DEPLOYMENT_URL>)
+
+# Create a thread with pre-populated conversation history
+# The supersteps define a sequence of state updates that build up the initial state
+thread = await client.threads.create(
+ graph_id="agent", # Specify which graph this thread is for
+ supersteps=[
+ {
+            "updates": [
{
- updates: [
- {
- values: {},
- as_node: '__input__',
- },
- ],
+                    "values": {},
+                    "as_node": "__input__",  # Initial input node
},
+ ],
+ },
+ {
+            "updates": [
{
- updates: [
- {
- values: {
- messages: [
- {
- type: 'human',
- content: 'hello',
- },
- ],
+                    "values": {
+                        "messages": [
+                            {
+                                "type": "human",
+                                "content": "hello",
},
- as_node: '__start__',
- },
- ],
+ ],
+ },
+                    "as_node": "__start__",  # User's first message
},
+ ],
+ },
+ {
+            "updates": [
{
- updates: [
- {
- values: {
- messages: [
- {
- content: 'Hello! How can I assist you today?',
- type: 'ai',
- },
- ],
+                    "values": {
+                        "messages": [
+                            {
+                                "content": "Hello! How can I assist you today?",
+                                "type": "ai",
},
- as_node: 'call_model',
- },
- ],
+ ],
+ },
+                    "as_node": "call_model",  # Assistant's response
},
- ])
-
- print(thread)
- ```
-
-
- ```js
- import { Client } from "@langchain/langgraph-sdk";
-
- const client = new Client({ apiUrl: });
- const thread = await client.threads.create({
- graphId: 'agent',
- supersteps: [
+ ],
+ },
+ ])
+
+print(thread)
+```
+
+```javascript JavaScript
+import { Client } from "@langchain/langgraph-sdk";
+
+// Initialize the client
+const client = new Client({ apiUrl: <DEPLOYMENT_URL> });
+
+// Create a thread with pre-populated conversation history
+// The supersteps define a sequence of state updates that build up the initial state
+const thread = await client.threads.create({
+ graphId: 'agent', // Specify which graph this thread is for
+ supersteps: [
+ {
+ updates: [
{
- updates: [
- {
- values: {},
- asNode: '__input__',
- },
- ],
+ values: {},
+ asNode: '__input__', // Initial input node
},
+ ],
+ },
+ {
+ updates: [
{
- updates: [
- {
- values: {
- messages: [
- {
- type: 'human',
- content: 'hello',
- },
- ],
+ values: {
+ messages: [
+ {
+ type: 'human',
+ content: 'hello',
},
- asNode: '__start__',
- },
- ],
+ ],
+ },
+ asNode: '__start__', // User's first message
},
+ ],
+ },
+ {
+ updates: [
{
- updates: [
- {
- values: {
- messages: [
- {
- content: 'Hello! How can I assist you today?',
- type: 'ai',
- },
- ],
+ values: {
+ messages: [
+ {
+ content: 'Hello! How can I assist you today?',
+ type: 'ai',
},
- asNode: 'call_model',
- },
- ],
+ ],
+ },
+ asNode: 'call_model', // Assistant's response
},
],
- });
-
- console.log(thread);
- ```
-
-
- ```bash
- curl --request POST \
- --url /threads \
- --header 'Content-Type: application/json' \
- --data '{"metadata":{"graph_id":"agent"},"supersteps":[{"updates":[{"values":{},"as_node":"__input__"}]},{"updates":[{"values":{"messages":[{"type":"human","content":"hello"}]},"as_node":"__start__"}]},{"updates":[{"values":{"messages":[{"content":"Hello\u0021 How can I assist you today?","type":"ai"}]},"as_node":"call_model"}]}]}'
- ```
-
-
+ },
+ ],
+});
+
+console.log(thread);
+```
+
+```bash cURL
+curl --request POST \
+    --url <DEPLOYMENT_URL>/threads \
+ --header 'Content-Type: application/json' \
+ --data '{"metadata":{"graph_id":"agent"},"supersteps":[{"updates":[{"values":{},"as_node":"__input__"}]},{"updates":[{"values":{"messages":[{"type":"human","content":"hello"}]},"as_node":"__start__"}]},{"updates":[{"values":{"messages":[{"content":"Hello\u0021 How can I assist you today?","type":"ai"}]},"as_node":"call_model"}]}]}'
+```
+</CodeGroup>
Output:
@@ -235,36 +306,55 @@ Output:
}
```
-## List threads
+
+
+
+You can also create threads directly from the [LangSmith UI](https://smith.langchain.com):
-### LangGraph SDK
+1. Navigate to your [deployment](/langsmith/deployments).
+2. Select the **Threads** tab.
+3. Click **+ New thread**.
+4. Optionally provide metadata or initial state for the thread.
+5. Click **Create thread**.
-To list threads, use the [LangGraph SDK](/langsmith/sdk) `search` method. This will list the threads in the application that match the provided filters. See the @[Python][ThreadsClient.search] and [JS](https://reference.langchain.com/javascript/classes/_langchain_langgraph-sdk.client.ThreadsClient.html#search) SDK reference docs for more information.
+The newly created thread will appear in the threads table and can be used for runs immediately.
-#### Filter by thread status
+
+
-Use the `status` field to filter threads based on their status. Supported values are `idle`, `busy`, `interrupted`, and `error`. See @[here][ThreadStatus] for information on each status. For example, to view `idle` threads:
+## List threads
-
- ```python
- print(await client.threads.search(status="idle",limit=1))
- ```
-
-
- ```js
- console.log(await client.threads.search({ status: "idle", limit: 1 }));
- ```
-
-
- ```bash
- curl --request POST \
- --url /threads/search \
- --header 'Content-Type: application/json' \
- --data '{"status": "idle", "limit": 1}'
- ```
-
-
+
+
+To list threads, use the `search` method. This returns the threads in the application that match the provided filters.
+
+### Filter by thread status
+
+Use the `status` field to filter threads based on their status. Supported values are `idle`, `busy`, `interrupted`, and `error`. For example, to view `idle` threads:
+
+<CodeGroup>
+```python Python
+# Search for idle threads
+# The status filter accepts: idle, busy, interrupted, error
+print(await client.threads.search(status="idle", limit=1))
+```
+
+```javascript JavaScript
+// Search for idle threads
+// The status filter accepts: idle, busy, interrupted, error
+console.log(await client.threads.search({ status: "idle", limit: 1 }));
+```
+
+```bash cURL
+curl --request POST \
+--url /threads/search \
+--header 'Content-Type: application/json' \
+--data '{"status": "idle", "limit": 1}'
+```
+</CodeGroup>
+
+For more information, refer to the @[Python][ThreadsClient.search] and @[JS][ThreadsClient.search] SDK docs, or the @[REST API][ThreadsAPI.search] reference.
Output:
@@ -281,30 +371,30 @@ Output:
]
```
-#### Filter by metadata
+### Filter by metadata
-The `search` method allows you to filter on metadata:
+The `search` method allows you to filter on metadata. This is useful for finding threads associated with specific graphs, users, or custom metadata you've added to threads:
-
-
- ```python
- print((await client.threads.search(metadata={"graph_id":"agent"},limit=1)))
- ```
-
-
- ```js
- console.log((await client.threads.search({ metadata: { "graph_id": "agent" }, limit: 1 })));
- ```
-
-
- ```bash
- curl --request POST \
- --url /threads/search \
- --header 'Content-Type: application/json' \
- --data '{"metadata": {"graph_id":"agent"}, "limit": 1}'
- ```
-
-
+<CodeGroup>
+```python Python
+# Search for threads with specific metadata
+# Metadata filtering is useful for organizing threads by graph, user, or custom tags
+print((await client.threads.search(metadata={"graph_id":"agent"}, limit=1)))
+```
+
+```javascript JavaScript
+// Search for threads with specific metadata
+// Metadata filtering is useful for organizing threads by graph, user, or custom tags
+console.log((await client.threads.search({ metadata: { "graph_id": "agent" }, limit: 1 })));
+```
+
+```bash cURL
+curl --request POST \
+--url /threads/search \
+--header 'Content-Type: application/json' \
+--data '{"metadata": {"graph_id":"agent"}, "limit": 1}'
+```
+</CodeGroup>
Output:
@@ -321,45 +411,55 @@ Output:
]
```
-#### Sorting
+### Sorting
-The SDK also supports sorting threads by `thread_id`, `status`, `created_at`, and `updated_at` using the `sort_by` and `sort_order` params.
+The SDK also supports sorting threads by `thread_id`, `status`, `created_at`, and `updated_at` using the `sort_by` and `sort_order` parameters.
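+For example (a sketch; assumes the `client` from the earlier examples):
+
+```python
+# Hypothetical: the five most recently updated threads first
+threads = await client.threads.search(
+    sort_by="updated_at",
+    sort_order="desc",
+    limit=5,
+)
+```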
-### LangSmith UI
+
+
-You can also view threads in a deployment via the LangSmith UI.
+You can also view and manage threads in a deployment via the [LangSmith UI](https://smith.langchain.com):
-Inside your deployment, select the "Threads" tab. This will load a table of all of the threads in your deployment.
+1. Navigate to your [deployment](/langsmith/deployments).
+2. Select the **Threads** tab.
-To filter by thread status, select a status in the top bar. To sort by a supported property, click on the arrow icon for the desired column.
+This will load a table of all threads in your deployment.
-## Inspect threads
+**Filter by thread status:** Select a status in the top bar to filter threads by `idle`, `busy`, `interrupted`, or `error`.
-### LangGraph SDK
+**Sort threads:** Click on the arrow icon for any column header to sort by that property (`thread_id`, `status`, `created_at`, or `updated_at`).
-#### Get Thread
+
+
-To view a specific thread given its `thread_id`, use the `get` method:
+## Inspect threads
-
- ```python
- print((await client.threads.get()))
- ```
-
-
- ```js
- console.log((await client.threads.get()));
- ```
-
-
- ```bash
- curl --request GET \
- --url /threads/ \
- --header 'Content-Type: application/json'
- ```
-
-
+
+
+### Get thread
+
+To view a specific thread given its `thread_id`, use the @[`get`][ThreadsClient.get] method:
+
+<CodeGroup>
+```python Python
+# Retrieve a specific thread by its ID
+# Returns the thread metadata including status, creation time, and metadata
+print((await client.threads.get(<THREAD_ID>)))
+```
+
+```javascript JavaScript
+// Retrieve a specific thread by its ID
+// Returns the thread metadata including status, creation time, and metadata
+console.log((await client.threads.get(<THREAD_ID>)));
+```
+
+```bash cURL
+curl --request GET \
+--url <DEPLOYMENT_URL>/threads/<THREAD_ID> \
+--header 'Content-Type: application/json'
+```
+</CodeGroup>
Output:
@@ -374,29 +474,31 @@ Output:
}
```
-#### Inspect thread state
+For more information, refer to the @[Python][ThreadsClient.get] and @[JS][ThreadsClient.get] SDK docs, or the @[REST API][ThreadsAPI.get] reference.
-To view the current state of a given thread, use the `get_state` method:
+### Inspect thread state
-
-
- ```python
- print((await client.threads.get_state()))
- ```
-
-
- ```js
- console.log((await client.threads.getState()));
- ```
-
-
- ```bash
- curl --request GET \
- --url /threads//state \
- --header 'Content-Type: application/json'
- ```
-
-
+To view the current state of a given thread, use the @[`get_state`][ThreadsClient.get_state] method. This returns the current values, next nodes to execute, and checkpoint information:
+
+<CodeGroup>
+```python Python
+# Get the current state of a thread
+# Returns values, next nodes, tasks, checkpoint info, and metadata
+print((await client.threads.get_state(<THREAD_ID>)))
+```
+
+```javascript JavaScript
+// Get the current state of a thread
+// Returns values, next nodes, tasks, checkpoint info, and metadata
+console.log((await client.threads.getState(<THREAD_ID>)));
+```
+
+```bash cURL
+curl --request GET \
+--url <DEPLOYMENT_URL>/threads/<THREAD_ID>/state \
+--header 'Content-Type: application/json'
+```
+</CodeGroup>
Output:
@@ -463,39 +565,93 @@ Output:
}
```
-Optionally, to view the state of a thread at a given checkpoint, simply pass in the checkpoint id (or the entire checkpoint object):
+For more information, refer to the @[Python][ThreadsClient.get_state] and @[JS][ThreadsClient.get_state] SDK docs, or the @[REST API][ThreadsAPI.get_state] reference.
-
-
- ```python
- thread_state = await client.threads.get_state(
- thread_id=
- checkpoint_id=
- )
- ```
-
-
- ```js
- const threadState = await client.threads.getState(, );
- ```
-
-
- ```bash
- curl --request GET \
- --url /threads//state/ \
- --header 'Content-Type: application/json'
- ```
-
-
+Optionally, to view the state of a thread at a given checkpoint, pass in the checkpoint ID. This is useful for inspecting the thread state at a specific point in its execution history:
-#### Inspect full thread history
+<CodeGroup>
+```python Python
+# Get thread state at a specific checkpoint
+# Useful for inspecting historical state or debugging
+thread_state = await client.threads.get_state(
+    thread_id=<THREAD_ID>,
+    checkpoint_id=<CHECKPOINT_ID>
+)
+```
-To view a thread's history, use the `get_history` method. This returns a list of every state the thread experienced. For more information see the @[Python][ThreadsClient.get_history] and [JS](https://reference.langchain.com/javascript/classes/_langchain_langgraph-sdk.client.ThreadsClient.html#gethistory) reference docs.
+```javascript JavaScript
+// Get thread state at a specific checkpoint
+// Useful for inspecting historical state or debugging
+const threadState = await client.threads.getState(<THREAD_ID>, <CHECKPOINT_ID>);
+```
-### LangSmith UI
+```bash cURL
+curl --request GET \
+--url <DEPLOYMENT_URL>/threads/<THREAD_ID>/state/<CHECKPOINT_ID> \
+--header 'Content-Type: application/json'
+```
+</CodeGroup>
-You can also view threads in a deployment via the LangSmith UI.
+### Inspect full thread history
-Inside your deployment, select the "Threads" tab. This will load a table of all of the threads in your deployment.
+To view a thread's history, use the @[`get_history`][ThreadsClient.get_history] method. This returns a list of every state the thread experienced, allowing you to trace the full execution path:
-Select a thread to inspect its current state. To view its full history and for further debugging, open the thread in [Studio](/langsmith/studio).
+<CodeGroup>
+```python Python
+# Get the full history of a thread
+# Returns a list of all state snapshots from the thread's execution
+history = await client.threads.get_history(
+    thread_id=<THREAD_ID>,
+ limit=10 # Optional: limit the number of states returned
+)
+
+for state in history:
+ print(f"Checkpoint: {state['checkpoint_id']}")
+ print(f"Step: {state['metadata']['step']}")
+```
+
+```javascript JavaScript
+// Get the full history of a thread
+// Returns a list of all state snapshots from the thread's execution
+const history = await client.threads.getHistory(
+    <THREAD_ID>,
+ {
+ limit: 10 // Optional: limit the number of states returned
+ }
+);
+
+for (const state of history) {
+ console.log(`Checkpoint: ${state.checkpoint_id}`);
+ console.log(`Step: ${state.metadata.step}`);
+}
+```
+
+```bash cURL
+curl --request POST \
+--url <DEPLOYMENT_URL>/threads/<THREAD_ID>/history \
+--header 'Content-Type: application/json' \
+--data '{"limit": 10}'
+```
+
+</CodeGroup>
+This method is particularly useful for:
+- Debugging execution flow by seeing how state evolved.
+- Understanding decision points in your graph's execution.
+- Auditing conversation history and state changes.
+- Replaying or analyzing past interactions.
+
+For more information, refer to the @[Python][ThreadsClient.get_history] and @[JS][ThreadsClient.get_history] SDK docs, or the @[REST API][ThreadsAPI.get_history] reference.
+
+
+
+
+You can also view and inspect threads in the [LangSmith UI](https://smith.langchain.com):
+
+1. Navigate to your [deployment](/langsmith/deployments).
+2. Select the **Threads** tab to view all threads.
+3. Click on a thread to inspect its current state.
+
+To view the full thread history and perform detailed debugging, click **Open in Studio** to open the thread in [Studio](/langsmith/studio). Studio provides a visual interface for exploring the thread's execution history, state changes, and checkpoint details.
+
+
+