diff --git a/.github/workflows/dojo-e2e.yml b/.github/workflows/dojo-e2e.yml
index 2030f53b7..e1b2e7847 100644
--- a/.github/workflows/dojo-e2e.yml
+++ b/.github/workflows/dojo-e2e.yml
@@ -26,10 +26,14 @@ jobs:
           test_path: tests/crewAITests
           services: ["dojo","crew-ai"]
           wait_on: http://localhost:9999,tcp:localhost:8003
-        - suite: langgraph
-          test_path: tests/langgraphTests
-          services: ["dojo","langgraph-platform-python","langgraph-platform-typescript"]
-          wait_on: http://localhost:9999,tcp:localhost:8005,tcp:localhost:8006
+        - suite: langgraph-python
+          test_path: tests/langgraphPythonTests
+          services: ["dojo","langgraph-platform-python"]
+          wait_on: http://localhost:9999,tcp:localhost:8005
+        - suite: langgraph-typescript
+          test_path: tests/langgraphTypescriptTests
+          services: ["dojo","langgraph-platform-typescript"]
+          wait_on: http://localhost:9999,tcp:localhost:8006
         - suite: langgraph-fastapi
           test_path: tests/langgraphFastAPITests
           services: ["dojo","langgraph-fastapi"]
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTests/agenticChatPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/agenticChatPage.spec.ts
similarity index 100%
rename from typescript-sdk/apps/dojo/e2e/tests/langgraphTests/agenticChatPage.spec.ts
rename to typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/agenticChatPage.spec.ts
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTests/agenticGenUI.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/agenticGenUI.spec.ts
similarity index 100%
rename from typescript-sdk/apps/dojo/e2e/tests/langgraphTests/agenticGenUI.spec.ts
rename to typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/agenticGenUI.spec.ts
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTests/humanInTheLoopPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/humanInTheLoopPage.spec.ts
similarity index 100%
rename from typescript-sdk/apps/dojo/e2e/tests/langgraphTests/humanInTheLoopPage.spec.ts
rename to typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/humanInTheLoopPage.spec.ts
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTests/predictiveStateUpdatePage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/predictiveStateUpdatePage.spec.ts
similarity index 100%
rename from typescript-sdk/apps/dojo/e2e/tests/langgraphTests/predictiveStateUpdatePage.spec.ts
rename to typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/predictiveStateUpdatePage.spec.ts
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTests/sharedStatePage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/sharedStatePage.spec.ts
similarity index 100%
rename from typescript-sdk/apps/dojo/e2e/tests/langgraphTests/sharedStatePage.spec.ts
rename to typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/sharedStatePage.spec.ts
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTests/subgraphsPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/subgraphsPage.spec.ts
similarity index 100%
rename from typescript-sdk/apps/dojo/e2e/tests/langgraphTests/subgraphsPage.spec.ts
rename to typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/subgraphsPage.spec.ts
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTests/toolBasedGenUIPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/toolBasedGenUIPage.spec.ts
similarity index 100%
rename from typescript-sdk/apps/dojo/e2e/tests/langgraphTests/toolBasedGenUIPage.spec.ts
rename to typescript-sdk/apps/dojo/e2e/tests/langgraphPythonTests/toolBasedGenUIPage.spec.ts
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/agenticChatPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/agenticChatPage.spec.ts
new file mode 100644
index 000000000..9ec869deb
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/agenticChatPage.spec.ts
@@ -0,0 +1,114 @@
+import {
+  test,
+  expect,
+  waitForAIResponse,
+  retryOnAIFailure,
+} from "../../test-isolation-helper";
+import { AgenticChatPage } from "../../featurePages/AgenticChatPage";
+
+test("[LangGraph] Agentic Chat sends and receives a message", async ({
+  page,
+}) => {
+  await retryOnAIFailure(async () => {
+    await page.goto(
+      "/langgraph-typescript/feature/agentic_chat"
+    );
+
+    const chat = new AgenticChatPage(page);
+
+    await chat.openChat();
+    await chat.agentGreeting.waitFor({ state: "visible" });
+    await chat.sendMessage("Hi, I am duaa");
+
+    await waitForAIResponse(page);
+    await chat.assertUserMessageVisible("Hi, I am duaa");
+    await chat.assertAgentReplyVisible(/Hello/i);
+  });
+});
+
+test("[LangGraph] Agentic Chat changes background on message", async ({
+  page,
+}) => {
+  await retryOnAIFailure(async () => {
+    await page.goto(
+      "/langgraph-typescript/feature/agentic_chat"
+    );
+
+    const chat = new AgenticChatPage(page);
+
+    await chat.openChat();
+    await chat.agentGreeting.waitFor({ state: "visible" });
+
+    // Store initial background color
+    const initialBackground = await chat.getBackground();
+    console.log("Initial background color:", initialBackground);
+
+    // 1. Send message to change background to blue
+    await chat.sendMessage("Hi change the background color to blue");
+    await chat.assertUserMessageVisible(
+      "Hi change the background color to blue"
+    );
+    await waitForAIResponse(page);
+
+    const backgroundBlue = await chat.getBackground();
+    expect(backgroundBlue).not.toBe(initialBackground);
+    // Accept a color name, an rgb() value, or a hex code
+    expect(backgroundBlue.toLowerCase()).toMatch(/blue|rgb\(.*,.*,.*\)|#[0-9a-f]{6}/);
+
+    // 2. Change to pink
+    await chat.sendMessage("Hi change the background color to pink");
+    await chat.assertUserMessageVisible(
+      "Hi change the background color to pink"
+    );
+    await waitForAIResponse(page);
+
+    const backgroundPink = await chat.getBackground();
+    expect(backgroundPink).not.toBe(backgroundBlue);
+    // Accept a color name, an rgb() value, or a hex code
+    expect(backgroundPink.toLowerCase()).toMatch(/pink|rgb\(.*,.*,.*\)|#[0-9a-f]{6}/);
+  });
+});
+
+test("[LangGraph] Agentic Chat retains memory of user messages during a conversation", async ({
+  page,
+}) => {
+  await retryOnAIFailure(async () => {
+    await page.goto(
+      "/langgraph-typescript/feature/agentic_chat"
+    );
+
+    const chat = new AgenticChatPage(page);
+    await chat.openChat();
+    await chat.agentGreeting.waitFor({ state: "visible" });
+
+    await chat.sendMessage("Hey there");
+    await chat.assertUserMessageVisible("Hey there");
+    await waitForAIResponse(page);
+    await chat.assertAgentReplyVisible(/how can I assist you/i);
+
+    const favFruit = "Mango";
+    await chat.sendMessage(`My favorite fruit is ${favFruit}`);
+    await chat.assertUserMessageVisible(`My favorite fruit is ${favFruit}`);
+    await waitForAIResponse(page);
+    await chat.assertAgentReplyVisible(new RegExp(favFruit, "i"));
+
+    await chat.sendMessage("and I love listening to Kaavish");
+    await chat.assertUserMessageVisible("and I love listening to Kaavish");
+    await waitForAIResponse(page);
+    await chat.assertAgentReplyVisible(/Kaavish/i);
+
+    await chat.sendMessage("tell me an interesting fact about Moon");
+    await chat.assertUserMessageVisible(
+      "tell me an interesting fact about Moon"
+    );
+    await waitForAIResponse(page);
+    await chat.assertAgentReplyVisible(/Moon/i);
+
+    await chat.sendMessage("Can you remind me what my favorite fruit is?");
+    await chat.assertUserMessageVisible(
+      "Can you remind me what my favorite fruit is?"
+    );
+    await waitForAIResponse(page);
+    await chat.assertAgentReplyVisible(new RegExp(favFruit, "i"));
+  });
+});
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/agenticGenUI.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/agenticGenUI.spec.ts
new file mode 100644
index 000000000..555174aba
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/agenticGenUI.spec.ts
@@ -0,0 +1,69 @@
+import { test, expect } from "@playwright/test";
+import { AgenticGenUIPage } from "../../pages/langGraphPages/AgenticUIGenPage";
+
+test.describe("Agent Generative UI Feature", () => {
+  test("[LangGraph] should interact with the chat to get a planner on prompt", async ({
+    page,
+  }) => {
+    const genUIAgent = new AgenticGenUIPage(page);
+
+    await page.goto(
+      "/langgraph-typescript/feature/agentic_generative_ui"
+    );
+
+    await genUIAgent.openChat();
+    await genUIAgent.sendMessage("Hi");
+    await genUIAgent.sendButton.click();
+    await genUIAgent.assertAgentReplyVisible(/Hello/);
+
+    await genUIAgent.sendMessage("Give me a plan to make brownies");
+    await genUIAgent.sendButton.click();
+
+    await expect(genUIAgent.agentPlannerContainer).toBeVisible({ timeout: 15000 });
+
+    await genUIAgent.plan();
+
+    await page.waitForFunction(
+      () => {
+        const messages = Array.from(document.querySelectorAll('.copilotKitAssistantMessage'));
+        const lastMessage = messages[messages.length - 1];
+        const content = lastMessage?.textContent?.trim() || '';
+
+        return messages.length >= 3 && content.length > 0;
+      },
+      { timeout: 30000 }
+    );
+  });
+
+  test("[LangGraph] should interact with the chat using predefined prompts and perform steps", async ({
+    page,
+  }) => {
+    const genUIAgent = new AgenticGenUIPage(page);
+
+    await page.goto(
+      "/langgraph-typescript/feature/agentic_generative_ui"
+    );
+
+    await genUIAgent.openChat();
+    await genUIAgent.sendMessage("Hi");
+    await genUIAgent.sendButton.click();
+    await genUIAgent.assertAgentReplyVisible(/Hello/);
+
+    await genUIAgent.sendMessage("Go to Mars");
+    await genUIAgent.sendButton.click();
+
+    await expect(genUIAgent.agentPlannerContainer).toBeVisible({ timeout: 15000 });
+    await genUIAgent.plan();
+
+    await page.waitForFunction(
+      () => {
+        const messages = Array.from(document.querySelectorAll('.copilotKitAssistantMessage'));
+        const lastMessage = messages[messages.length - 1];
+        const content = lastMessage?.textContent?.trim() || '';
+
+        return messages.length >= 3 && content.length > 0;
+      },
+      { timeout: 30000 }
+    );
+  });
+});
\ No newline at end of file
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/humanInTheLoopPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/humanInTheLoopPage.spec.ts
new file mode 100644
index 000000000..5c1e2f756
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/humanInTheLoopPage.spec.ts
@@ -0,0 +1,91 @@
+import { test, expect, waitForAIResponse, retryOnAIFailure } from "../../test-isolation-helper";
+import { HumanInLoopPage } from "../../pages/langGraphPages/HumanInLoopPage";
+
+test.describe("Human in the Loop Feature", () => {
+  test("[LangGraph] should interact with the chat and perform steps", async ({
+    page,
+  }) => {
+    await retryOnAIFailure(async () => {
+      const humanInLoop = new HumanInLoopPage(page);
+
+      await page.goto(
+        "/langgraph-typescript/feature/human_in_the_loop"
+      );
+
+      await humanInLoop.openChat();
+
+      await humanInLoop.sendMessage("Hi");
+      await expect(humanInLoop.agentGreeting).toBeVisible();
+
+      await humanInLoop.sendMessage(
+        "Give me a plan to make brownies, there should be only one step with eggs and one step with oven, this is a strict requirement so adhere"
+      );
+      await waitForAIResponse(page);
+      await expect(humanInLoop.plan).toBeVisible({ timeout: 10000 });
+
+      const itemText = "eggs";
+      await page.waitForTimeout(5000);
+      await humanInLoop.uncheckItem(itemText);
+      await humanInLoop.performSteps();
+      await page.waitForFunction(
+        () => {
+          const messages = Array.from(document.querySelectorAll('.copilotKitAssistantMessage'));
+          const lastMessage = messages[messages.length - 1];
+          const content = lastMessage?.textContent?.trim() || '';
+
+          return messages.length >= 3 && content.length > 0;
+        },
+        { timeout: 30000 }
+      );
+
+      await humanInLoop.sendMessage(
+        `Does the planner include ${itemText}? ⚠️ Reply with only the word 'Yes' or 'No' (no explanation, no punctuation).`
+      );
+      await waitForAIResponse(page);
+    });
+  });
+
+  test("[LangGraph] should interact with the chat using predefined prompts and perform steps", async ({
+    page,
+  }) => {
+    await retryOnAIFailure(async () => {
+      const humanInLoop = new HumanInLoopPage(page);
+      await page.goto(
+        "/langgraph-typescript/feature/human_in_the_loop"
+      );
+
+      await humanInLoop.openChat();
+
+      await humanInLoop.sendMessage("Hi");
+      await expect(humanInLoop.agentGreeting).toBeVisible();
+
+      await humanInLoop.sendMessage(
+        "Plan a mission to Mars with the first step being Start The Planning"
+      );
+      await waitForAIResponse(page);
+      await expect(humanInLoop.plan).toBeVisible({ timeout: 10000 });
+
+      const uncheckedItem = "Start The Planning";
+
+      await page.waitForTimeout(5000);
+      await humanInLoop.uncheckItem(uncheckedItem);
+      await humanInLoop.performSteps();
+
+      await page.waitForFunction(
+        () => {
+          const messages = Array.from(document.querySelectorAll('.copilotKitAssistantMessage'));
+          const lastMessage = messages[messages.length - 1];
+          const content = lastMessage?.textContent?.trim() || '';
+
+          return messages.length >= 3 && content.length > 0;
+        },
+        { timeout: 30000 }
+      );
+
+      await humanInLoop.sendMessage(
+        `Does the planner include ${uncheckedItem}? ⚠️ Reply with only the word 'Yes' or 'No' (no explanation, no punctuation).`
+      );
+      await waitForAIResponse(page);
+    });
+  });
+});
\ No newline at end of file
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/predictiveStateUpdatePage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/predictiveStateUpdatePage.spec.ts
new file mode 100644
index 000000000..9a7f2dea2
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/predictiveStateUpdatePage.spec.ts
@@ -0,0 +1,91 @@
+import {
+  test,
+  expect,
+  retryOnAIFailure,
+} from "../../test-isolation-helper";
+import { PredictiveStateUpdatesPage } from "../../pages/langGraphPages/PredictiveStateUpdatesPage";
+
+test.describe.fixme("Predictive State Updates Feature", () => {
+  test("[LangGraph] should interact with agent and approve asked changes", async ({
+    page,
+  }) => {
+    await retryOnAIFailure(async () => {
+      const predictiveStateUpdates = new PredictiveStateUpdatesPage(page);
+
+      await page.goto(
+        "/langgraph-typescript/feature/predictive_state_updates"
+      );
+
+      await predictiveStateUpdates.openChat();
+      await page.waitForTimeout(2000);
+
+      await predictiveStateUpdates.sendMessage(
+        "Give me a story for a dragon called Atlantis in document"
+      );
+      await page.waitForTimeout(2000);
+
+      await predictiveStateUpdates.getPredictiveResponse();
+      await predictiveStateUpdates.getUserApproval();
+      await predictiveStateUpdates.confirmedChangesResponse.isVisible();
+      const dragonName = await predictiveStateUpdates.verifyAgentResponse(
+        "Atlantis"
+      );
+      expect(dragonName).not.toBeNull();
+
+      await page.waitForTimeout(3000);
+
+      await predictiveStateUpdates.sendMessage("Change dragon name to Lola");
+      await page.waitForTimeout(2000);
+
+      await predictiveStateUpdates.verifyHighlightedText();
+      await predictiveStateUpdates.getUserApproval();
+      await predictiveStateUpdates.confirmedChangesResponse.isVisible();
+      const dragonNameNew = await predictiveStateUpdates.verifyAgentResponse(
+        "Lola"
+      );
+      expect(dragonNameNew).not.toBe(dragonName);
+    });
+  });
+
+  test.fixme("[LangGraph] should interact with agent and reject asked changes", async ({
+    page,
+  }) => {
+    await retryOnAIFailure(async () => {
+      const predictiveStateUpdates = new PredictiveStateUpdatesPage(page);
+
+      await page.goto(
+        "/langgraph-typescript/feature/predictive_state_updates"
+      );
+
+      await predictiveStateUpdates.openChat();
+      await page.waitForTimeout(2000);
+
+      await predictiveStateUpdates.sendMessage(
+        "Give me a story for a dragon called Atlantis in document"
+      );
+      await page.waitForTimeout(2000);
+
+      await predictiveStateUpdates.getPredictiveResponse();
+      await predictiveStateUpdates.getUserApproval();
+      await predictiveStateUpdates.confirmedChangesResponse.isVisible();
+      const dragonName = await predictiveStateUpdates.verifyAgentResponse(
+        "Atlantis"
+      );
+      expect(dragonName).not.toBeNull();
+
+      await page.waitForTimeout(3000);
+
+      await predictiveStateUpdates.sendMessage("Change dragon name to Lola");
+      await page.waitForTimeout(2000);
+
+      await predictiveStateUpdates.verifyHighlightedText();
+      await predictiveStateUpdates.getUserRejection();
+      await predictiveStateUpdates.rejectedChangesResponse.isVisible();
+      const dragonNameAfterRejection = await predictiveStateUpdates.verifyAgentResponse(
+        "Atlantis"
+      );
+      expect(dragonNameAfterRejection).toBe(dragonName);
+      expect(dragonNameAfterRejection).not.toBe("Lola");
+    });
+  });
+});
\ No newline at end of file
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/sharedStatePage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/sharedStatePage.spec.ts
new file mode 100644
index 000000000..873ef3871
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/sharedStatePage.spec.ts
@@ -0,0 +1,56 @@
+import { test, expect } from "@playwright/test";
+import { SharedStatePage } from "../../featurePages/SharedStatePage";
+
+test.describe("Shared State Feature", () => {
+  test("[LangGraph] should interact with the chat to get a recipe on prompt", async ({
+    page,
+  }) => {
+    const sharedStateAgent = new SharedStatePage(page);
+
+    // Navigate to the shared state demo
+    await page.goto(
+      "/langgraph-typescript/feature/shared_state"
+    );
+
+    await sharedStateAgent.openChat();
+    await sharedStateAgent.sendMessage('Please give me a pasta recipe of your choosing, but one of the ingredients should be "Pasta"');
+    await sharedStateAgent.loader();
+    await sharedStateAgent.awaitIngredientCard('Pasta');
+    await sharedStateAgent.getInstructionItems(
+      sharedStateAgent.instructionsContainer
+    );
+  });
+
+  test("[LangGraph] should share state between UI and chat", async ({
+    page,
+  }) => {
+    const sharedStateAgent = new SharedStatePage(page);
+
+    await page.goto(
+      "/langgraph-typescript/feature/shared_state"
+    );
+
+    await sharedStateAgent.openChat();
+
+    // Add new ingredient via UI
+    await sharedStateAgent.addIngredient.click();
+
+    // Fill in the new ingredient details
+    const newIngredientCard = page.locator('.ingredient-card').last();
+    await newIngredientCard.locator('.ingredient-name-input').fill('Potatoes');
+    await newIngredientCard.locator('.ingredient-amount-input').fill('12');
+
+    // Wait for UI to update
+    await page.waitForTimeout(1000);
+
+    // Ask chat for all ingredients
+    await sharedStateAgent.sendMessage("Give me all the ingredients");
+    await sharedStateAgent.loader();
+
+    // Verify chat response includes both existing and new ingredients
+    await expect(sharedStateAgent.agentMessage.getByText(/Potatoes/)).toBeVisible();
+    await expect(sharedStateAgent.agentMessage.getByText(/12/)).toBeVisible();
+    await expect(sharedStateAgent.agentMessage.getByText(/Carrots/)).toBeVisible();
+    await expect(sharedStateAgent.agentMessage.getByText(/All-Purpose Flour/)).toBeVisible();
+  });
+});
\ No newline at end of file
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/subgraphsPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/subgraphsPage.spec.ts
new file mode 100644
index 000000000..12a9a3007
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/subgraphsPage.spec.ts
@@ -0,0 +1,148 @@
+import { test, expect, waitForAIResponse, retryOnAIFailure } from "../../test-isolation-helper";
+import { SubgraphsPage } from "../../pages/langGraphPages/SubgraphsPage";
+
+test.describe("Subgraphs Travel Agent Feature", () => {
+  test("[LangGraph] should complete full travel planning flow with feature validation", async ({
+    page,
+  }) => {
+    await retryOnAIFailure(async () => {
+      const subgraphsPage = new SubgraphsPage(page);
+
+      await page.goto("/langgraph-typescript/feature/subgraphs");
+
+      await subgraphsPage.openChat();
+
+      // Initiate travel planning
+      await subgraphsPage.sendMessage("Help me plan a trip to San Francisco");
+      await waitForAIResponse(page);
+
+      // FEATURE TEST: Wait for supervisor coordination
+      await subgraphsPage.waitForSupervisorCoordination();
+      await expect(subgraphsPage.supervisorIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Supervisor indicator not found, verifying through content");
+      });
+
+      // FEATURE TEST: Flights Agent - verify agent indicator becomes active
+      await subgraphsPage.waitForFlightsAgent();
+      await expect(subgraphsPage.flightsAgentIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Flights agent indicator not found, checking content instead");
+      });
+
+      await subgraphsPage.verifyStaticFlightData();
+
+      // FEATURE TEST: Test interrupt pause behavior - flow shouldn't auto-proceed
+      await page.waitForTimeout(3000);
+      // await expect(page.getByText(/hotel.*options|accommodation|Zephyr|Ritz-Carlton|Hotel Zoe/i)).not.toBeVisible();
+
+      // Select KLM flight through interrupt
+      await subgraphsPage.selectFlight('KLM');
+
+      // FEATURE TEST: Verify immediate state update after selection
+      await expect(subgraphsPage.selectedFlight).toContainText('KLM').catch(async () => {
+        await expect(page.getByText(/KLM/i)).toBeVisible({ timeout: 2000 });
+      });
+
+      await waitForAIResponse(page);
+
+      // FEATURE TEST: Hotels Agent - verify agent indicator switches
+      await subgraphsPage.waitForHotelsAgent();
+      await expect(subgraphsPage.hotelsAgentIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Hotels agent indicator not found, checking content instead");
+      });
+
+      await subgraphsPage.verifyStaticHotelData();
+
+      // FEATURE TEST: Test interrupt pause behavior again
+      await page.waitForTimeout(3000);
+
+      // Select Hotel Zoe through interrupt
+      await subgraphsPage.selectHotel('Zoe');
+
+      // FEATURE TEST: Verify hotel selection immediately updates state
+      await expect(subgraphsPage.selectedHotel).toContainText('Zoe').catch(async () => {
+        await expect(page.getByText(/Hotel Zoe|Zoe/i)).toBeVisible({ timeout: 2000 });
+      });
+
+      await waitForAIResponse(page);
+
+      // FEATURE TEST: Experiences Agent - verify agent indicator becomes active
+      await subgraphsPage.waitForExperiencesAgent();
+      await expect(subgraphsPage.experiencesAgentIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Experiences agent indicator not found, checking content instead");
+      });
+
+      await subgraphsPage.verifyStaticExperienceData();
+    });
+  });
+
+  test("[LangGraph] should handle different selections and demonstrate supervisor routing patterns", async ({
+    page,
+  }) => {
+    await retryOnAIFailure(async () => {
+      const subgraphsPage = new SubgraphsPage(page);
+
+      await page.goto("/langgraph-typescript/feature/subgraphs");
+
+      await subgraphsPage.openChat();
+
+      await subgraphsPage.sendMessage("I want to visit San Francisco from Amsterdam");
+      await waitForAIResponse(page);
+
+      // FEATURE TEST: Wait for supervisor coordination
+      await subgraphsPage.waitForSupervisorCoordination();
+      await expect(subgraphsPage.supervisorIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Supervisor indicator not found, verifying through content");
+      });
+
+      // FEATURE TEST: Flights Agent - verify agent indicator becomes active
+      await subgraphsPage.waitForFlightsAgent();
+      await expect(subgraphsPage.flightsAgentIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Flights agent indicator not found, checking content instead");
+      });
+
+      await subgraphsPage.verifyStaticFlightData();
+
+      // FEATURE TEST: Test different selection - United instead of KLM
+      await subgraphsPage.selectFlight('United');
+
+      // FEATURE TEST: Verify immediate state update after selection
+      await expect(subgraphsPage.selectedFlight).toContainText('United').catch(async () => {
+        await expect(page.getByText(/United/i)).toBeVisible({ timeout: 2000 });
+      });
+
+      await waitForAIResponse(page);
+
+      // FEATURE TEST: Hotels Agent - verify agent indicator switches
+      await subgraphsPage.waitForHotelsAgent();
+      await expect(subgraphsPage.hotelsAgentIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Hotels agent indicator not found, checking content instead");
+      });
+
+      // FEATURE TEST: Test different hotel selection - Ritz-Carlton
+      await subgraphsPage.selectHotel('Ritz-Carlton');
+
+      // FEATURE TEST: Verify hotel selection immediately updates state
+      await expect(subgraphsPage.selectedHotel).toContainText('Ritz-Carlton').catch(async () => {
+        await expect(page.getByText(/Ritz-Carlton/i)).toBeVisible({ timeout: 2000 });
+      });
+
+      await waitForAIResponse(page);
+
+      // FEATURE TEST: Experiences Agent - verify agent indicator becomes active
+      await subgraphsPage.waitForExperiencesAgent();
+      await expect(subgraphsPage.experiencesAgentIndicator).toBeVisible({ timeout: 10000 }).catch(() => {
+        console.log("Experiences agent indicator not found, checking content instead");
+      });
+
+      // FEATURE TEST: Verify subgraph streaming detection - experiences agent is active
+      await expect(subgraphsPage.experiencesAgentIndicator).toHaveClass(/active/).catch(() => {
+        console.log("Experiences agent not active, checking content instead");
+      });
+
+      // FEATURE TEST: Verify complete state persistence across all agents
+      await expect(subgraphsPage.selectedFlight).toContainText('United'); // Flight selection persisted
+      await expect(subgraphsPage.selectedHotel).toContainText('Ritz-Carlton'); // Hotel selection persisted
+      await subgraphsPage.verifyStaticExperienceData(); // Experiences provided based on selections
+    });
+  });
+});
diff --git a/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/toolBasedGenUIPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/toolBasedGenUIPage.spec.ts
new file mode 100644
index 000000000..f146f2b81
--- /dev/null
+++ b/typescript-sdk/apps/dojo/e2e/tests/langgraphTypescriptTests/toolBasedGenUIPage.spec.ts
@@ -0,0 +1,38 @@
+import { test, expect } from "@playwright/test";
+import { ToolBaseGenUIPage } from "../../featurePages/ToolBaseGenUIPage";
+
+const pageURL =
+  "/langgraph-typescript/feature/tool_based_generative_ui";
+
+test('[LangGraph] Haiku generation and display verification', async ({
+  page,
+}) => {
+  await page.goto(pageURL);
+
+  const genAIAgent = new ToolBaseGenUIPage(page);
+
+  await expect(genAIAgent.haikuAgentIntro).toBeVisible();
+  await genAIAgent.generateHaiku('Generate Haiku for "I will always win"');
+  await genAIAgent.checkGeneratedHaiku();
+  await genAIAgent.checkHaikuDisplay(page);
+});
+
+test('[LangGraph] Haiku generation and UI consistency for two different prompts', async ({
+  page,
+}) => {
+  await page.goto(pageURL);
+
+  const genAIAgent = new ToolBaseGenUIPage(page);
+
+  await expect(genAIAgent.haikuAgentIntro).toBeVisible();
+
+  const prompt1 = 'Generate Haiku for "I will always win"';
+  await genAIAgent.generateHaiku(prompt1);
+  await genAIAgent.checkGeneratedHaiku();
+  await genAIAgent.checkHaikuDisplay(page);
+
+  const prompt2 = 'Generate Haiku for "The moon shines bright"';
+  await genAIAgent.generateHaiku(prompt2);
+  await genAIAgent.checkGeneratedHaiku();
+  await genAIAgent.checkHaikuDisplay(page);
+});
\ No newline at end of file
diff --git a/typescript-sdk/apps/dojo/src/files.json b/typescript-sdk/apps/dojo/src/files.json
index ec37827d2..25ffe46c3 100644
--- a/typescript-sdk/apps/dojo/src/files.json
+++ b/typescript-sdk/apps/dojo/src/files.json
@@ -526,7 +526,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, MessagesAnnotation, StateGraph } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n  type: \"function\",\n  function: {\n    name: \"generate_task_steps_generative_ui\",\n    description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n    parameters: {\n      type: \"object\",\n      properties: {\n        steps: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              description: {\n                type: \"string\",\n                description: \"The text of the step in gerund form\"\n              },\n              status: {\n                type: \"string\",\n                enum: [\"pending\"],\n                description: \"The status of the step, always 'pending'\"\n              }\n            },\n            required: [\"description\", \"status\"]\n          },\n          description: \"An array of 10 step objects, each containing text and status\"\n        }\n      },\n      required: [\"steps\"]\n    }\n  }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n  steps: Annotation<Array<{ description: string; status: string }>>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n  /**\n   * This is the entry point for the flow.\n   */\n\n  if (!state.steps) {\n    state.steps = [];\n  }\n\n  return {\n    steps: state.steps || []\n  };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node.\n   */\n  const systemPrompt = `\n    You are a helpful assistant assisting with any task. \n    When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n    that was provided to you.\n    If you called the function, you MUST NOT repeat the steps in your next response to the user.\n    Just give a very brief summary (one sentence) of what you did with some emojis. \n    Always say you actually did the steps, not merely generated them.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n  \n  // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the write_document tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"steps\",\n    tool: \"generate_task_steps_generative_ui\",\n    tool_argument: \"steps\",\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      PERFORM_TASK_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  const messages = [...state.messages, response];\n\n  // Extract any tool calls from the response\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n    \n    if (toolCall.name === \"generate_task_steps_generative_ui\") {\n      const steps = toolCall.args.steps.map((step: any) => ({\n        description: step.description,\n        status: step.status\n      }));\n      \n      // Add the tool response to messages\n      const toolResponse = {\n        role: \"tool\" as const,\n        content: \"Steps executed.\",\n        tool_call_id: toolCall.id\n      };\n\n      const updatedMessages = [...messages, toolResponse];\n\n      // Simulate executing the steps\n      for (let i = 0; i < steps.length; i++) {\n        // simulate executing the step\n        await new Promise(resolve => setTimeout(resolve, 1000));\n        steps[i].status = \"completed\";\n        // Update the state with the completed step\n        state.steps = steps;\n        // Emit custom events to update the frontend\n        await dispatchCustomEvent(\"manually_emit_state\", state, config);\n      }\n      \n      return {\n        messages: updatedMessages,\n        steps: state.steps\n      };\n    }\n  }\n\n  return {\n    messages: messages,\n    steps: state.steps\n  };\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"start_flow\", startFlow)\n  .addNode(\"chat_node\", chatNode)\n  .addEdge(\"__start__\", \"start_flow\")\n  .addEdge(\"start_flow\", \"chat_node\")\n  .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();",
+      "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, Command, MessagesAnnotation, StateGraph, END } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n  type: \"function\",\n  function: {\n    name: \"generate_task_steps_generative_ui\",\n    description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n    parameters: {\n      type: \"object\",\n      properties: {\n        steps: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              description: {\n                type: \"string\",\n                description: \"The text of the step in gerund form\"\n              },\n              status: {\n                type: \"string\",\n                enum: [\"pending\"],\n                description: \"The status of the step, always 'pending'\"\n              }\n            },\n            required: [\"description\", \"status\"]\n          },\n          description: \"An array of 10 step objects, each containing text and status\"\n        }\n      },\n      required: [\"steps\"]\n    }\n  }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n  steps: Annotation<Array<{ description: string; status: string }>>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n  /**\n   * This is the entry point for the flow.\n   */\n\n  if (!state.steps) {\n    state.steps = [];\n  }\n\n  return {\n    steps: state.steps || []\n  };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node.\n   */\n  const systemPrompt = `\n    You are a helpful assistant assisting with any task. \n    When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n    that was provided to you.\n    If you called the function, you MUST NOT repeat the steps in your next response to the user.\n    Just give a very brief summary (one sentence) of what you did with some emojis. \n    Always say you actually did the steps, not merely generated them.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n  \n  // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the write_document tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"steps\",\n    tool: \"generate_task_steps_generative_ui\",\n    tool_argument: \"steps\",\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      PERFORM_TASK_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  const messages = [...state.messages, response];\n\n  // Extract any tool calls from the response\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n    \n    if (toolCall.name === \"generate_task_steps_generative_ui\") {\n      const steps = toolCall.args.steps.map((step: any) => ({\n        description: step.description,\n        status: step.status\n      }));\n      \n      // Add the tool response to messages\n      const toolResponse = {\n        role: \"tool\" as const,\n        content: \"Steps executed.\",\n        tool_call_id: toolCall.id\n      };\n\n      const updatedMessages = [...messages, toolResponse];\n\n      // Simulate executing the steps\n      for (let i = 0; i < steps.length; i++) {\n        // simulate executing the step\n        await new Promise(resolve => setTimeout(resolve, 1000));\n        steps[i].status = \"completed\";\n        // Update the state with the completed step\n        state.steps = steps;\n        // Emit custom events to update the frontend\n        await dispatchCustomEvent(\"manually_emit_state\", state, config);\n      }\n      \n      return new Command({\n        goto: \"start_flow\",\n        update: {\n          messages: updatedMessages,\n          steps: state.steps\n        }\n      });\n    }\n  }\n\n  return new Command({\n    goto: END,\n    update: {\n      messages: messages,\n      steps: state.steps\n    }\n  });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"start_flow\", startFlow)\n  .addNode(\"chat_node\", chatNode)\n  .addEdge(\"__start__\", \"start_flow\")\n  .addEdge(\"start_flow\", \"chat_node\")\n  .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();",
       "language": "ts",
       "type": "file"
     }
@@ -985,7 +985,7 @@
     },
     {
       "name": "agent.ts",
-      "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, MessagesAnnotation, StateGraph } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n  type: \"function\",\n  function: {\n    name: \"generate_task_steps_generative_ui\",\n    description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n    parameters: {\n      type: \"object\",\n      properties: {\n        steps: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              description: {\n                type: \"string\",\n                description: \"The text of the step in gerund form\"\n              },\n              status: {\n                type: \"string\",\n                enum: [\"pending\"],\n                description: \"The status of the step, always 'pending'\"\n              }\n            },\n            required: [\"description\", \"status\"]\n          },\n          description: \"An array of 10 step objects, each containing text and status\"\n        }\n      },\n      required: [\"steps\"]\n    }\n  }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n  steps: Annotation<Array<{ description: string; status: string }>>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n  /**\n   * This is the entry point for the flow.\n   */\n\n  if (!state.steps) {\n    state.steps = [];\n  }\n\n  return {\n    steps: state.steps || []\n  };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node.\n   */\n  const systemPrompt = `\n    You are a helpful assistant assisting with any task. \n    When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n    that was provided to you.\n    If you called the function, you MUST NOT repeat the steps in your next response to the user.\n    Just give a very brief summary (one sentence) of what you did with some emojis. \n    Always say you actually did the steps, not merely generated them.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n  \n  // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the write_document tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"steps\",\n    tool: \"generate_task_steps_generative_ui\",\n    tool_argument: \"steps\",\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      PERFORM_TASK_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  const messages = [...state.messages, response];\n\n  // Extract any tool calls from the response\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n    \n    if (toolCall.name === \"generate_task_steps_generative_ui\") {\n      const steps = toolCall.args.steps.map((step: any) => ({\n        description: step.description,\n        status: step.status\n      }));\n      \n      // Add the tool response to messages\n      const toolResponse = {\n        role: \"tool\" as const,\n        content: \"Steps executed.\",\n        tool_call_id: toolCall.id\n      };\n\n      const updatedMessages = [...messages, toolResponse];\n\n      // Simulate executing the steps\n      for (let i = 0; i < steps.length; i++) {\n        // simulate executing the step\n        await new Promise(resolve => setTimeout(resolve, 1000));\n        steps[i].status = \"completed\";\n        // Update the state with the completed step\n        state.steps = steps;\n        // Emit custom events to update the frontend\n        await dispatchCustomEvent(\"manually_emit_state\", state, config);\n      }\n      \n      return {\n        messages: updatedMessages,\n        steps: state.steps\n      };\n    }\n  }\n\n  return {\n    messages: messages,\n    steps: state.steps\n  };\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"start_flow\", startFlow)\n  .addNode(\"chat_node\", chatNode)\n  .addEdge(\"__start__\", \"start_flow\")\n  .addEdge(\"start_flow\", \"chat_node\")\n  .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();",
+      "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, Command, MessagesAnnotation, StateGraph, END } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n  type: \"function\",\n  function: {\n    name: \"generate_task_steps_generative_ui\",\n    description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n    parameters: {\n      type: \"object\",\n      properties: {\n        steps: {\n          type: \"array\",\n          items: {\n            type: \"object\",\n            properties: {\n              description: {\n                type: \"string\",\n                description: \"The text of the step in gerund form\"\n              },\n              status: {\n                type: \"string\",\n                enum: [\"pending\"],\n                description: \"The status of the step, always 'pending'\"\n              }\n            },\n            required: [\"description\", \"status\"]\n          },\n          description: \"An array of 10 step objects, each containing text and status\"\n        }\n      },\n      required: [\"steps\"]\n    }\n  }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n  steps: Annotation<Array<{ description: string; status: string }>>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  tools: Annotation<any[]>({\n    reducer: (x, y) => y ?? x,\n    default: () => []\n  }),\n  ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n  /**\n   * This is the entry point for the flow.\n   */\n\n  if (!state.steps) {\n    state.steps = [];\n  }\n\n  return {\n    steps: state.steps || []\n  };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n  /**\n   * Standard chat node.\n   */\n  const systemPrompt = `\n    You are a helpful assistant assisting with any task. \n    When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n    that was provided to you.\n    If you called the function, you MUST NOT repeat the steps in your next response to the user.\n    Just give a very brief summary (one sentence) of what you did with some emojis. \n    Always say you actually did the steps, not merely generated them.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o\" });\n  \n  // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the write_document tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"steps\",\n    tool: \"generate_task_steps_generative_ui\",\n    tool_argument: \"steps\",\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      PERFORM_TASK_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model to generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  const messages = [...state.messages, response];\n\n  // Extract any tool calls from the response\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n    \n    if (toolCall.name === \"generate_task_steps_generative_ui\") {\n      const steps = toolCall.args.steps.map((step: any) => ({\n        description: step.description,\n        status: step.status\n      }));\n      \n      // Add the tool response to messages\n      const toolResponse = {\n        role: \"tool\" as const,\n        content: \"Steps executed.\",\n        tool_call_id: toolCall.id\n      };\n\n      const updatedMessages = [...messages, toolResponse];\n\n      // Simulate executing the steps\n      for (let i = 0; i < steps.length; i++) {\n        // simulate executing the step\n        await new Promise(resolve => setTimeout(resolve, 1000));\n        steps[i].status = \"completed\";\n        // Update the state with the completed step\n        state.steps = steps;\n        // Emit custom events to update the frontend\n        await dispatchCustomEvent(\"manually_emit_state\", state, config);\n      }\n      \n      return new Command({\n        goto: \"start_flow\",\n        update: {\n          messages: updatedMessages,\n          steps: state.steps\n        }\n      });\n    }\n  }\n\n  return new Command({\n    goto: END,\n    update: {\n      messages: messages,\n      steps: state.steps\n    }\n  });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n  .addNode(\"start_flow\", startFlow)\n  .addNode(\"chat_node\", chatNode)\n  .addEdge(\"__start__\", \"start_flow\")\n  .addEdge(\"start_flow\", \"chat_node\")\n  .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();",
       "language": "ts",
       "type": "file"
     }
diff --git a/typescript-sdk/integrations/langgraph/examples/typescript/src/agents/agentic_generative_ui/agent.ts b/typescript-sdk/integrations/langgraph/examples/typescript/src/agents/agentic_generative_ui/agent.ts
index 818b56e3f..c944f4590 100644
--- a/typescript-sdk/integrations/langgraph/examples/typescript/src/agents/agentic_generative_ui/agent.ts
+++ b/typescript-sdk/integrations/langgraph/examples/typescript/src/agents/agentic_generative_ui/agent.ts
@@ -6,7 +6,7 @@ import { ChatOpenAI } from "@langchain/openai";
 import { SystemMessage } from "@langchain/core/messages";
 import { RunnableConfig } from "@langchain/core/runnables";
 import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch";
-import { Annotation, MessagesAnnotation, StateGraph } from "@langchain/langgraph";
+import { Annotation, Command, MessagesAnnotation, StateGraph, END } from "@langchain/langgraph";
 
 // This tool simulates performing a task on the server.
 // The tool call will be streamed to the frontend as it is being generated.
@@ -150,17 +150,23 @@ async function chatNode(state: AgentState, config?: RunnableConfig) {
         await dispatchCustomEvent("manually_emit_state", state, config);
       }
 
-      return {
-        messages: updatedMessages,
-        steps: state.steps
-      };
+      return new Command({
+        goto: "start_flow",
+        update: {
+          messages: updatedMessages,
+          steps: state.steps
+        }
+      });
     }
   }
 
-  return {
-    messages: messages,
-    steps: state.steps
-  };
+  return new Command({
+    goto: END,
+    update: {
+      messages: messages,
+      steps: state.steps
+    }
+  });
 }
 
 // Define the graph