170 changes: 134 additions & 36 deletions apps/webapp/app/presenters/v3/RunPresenter.server.ts
@@ -6,7 +6,7 @@ import { getUsername } from "~/utils/username";
import { resolveEventRepositoryForStore } from "~/v3/eventRepository/index.server";
import { SpanSummary } from "~/v3/eventRepository/eventRepository.types";
import { getTaskEventStoreTableForRun } from "~/v3/taskEventStore.server";
import { isFinalRunStatus } from "~/v3/taskStatus";
import { isFailedRunStatus, isFinalRunStatus } from "~/v3/taskStatus";
import { env } from "~/env.server";

type Result = Awaited<ReturnType<RunPresenter["call"]>>;
@@ -124,6 +124,7 @@ export class RunPresenter {
isFinished: isFinalRunStatus(run.status),
startedAt: run.startedAt,
completedAt: run.completedAt,
createdAt: run.createdAt,
logsDeletedAt: showDeletedLogs ? null : run.logsDeletedAt,
rootTaskRun: run.rootTaskRun,
parentTaskRun: run.parentTaskRun,
@@ -208,52 +209,76 @@ export class RunPresenter {

//we need the start offset for each item, and the total duration of the entire tree
const treeRootStartTimeMs = tree ? tree?.data.startTime.getTime() : 0;

const postgresRunDuration =
runData.isFinished && run.completedAt
? millisecondsToNanoseconds(
run.completedAt.getTime() -
(run.rootTaskRun?.createdAt ?? run.createdAt).getTime()
)
: 0;

let totalDuration = tree?.data.duration ?? 0;
const events = tree
? flattenTree(tree).map((n) => {
const offset = millisecondsToNanoseconds(
n.data.startTime.getTime() - treeRootStartTimeMs
);
//only let non-debug events extend the total duration
if (!n.data.isDebug) {
totalDuration = Math.max(totalDuration, offset + n.data.duration);
}
return {
...n,
data: {
...n.data,
timelineEvents: createTimelineSpanEventsFromSpanEvents(
n.data.events,
user?.admin ?? false,
treeRootStartTimeMs
),
//set partial nodes to null duration
duration: n.data.isPartial ? null : n.data.duration,
offset,
isRoot: n.id === traceSummary.rootSpan.id,
},
};
})
? flattenTree(tree).map((n, index) => {
const isRoot = index === 0;
const offset = millisecondsToNanoseconds(n.data.startTime.getTime() - treeRootStartTimeMs);

let nIsPartial = n.data.isPartial;
let nDuration = n.data.duration;
let nIsError = n.data.isError;

// NOTE: Clickhouse trace ingestion is eventually consistent.
// When a run is marked finished in Postgres, we reconcile the
// root span to reflect completion even if telemetry is still partial.
// This is a deliberate UI-layer tradeoff to prevent stale or "stuck"
// run states in the dashboard.
if (isRoot && runData.isFinished && nIsPartial) {
nIsPartial = false;
nDuration = Math.max(nDuration ?? 0, postgresRunDuration);
nIsError = isFailedRunStatus(runData.status);
}

//only let non-debug events extend the total duration
if (!n.data.isDebug) {
totalDuration = Math.max(totalDuration, offset + (nIsPartial ? 0 : nDuration));
}

return {
...n,
data: {
...n.data,
timelineEvents: createTimelineSpanEventsFromSpanEvents(
n.data.events,
user?.admin ?? false,
treeRootStartTimeMs
),
//set partial nodes to null duration
duration: nIsPartial ? null : nDuration,
isPartial: nIsPartial,
isError: nIsError,
offset,
isRoot,
},
};
})
: [];

if (runData.isFinished) {
totalDuration = Math.max(totalDuration, postgresRunDuration);
}

//total duration should be a minimum of 1ms
totalDuration = Math.max(totalDuration, millisecondsToNanoseconds(1));

let rootSpanStatus: "executing" | "completed" | "failed" = "executing";
if (events[0]) {
if (events[0].data.isError) {
rootSpanStatus = "failed";
} else if (!events[0].data.isPartial) {
rootSpanStatus = "completed";
}
}
const reconciled = reconcileTraceWithRunLifecycle(runData, traceSummary.rootSpan.id, events, totalDuration);

return {
run: runData,
trace: {
rootSpanStatus,
events: events,
duration: totalDuration,
rootSpanStatus: reconciled.rootSpanStatus,
events: reconciled.events,
duration: reconciled.totalDuration,
rootStartedAt: tree?.data.startTime,
startedAt: run.startedAt,
queuedDuration: run.startedAt
@@ -265,3 +290,76 @@
};
}
}

// NOTE: Clickhouse trace ingestion is eventually consistent.
// When a run is marked finished in Postgres, we reconcile the
// root span to reflect completion even if telemetry is still partial.
// This is a deliberate UI-layer tradeoff to prevent stale or "stuck"
// run states in the dashboard.
export function reconcileTraceWithRunLifecycle(
runData: {
isFinished: boolean;
status: Run["status"];
createdAt: Date;
completedAt: Date | null;
rootTaskRun: { createdAt: Date } | null;
},
rootSpanId: string,
events: RunEvent[],
totalDuration: number
): {
events: RunEvent[];
totalDuration: number;
rootSpanStatus: "executing" | "completed" | "failed";
} {
const rootEvent = events[0];
const isActualRoot = rootEvent?.id === rootSpanId;

const currentStatus: "executing" | "completed" | "failed" =
isActualRoot && rootEvent
? rootEvent.data.isError
? "failed"
: !rootEvent.data.isPartial
? "completed"
: "executing"
: "executing";

if (!runData.isFinished) {
return { events, totalDuration, rootSpanStatus: currentStatus };
}

const postgresRunDuration = runData.completedAt
? millisecondsToNanoseconds(
runData.completedAt.getTime() -
(runData.rootTaskRun?.createdAt ?? runData.createdAt).getTime()
)
: 0;

const updatedTotalDuration = Math.max(totalDuration, postgresRunDuration);

// We only need to potentially update the root event (the first one) if it matches our ID
if (isActualRoot && rootEvent && rootEvent.data.isPartial) {
const updatedEvents = [...events];
updatedEvents[0] = {
...rootEvent,
data: {
...rootEvent.data,
isPartial: false,
duration: Math.max(rootEvent.data.duration ?? 0, postgresRunDuration),
isError: isFailedRunStatus(runData.status),
},
};

return {
events: updatedEvents,
totalDuration: updatedTotalDuration,
rootSpanStatus: isFailedRunStatus(runData.status) ? "failed" : "completed",
};
}

return {
events,
totalDuration: updatedTotalDuration,
rootSpanStatus: isFailedRunStatus(runData.status) ? "failed" : "completed",
};
}
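
For orientation, here is a minimal sketch (not part of the diff) of the scenario this reconciliation targets: Postgres has already marked the run finished, but the Clickhouse-backed root span is still partial. The values are invented and mirror the first test case further down; the import path assumes the webapp's "~" alias.

```ts
import { millisecondsToNanoseconds } from "@trigger.dev/core/v3";
// Import path assumes the webapp's "~" alias; adjust if the helper lives elsewhere.
import { reconcileTraceWithRunLifecycle } from "~/presenters/v3/RunPresenter.server";

// Invented data: the run finished in Postgres after 5s, but the ingested root
// span is still partial and only covers 1s of telemetry.
const runData = {
  isFinished: true,
  status: "COMPLETED_SUCCESSFULLY",
  createdAt: new Date("2024-01-01T00:00:00Z"),
  completedAt: new Date("2024-01-01T00:00:05Z"),
  rootTaskRun: null,
} as any; // cast for the sketch; the real call site passes the presenter's runData

const events = [
  {
    id: "root-span-id",
    data: { isPartial: true, duration: millisecondsToNanoseconds(1_000), isError: false },
  },
] as any[];

const reconciled = reconcileTraceWithRunLifecycle(
  runData,
  "root-span-id",
  events,
  millisecondsToNanoseconds(1_000)
);

// The root span is promoted to non-partial, its duration is clamped up to the
// Postgres-derived 5s (5_000_000_000 ns), and the status resolves to "completed",
// so the dashboard no longer shows the run as stuck executing.
// reconciled.rootSpanStatus === "completed"
// reconciled.totalDuration >= millisecondsToNanoseconds(5_000)
```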
32 changes: 20 additions & 12 deletions apps/webapp/app/utils/throttle.ts
@@ -1,20 +1,28 @@
//From: https://kettanaito.com/blog/debounce-vs-throttle

/** A very simple throttle. Will execute the function at the end of each period and discard any other calls during that period. */
/** A throttle that fires the first call immediately and ensures the last call during the duration is also fired. */
Member: this throttle change is here now, just curious why you keep adding this to your PRs?

Author: Regarding the throttle change: It was originally in PR #2874, but after your feedback that it was unrelated to the SDK fix, I've isolated it here. In this PR (#2875), it's one of the two main components of the fix for the dashboard refresh bug (#2798). The original throttle could drop final status updates; this trailing-edge version ensures the completion events are always delivered to the UI

export function throttle(
func: (...args: any[]) => void,
durationMs: number
): (...args: any[]) => void {
let isPrimedToFire = false;

return (...args: any[]) => {
if (!isPrimedToFire) {
isPrimedToFire = true;
let timeoutId: NodeJS.Timeout | null = null;
let nextArgs: any[] | null = null;

setTimeout(() => {
func(...args);
isPrimedToFire = false;
}, durationMs);
const wrapped = (...args: any[]) => {
if (timeoutId) {
nextArgs = args;
return;
}

func(...args);

timeoutId = setTimeout(() => {
timeoutId = null;
if (nextArgs) {
const argsToUse = nextArgs;
nextArgs = null;
wrapped(...argsToUse);
}
}, durationMs);
};

return wrapped;
}
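
A short, hypothetical usage sketch (not part of the diff) of why the trailing edge matters for the dashboard refresh bug: the last call made during the window is replayed when the window closes, so a final completion update is never silently dropped. The callback name and the 500ms duration are illustrative only.

```ts
// Import path assumes the webapp's "~" alias.
import { throttle } from "~/utils/throttle";

// Hypothetical callback; in the dashboard this would be the live-trace refresh.
const refreshRunStatus = (status: string) => {
  console.log(`refreshing UI with status: ${status}`);
};

const throttledRefresh = throttle(refreshRunStatus, 500);

throttledRefresh("EXECUTING"); // leading edge: fires immediately
throttledRefresh("EXECUTING"); // inside the window: stored as the pending trailing call
throttledRefresh("COMPLETED"); // replaces the pending args; fires when the 500ms window closes

// With the previous implementation, calls made while a timeout was primed were
// discarded, so the final "COMPLETED" update could be dropped and the run would
// look stuck until the next full reload.
```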
141 changes: 141 additions & 0 deletions apps/webapp/test/RunPresenter.test.ts
@@ -0,0 +1,141 @@
import { vi, describe, it, expect } from "vitest";
Member: I'd rather the reconcileTraceWithRunLifecycle be moved to a different file that doesn't need all these mocks and makes testing it a bit easier

Author: Hi @ericallam, great suggestion! I've moved the reconciliation logic to its own utility file reconcileTrace.server.ts. This allowed me to simplify the tests significantly by removing the heavy presenter/repository mocks. All tests are passing independently now


vi.mock("../app/env.server", () => ({
env: {
MAXIMUM_LIVE_RELOADING_EVENTS: 1000,
},
}));

vi.mock("../app/db.server", () => ({
prisma: {},
$replica: {},
$transaction: vi.fn(),
}));

vi.mock("../app/v3/eventRepository/index.server", () => ({
resolveEventRepositoryForStore: vi.fn(),
}));

vi.mock("../app/v3/taskEventStore.server", () => ({
getTaskEventStoreTableForRun: vi.fn(),
}));

vi.mock("../app/utils/username", () => ({
getUsername: vi.fn(),
}));

import { reconcileTraceWithRunLifecycle } from "../app/presenters/v3/RunPresenter.server";
import { millisecondsToNanoseconds } from "@trigger.dev/core/v3";

describe("reconcileTraceWithRunLifecycle", () => {
const rootSpanId = "root-span-id";
const createdAt = new Date("2024-01-01T00:00:00Z");
const completedAt = new Date("2024-01-01T00:00:05Z");

const runData: any = {
isFinished: true,
status: "COMPLETED_SUCCESSFULLY",
createdAt,
completedAt,
rootTaskRun: null,
};

const initialEvents = [
{
id: rootSpanId,
data: {
isPartial: true,
duration: millisecondsToNanoseconds(1000), // 1s, less than the 5s run duration
isError: false,
},
},
{
id: "child-span-id",
data: {
isPartial: false,
duration: millisecondsToNanoseconds(500),
isError: false,
},
},
];

it("should reconcile a finished run with lagging partial telemetry", () => {
const totalDuration = millisecondsToNanoseconds(1000);
const result = reconcileTraceWithRunLifecycle(runData, rootSpanId, initialEvents as any, totalDuration);

expect(result.rootSpanStatus).toBe("completed");

const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.isPartial).toBe(false);
// 5s duration = 5000ms
expect(rootEvent?.data.duration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(5000));
expect(result.totalDuration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(5000));
});

it("should not override duration if Clickhouse already has a longer finished duration", () => {
const longDuration = millisecondsToNanoseconds(10000);
const finishedEvents = [
{
id: rootSpanId,
data: {
isPartial: false,
duration: longDuration,
isError: false,
},
},
];

const result = reconcileTraceWithRunLifecycle(runData, rootSpanId, finishedEvents as any, longDuration);

const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.duration).toBe(longDuration);
expect(rootEvent?.data.isPartial).toBe(false);
expect(result.totalDuration).toBe(longDuration);
});

it("should handle unfinished runs without modification", () => {
const unfinishedRun = { ...runData, isFinished: false, completedAt: null };
const totalDuration = millisecondsToNanoseconds(1000);
const result = reconcileTraceWithRunLifecycle(unfinishedRun, rootSpanId, initialEvents as any, totalDuration);

expect(result.rootSpanStatus).toBe("executing");

const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.isPartial).toBe(true);
expect(rootEvent?.data.duration).toBe(millisecondsToNanoseconds(1000));
});

it("should reconcile failed runs correctly", () => {
const failedRun = { ...runData, status: "COMPLETED_WITH_ERRORS" };
const result = reconcileTraceWithRunLifecycle(failedRun, rootSpanId, initialEvents as any, millisecondsToNanoseconds(1000));

expect(result.rootSpanStatus).toBe("failed");
const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.isError).toBe(true);
expect(rootEvent?.data.isPartial).toBe(false);
});

it("should use rootTaskRun createdAt if available for duration calculation", () => {
const rootTaskCreatedAt = new Date("2023-12-31T23:59:50Z"); // 10s before run.createdAt
const runDataWithRoot: any = {
...runData,
rootTaskRun: { createdAt: rootTaskCreatedAt },
};

const result = reconcileTraceWithRunLifecycle(runDataWithRoot, rootSpanId, initialEvents as any, millisecondsToNanoseconds(1000));

// Duration should be from 23:59:50 to 00:00:05 = 15s
const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.duration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(15000));
expect(result.totalDuration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(15000));
});

it("should handle missing root span gracefully", () => {
const result = reconcileTraceWithRunLifecycle(runData, "non-existent-id", initialEvents as any, millisecondsToNanoseconds(1000));

expect(result.rootSpanStatus).toBe("completed");
expect(result.events).toEqual(initialEvents);
// totalDuration should still be updated to postgres duration even if root span is missing from events list
expect(result.totalDuration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(5000));
});
});