Changes from 4 commits
132 changes: 97 additions & 35 deletions apps/webapp/app/presenters/v3/RunPresenter.server.ts
@@ -6,7 +6,7 @@ import { getUsername } from "~/utils/username";
import { resolveEventRepositoryForStore } from "~/v3/eventRepository/index.server";
import { SpanSummary } from "~/v3/eventRepository/eventRepository.types";
import { getTaskEventStoreTableForRun } from "~/v3/taskEventStore.server";
import { isFinalRunStatus } from "~/v3/taskStatus";
import { isFailedRunStatus, isFinalRunStatus } from "~/v3/taskStatus";
import { env } from "~/env.server";

type Result = Awaited<ReturnType<RunPresenter["call"]>>;
@@ -211,49 +211,47 @@ export class RunPresenter {
let totalDuration = tree?.data.duration ?? 0;
const events = tree
? flattenTree(tree).map((n) => {
const offset = millisecondsToNanoseconds(
n.data.startTime.getTime() - treeRootStartTimeMs
);
//only let non-debug events extend the total duration
if (!n.data.isDebug) {
totalDuration = Math.max(totalDuration, offset + n.data.duration);
}
return {
...n,
data: {
...n.data,
timelineEvents: createTimelineSpanEventsFromSpanEvents(
n.data.events,
user?.admin ?? false,
treeRootStartTimeMs
),
//set partial nodes to null duration
duration: n.data.isPartial ? null : n.data.duration,
offset,
isRoot: n.id === traceSummary.rootSpan.id,
},
};
})
const offset = millisecondsToNanoseconds(
n.data.startTime.getTime() - treeRootStartTimeMs
);
//only let non-debug events extend the total duration
if (!n.data.isDebug) {
totalDuration = Math.max(totalDuration, offset + n.data.duration);
}
return {
...n,
data: {
...n.data,
timelineEvents: createTimelineSpanEventsFromSpanEvents(
n.data.events,
user?.admin ?? false,
treeRootStartTimeMs
),
//set partial nodes to null duration
duration: n.data.isPartial ? null : n.data.duration,
offset,
isRoot: n.id === traceSummary.rootSpan.id,
},
};
})
: [];

//total duration should be a minimum of 1ms
totalDuration = Math.max(totalDuration, millisecondsToNanoseconds(1));

let rootSpanStatus: "executing" | "completed" | "failed" = "executing";
if (events[0]) {
if (events[0].data.isError) {
rootSpanStatus = "failed";
} else if (!events[0].data.isPartial) {
rootSpanStatus = "completed";
}
}
const reconciled = reconcileTraceWithRunLifecycle(
runData,
traceSummary.rootSpan.id,
events,
totalDuration
);

return {
run: runData,
trace: {
rootSpanStatus,
events: events,
duration: totalDuration,
rootSpanStatus: reconciled.rootSpanStatus,
events: reconciled.events,
duration: reconciled.totalDuration,
rootStartedAt: tree?.data.startTime,
startedAt: run.startedAt,
queuedDuration: run.startedAt
@@ -265,3 +263,67 @@
};
}
}

// NOTE: Clickhouse trace ingestion is eventually consistent.
// When a run is marked finished in Postgres, we reconcile the
// root span to reflect completion even if telemetry is still partial.
// This is a deliberate UI-layer tradeoff to prevent stale or "stuck"
// run states in the dashboard.
export function reconcileTraceWithRunLifecycle(
runData: {
isFinished: boolean;
status: Run["status"];
createdAt: Date;
completedAt: Date | null;
rootTaskRun: { createdAt: Date } | null;
},
rootSpanId: string,
events: RunEvent[],
totalDuration: number
): {
events: RunEvent[];
totalDuration: number;
rootSpanStatus: "executing" | "completed" | "failed";
} {
const currentStatus: "executing" | "completed" | "failed" = events[0]
? events[0].data.isError
? "failed"
: !events[0].data.isPartial
? "completed"
: "executing"
: "executing";

if (!runData.isFinished) {
return { events, totalDuration, rootSpanStatus: currentStatus };
}

const postgresRunDuration = runData.completedAt
? millisecondsToNanoseconds(
runData.completedAt.getTime() -
(runData.rootTaskRun?.createdAt ?? runData.createdAt).getTime()
)
: 0;

const updatedTotalDuration = Math.max(totalDuration, postgresRunDuration);

const updatedEvents = events.map((e) => {
if (e.id === rootSpanId && e.data.isPartial) {
return {
...e,
data: {
...e.data,
isPartial: false,
duration: Math.max(e.data.duration ?? 0, postgresRunDuration),
isError: isFailedRunStatus(runData.status),
},
};
}
return e;
});

return {
events: updatedEvents,
totalDuration: updatedTotalDuration,
rootSpanStatus: isFailedRunStatus(runData.status) ? "failed" : "completed",
};
}
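
To make the reconciliation above concrete, here is a minimal usage sketch (illustrative values; the import path assumes the function remains exported from RunPresenter.server.ts as in this diff): Postgres reports the run as finished 5 seconds after creation while the event store still holds a partial 1-second root span, so the span is promoted to completed and the durations are stretched to the Postgres-derived value.

import { millisecondsToNanoseconds } from "@trigger.dev/core/v3";
import { reconcileTraceWithRunLifecycle } from "~/presenters/v3/RunPresenter.server";

// Postgres: run finished 5s after creation. Event store: root span still partial at 1s.
const reconciled = reconcileTraceWithRunLifecycle(
  {
    isFinished: true,
    status: "COMPLETED_SUCCESSFULLY",
    createdAt: new Date("2024-01-01T00:00:00Z"),
    completedAt: new Date("2024-01-01T00:00:05Z"),
    rootTaskRun: null,
  },
  "root-span-id",
  [
    {
      id: "root-span-id",
      data: { isPartial: true, duration: millisecondsToNanoseconds(1000), isError: false },
    },
  ] as any,
  millisecondsToNanoseconds(1000)
);

// reconciled.rootSpanStatus === "completed"; the root span is no longer partial,
// and both its duration and reconciled.totalDuration are >= millisecondsToNanoseconds(5000).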
141 changes: 141 additions & 0 deletions apps/webapp/test/RunPresenter.test.ts
@@ -0,0 +1,141 @@
import { vi, describe, it, expect } from "vitest";
Member:
I'd rather the reconcileTraceWithRunLifecycle be moved to a different file that doesn't need all these mocks and makes testing it a bit easier

Author:
Hi @ericallam, great suggestion! I've moved the reconciliation logic to its own utility file reconcileTrace.server.ts. This allowed me to simplify the tests significantly by removing the heavy presenter/repository mocks. All tests are passing independently now
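
(A rough sketch of what the suggested extraction could look like — the directory is an assumption, since only the file name reconcileTrace.server.ts is mentioned above. Because the utility only depends on millisecondsToNanoseconds and isFailedRunStatus, the test would no longer need the env/db/event-repository mocks shown below.)

// Hypothetical test preamble after moving the function to e.g. app/v3/reconcileTrace.server.ts
import { describe, it, expect } from "vitest";
import { millisecondsToNanoseconds } from "@trigger.dev/core/v3";
import { reconcileTraceWithRunLifecycle } from "../app/v3/reconcileTrace.server"; // path assumed

// No vi.mock calls required: unlike RunPresenter.server.ts, the extracted module has no
// module-level dependencies on env.server, db.server, or the event repository.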


vi.mock("../app/env.server", () => ({
env: {
MAXIMUM_LIVE_RELOADING_EVENTS: 1000,
},
}));

vi.mock("../app/db.server", () => ({
prisma: {},
$replica: {},
$transaction: vi.fn(),
}));

vi.mock("../app/v3/eventRepository/index.server", () => ({
resolveEventRepositoryForStore: vi.fn(),
}));

vi.mock("../app/v3/taskEventStore.server", () => ({
getTaskEventStoreTableForRun: vi.fn(),
}));

vi.mock("../app/utils/username", () => ({
getUsername: vi.fn(),
}));

import { reconcileTraceWithRunLifecycle } from "../app/presenters/v3/RunPresenter.server";
import { millisecondsToNanoseconds } from "@trigger.dev/core/v3";

describe("reconcileTraceWithRunLifecycle", () => {
const rootSpanId = "root-span-id";
const createdAt = new Date("2024-01-01T00:00:00Z");
const completedAt = new Date("2024-01-01T00:00:05Z");

const runData: any = {
isFinished: true,
status: "COMPLETED_SUCCESSFULLY",
createdAt,
completedAt,
rootTaskRun: null,
};

const initialEvents = [
{
id: rootSpanId,
data: {
isPartial: true,
duration: millisecondsToNanoseconds(1000), // 1s, less than the 5s run duration
isError: false,
},
},
{
id: "child-span-id",
data: {
isPartial: false,
duration: millisecondsToNanoseconds(500),
isError: false,
},
},
];

it("should reconcile a finished run with lagging partial telemetry", () => {
const totalDuration = millisecondsToNanoseconds(1000);
const result = reconcileTraceWithRunLifecycle(runData, rootSpanId, initialEvents as any, totalDuration);

expect(result.rootSpanStatus).toBe("completed");

const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.isPartial).toBe(false);
// 5s duration = 5000ms
expect(rootEvent?.data.duration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(5000));
expect(result.totalDuration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(5000));
});

it("should not override duration if Clickhouse already has a longer finished duration", () => {
const longDuration = millisecondsToNanoseconds(10000);
const finishedEvents = [
{
id: rootSpanId,
data: {
isPartial: false,
duration: longDuration,
isError: false,
},
},
];

const result = reconcileTraceWithRunLifecycle(runData, rootSpanId, finishedEvents as any, longDuration);

const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.duration).toBe(longDuration);
expect(rootEvent?.data.isPartial).toBe(false);
expect(result.totalDuration).toBe(longDuration);
});

it("should handle unfinished runs without modification", () => {
const unfinishedRun = { ...runData, isFinished: false, completedAt: null };
const totalDuration = millisecondsToNanoseconds(1000);
const result = reconcileTraceWithRunLifecycle(unfinishedRun, rootSpanId, initialEvents as any, totalDuration);

expect(result.rootSpanStatus).toBe("executing");

const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.isPartial).toBe(true);
expect(rootEvent?.data.duration).toBe(millisecondsToNanoseconds(1000));
});

it("should reconcile failed runs correctly", () => {
const failedRun = { ...runData, status: "COMPLETED_WITH_ERRORS" };
const result = reconcileTraceWithRunLifecycle(failedRun, rootSpanId, initialEvents as any, millisecondsToNanoseconds(1000));

expect(result.rootSpanStatus).toBe("failed");
const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.isError).toBe(true);
expect(rootEvent?.data.isPartial).toBe(false);
});

it("should use rootTaskRun createdAt if available for duration calculation", () => {
const rootTaskCreatedAt = new Date("2023-12-31T23:59:50Z"); // 10s before run.createdAt
const runDataWithRoot: any = {
...runData,
rootTaskRun: { createdAt: rootTaskCreatedAt },
};

const result = reconcileTraceWithRunLifecycle(runDataWithRoot, rootSpanId, initialEvents as any, millisecondsToNanoseconds(1000));

// Duration should be from 23:59:50 to 00:00:05 = 15s
const rootEvent = result.events.find((e: any) => e.id === rootSpanId);
expect(rootEvent?.data.duration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(15000));
expect(result.totalDuration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(15000));
});

it("should handle missing root span gracefully", () => {
const result = reconcileTraceWithRunLifecycle(runData, "non-existent-id", initialEvents as any, millisecondsToNanoseconds(1000));

expect(result.rootSpanStatus).toBe("completed");
expect(result.events).toEqual(initialEvents);
// totalDuration should still be updated to postgres duration even if root span is missing from events list
expect(result.totalDuration).toBeGreaterThanOrEqual(millisecondsToNanoseconds(5000));
});
});