Skip to content
Open
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
133 changes: 98 additions & 35 deletions apps/webapp/app/presenters/v3/RunPresenter.server.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import { getUsername } from "~/utils/username";
import { resolveEventRepositoryForStore } from "~/v3/eventRepository/index.server";
import { SpanSummary } from "~/v3/eventRepository/eventRepository.types";
import { getTaskEventStoreTableForRun } from "~/v3/taskEventStore.server";
import { isFinalRunStatus } from "~/v3/taskStatus";
import { isFailedRunStatus, isFinalRunStatus } from "~/v3/taskStatus";
import { env } from "~/env.server";

type Result = Awaited<ReturnType<RunPresenter["call"]>>;
Expand Down Expand Up @@ -211,49 +211,47 @@ export class RunPresenter {
let totalDuration = tree?.data.duration ?? 0;
const events = tree
? flattenTree(tree).map((n) => {
const offset = millisecondsToNanoseconds(
n.data.startTime.getTime() - treeRootStartTimeMs
);
//only let non-debug events extend the total duration
if (!n.data.isDebug) {
totalDuration = Math.max(totalDuration, offset + n.data.duration);
}
return {
...n,
data: {
...n.data,
timelineEvents: createTimelineSpanEventsFromSpanEvents(
n.data.events,
user?.admin ?? false,
treeRootStartTimeMs
),
//set partial nodes to null duration
duration: n.data.isPartial ? null : n.data.duration,
offset,
isRoot: n.id === traceSummary.rootSpan.id,
},
};
})
const offset = millisecondsToNanoseconds(
n.data.startTime.getTime() - treeRootStartTimeMs
);
//only let non-debug events extend the total duration
if (!n.data.isDebug) {
totalDuration = Math.max(totalDuration, offset + n.data.duration);
}
return {
...n,
data: {
...n.data,
timelineEvents: createTimelineSpanEventsFromSpanEvents(
n.data.events,
user?.admin ?? false,
treeRootStartTimeMs
),
//set partial nodes to null duration
duration: n.data.isPartial ? null : n.data.duration,
offset,
isRoot: n.id === traceSummary.rootSpan.id,
},
};
})
: [];

//total duration should be a minimum of 1ms
totalDuration = Math.max(totalDuration, millisecondsToNanoseconds(1));

let rootSpanStatus: "executing" | "completed" | "failed" = "executing";
if (events[0]) {
if (events[0].data.isError) {
rootSpanStatus = "failed";
} else if (!events[0].data.isPartial) {
rootSpanStatus = "completed";
}
}
const reconciled = reconcileTraceWithRunLifecycle(
runData,
traceSummary.rootSpan.id,
events,
totalDuration
);

return {
run: runData,
trace: {
rootSpanStatus,
events: events,
duration: totalDuration,
rootSpanStatus: reconciled.rootSpanStatus,
events: reconciled.events,
duration: reconciled.totalDuration,
rootStartedAt: tree?.data.startTime,
startedAt: run.startedAt,
queuedDuration: run.startedAt
Expand All @@ -265,3 +263,68 @@ export class RunPresenter {
};
}
}

// NOTE: Clickhouse trace ingestion is eventually consistent.
// When a run is marked finished in Postgres, we reconcile the
// root span to reflect completion even if telemetry is still partial.
// This is a deliberate UI-layer tradeoff to prevent stale or "stuck"
// run states in the dashboard.
export function reconcileTraceWithRunLifecycle(
runData: {
isFinished: boolean;
status: Run["status"];
createdAt: Date;
completedAt: Date | null;
rootTaskRun: { createdAt: Date } | null;
},
rootSpanId: string,
events: RunEvent[],
totalDuration: number
): {
events: RunEvent[];
totalDuration: number;
rootSpanStatus: "executing" | "completed" | "failed";
} {
const rootEvent = events.find((e) => e.id === rootSpanId);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a big risk if there are 50k events or possibly more

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @ericallam — good call.

I’ve refactored this in the latest commits to address that concern:

Removed the events.find lookup entirely.

The root span is now accessed via index 0 (flattened tree guarantees order), making it O(1).

The reconciliation logic is merged into the existing trace construction pass, so there’s no additional iteration even for large event sets.

I also fixed a missing createdAt edge case flagged by CodeRabbit.
Latest commit: 4c3f0e8. Let me know if you’d like me to split anything further.

const currentStatus: "executing" | "completed" | "failed" = rootEvent
? rootEvent.data.isError
? "failed"
: !rootEvent.data.isPartial
? "completed"
: "executing"
: "executing";

if (!runData.isFinished) {
return { events, totalDuration, rootSpanStatus: currentStatus };
}

const postgresRunDuration = runData.completedAt
? millisecondsToNanoseconds(
runData.completedAt.getTime() -
(runData.rootTaskRun?.createdAt ?? runData.createdAt).getTime()
)
: 0;

const updatedTotalDuration = Math.max(totalDuration, postgresRunDuration);

const updatedEvents = events.map((e) => {
if (e.id === rootSpanId && e.data.isPartial) {
return {
...e,
data: {
...e.data,
isPartial: false,
duration: Math.max(e.data.duration ?? 0, postgresRunDuration),
isError: isFailedRunStatus(runData.status),
},
};
}
return e;
});

return {
events: updatedEvents,
totalDuration: updatedTotalDuration,
rootSpanStatus: isFailedRunStatus(runData.status) ? "failed" : "completed",
};
}
89 changes: 89 additions & 0 deletions apps/webapp/app/presenters/v3/reconcileTrace.server.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
import { millisecondsToNanoseconds } from "@trigger.dev/core/v3";
import { isFailedRunStatus } from "~/v3/taskStatus";
import type { TaskRunStatus } from "@trigger.dev/database";

export type ReconcileRunData = {
isFinished: boolean;
status: TaskRunStatus;
createdAt: Date;
completedAt: Date | null;
rootTaskRun: { createdAt: Date } | null;
};

export type ReconcileEvent = {
id: string;
data: {
isPartial: boolean;
isError: boolean;
duration?: number | null;
};
};

export type ReconcileResult = {
events: any[];
totalDuration: number;
rootSpanStatus: "executing" | "completed" | "failed";
};

// NOTE: Clickhouse trace ingestion is eventually consistent.
// When a run is marked finished in Postgres, we reconcile the
// root span to reflect completion even if telemetry is still partial.
// This is a deliberate UI-layer tradeoff to prevent stale or "stuck"
// run states in the dashboard.
export function reconcileTraceWithRunLifecycle(
runData: ReconcileRunData,
rootSpanId: string,
events: any[],
totalDuration: number
): ReconcileResult {
const rootEvent = events[0];
const isActualRoot = rootEvent?.id === rootSpanId;

const currentStatus: "executing" | "completed" | "failed" =
isActualRoot && rootEvent
? rootEvent.data.isError
? "failed"
: !rootEvent.data.isPartial
? "completed"
: "executing"
: "executing";

if (!runData.isFinished) {
return { events, totalDuration, rootSpanStatus: currentStatus };
}

const postgresRunDuration = runData.completedAt
? millisecondsToNanoseconds(
runData.completedAt.getTime() -
(runData.rootTaskRun?.createdAt ?? runData.createdAt).getTime()
)
: 0;

const updatedTotalDuration = Math.max(totalDuration, postgresRunDuration);

// We only need to potentially update the root event (the first one) if it matches our ID
if (isActualRoot && rootEvent && rootEvent.data.isPartial) {
const updatedEvents = [...events];
updatedEvents[0] = {
...rootEvent,
data: {
...rootEvent.data,
isPartial: false,
duration: Math.max(rootEvent.data.duration ?? 0, postgresRunDuration),
isError: isFailedRunStatus(runData.status),
},
};

return {
events: updatedEvents,
totalDuration: updatedTotalDuration,
rootSpanStatus: isFailedRunStatus(runData.status) ? "failed" : "completed",
};
}

return {
events,
totalDuration: updatedTotalDuration,
rootSpanStatus: isFailedRunStatus(runData.status) ? "failed" : "completed",
};
}
32 changes: 20 additions & 12 deletions apps/webapp/app/utils/throttle.ts
Original file line number Diff line number Diff line change
@@ -1,20 +1,28 @@
//From: https://kettanaito.com/blog/debounce-vs-throttle

/** A very simple throttle. Will execute the function at the end of each period and discard any other calls during that period. */
/** A throttle that fires the first call immediately and ensures the last call during the duration is also fired. */
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this throttle change is here now, just curious why you keep adding this to your PRs?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Regarding the throttle change: It was originally in PR #2874, but after your feedback that it was unrelated to the SDK fix, I've isolated it here. In this PR (#2875), it's one of the two main components of the fix for the dashboard refresh bug (#2798). The original throttle could drop final status updates; this trailing-edge version ensures the completion events are always delivered to the UI

export function throttle(
func: (...args: any[]) => void,
durationMs: number
): (...args: any[]) => void {
let isPrimedToFire = false;

return (...args: any[]) => {
if (!isPrimedToFire) {
isPrimedToFire = true;
let timeoutId: NodeJS.Timeout | null = null;
let nextArgs: any[] | null = null;

setTimeout(() => {
func(...args);
isPrimedToFire = false;
}, durationMs);
const wrapped = (...args: any[]) => {
if (timeoutId) {
nextArgs = args;
return;
}

func(...args);

timeoutId = setTimeout(() => {
timeoutId = null;
if (nextArgs) {
const argsToUse = nextArgs;
nextArgs = null;
wrapped(...argsToUse);
}
}, durationMs);
};

return wrapped;
}
Loading