From 5974326a7165a8032fd0df76f38b4801a8547899 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 8 Oct 2024 17:46:53 +0100 Subject: [PATCH 001/485] WIP RunEngine --- .../app/v3/services/triggerTaskV2.server.ts | 591 +++++++ .../database/prisma/schema.prisma | 144 +- internal-packages/run-engine/README.md | 324 ++++ internal-packages/run-engine/package.json | 27 + .../run-engine/src/engine/index.test.ts | 150 ++ .../run-engine/src/engine/index.ts | 541 +++++++ internal-packages/run-engine/src/index.ts | 1 + .../run-engine/src/run-queue/index.test.ts | 453 ++++++ .../run-engine/src/run-queue/index.ts | 1384 +++++++++++++++++ .../src/run-queue/keyProducer.test.ts | 361 +++++ .../run-engine/src/run-queue/keyProducer.ts | 195 +++ .../simpleWeightedPriorityStrategy.ts | 119 ++ .../run-engine/src/run-queue/types.ts | 116 ++ .../run-engine/src/shared/asyncWorker.ts | 34 + .../run-engine/src/shared/index.ts | 39 + internal-packages/run-engine/tsconfig.json | 29 + internal-packages/run-engine/vitest.config.ts | 8 + 17 files changed, 4515 insertions(+), 1 deletion(-) create mode 100644 apps/webapp/app/v3/services/triggerTaskV2.server.ts create mode 100644 internal-packages/run-engine/README.md create mode 100644 internal-packages/run-engine/package.json create mode 100644 internal-packages/run-engine/src/engine/index.test.ts create mode 100644 internal-packages/run-engine/src/engine/index.ts create mode 100644 internal-packages/run-engine/src/index.ts create mode 100644 internal-packages/run-engine/src/run-queue/index.test.ts create mode 100644 internal-packages/run-engine/src/run-queue/index.ts create mode 100644 internal-packages/run-engine/src/run-queue/keyProducer.test.ts create mode 100644 internal-packages/run-engine/src/run-queue/keyProducer.ts create mode 100644 internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts create mode 100644 internal-packages/run-engine/src/run-queue/types.ts create mode 100644 
internal-packages/run-engine/src/shared/asyncWorker.ts create mode 100644 internal-packages/run-engine/src/shared/index.ts create mode 100644 internal-packages/run-engine/tsconfig.json create mode 100644 internal-packages/run-engine/vitest.config.ts diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts new file mode 100644 index 0000000000..4ea9394d2c --- /dev/null +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -0,0 +1,591 @@ +import { + IOPacket, + QueueOptions, + SemanticInternalAttributes, + TriggerTaskRequestBody, + packetRequiresOffloading, +} from "@trigger.dev/core/v3"; +import { env } from "~/env.server"; +import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; +import { autoIncrementCounter } from "~/services/autoIncrementCounter.server"; +import { workerQueue } from "~/services/worker.server"; +import { marqs, sanitizeQueueName } from "~/v3/marqs/index.server"; +import { eventRepository } from "../eventRepository.server"; +import { generateFriendlyId } from "../friendlyIdentifiers"; +import { uploadToObjectStore } from "../r2.server"; +import { startActiveSpan } from "../tracer.server"; +import { getEntitlement } from "~/services/platform.v3.server"; +import { BaseService, ServiceValidationError } from "./baseService.server"; +import { logger } from "~/services/logger.server"; +import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; +import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; +import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; +import { handleMetadataPacket } from "~/utils/packets"; +import { RunEngine } from "@internal/run-engine"; +import { prisma } from "~/db.server"; + +export type TriggerTaskServiceOptions = { + idempotencyKey?: string; + triggerVersion?: string; + traceContext?: Record; + spanParentAsLink?: boolean; + parentAsLinkType?: "replay" | "trigger"; + batchId?: string; 
+ customIcon?: string; +}; + +export class OutOfEntitlementError extends Error { + constructor() { + super("You can't trigger a task because you have run out of credits."); + } +} + +//todo move this to a singleton somewhere +const engine = new RunEngine({ + prisma, + redis: { + port: env.REDIS_PORT, + host: env.REDIS_HOST, + username: env.REDIS_USERNAME, + password: env.REDIS_PASSWORD, + enableAutoPipelining: true, + ...(env.REDIS_TLS_DISABLED === "true" ? {} : { tls: {} }), + }, + zodWorker: { + connectionString: env.DATABASE_URL, + concurrency: env.WORKER_CONCURRENCY, + pollInterval: env.WORKER_POLL_INTERVAL, + noPreparedStatements: env.DATABASE_URL !== env.DIRECT_URL, + schema: env.WORKER_SCHEMA, + maxPoolSize: env.WORKER_CONCURRENCY + 1, + shutdownTimeoutInMs: env.GRACEFUL_SHUTDOWN_TIMEOUT, + }, +}); + +export class TriggerTaskService extends BaseService { + public async call( + taskId: string, + environment: AuthenticatedEnvironment, + body: TriggerTaskRequestBody, + options: TriggerTaskServiceOptions = {} + ) { + return await this.traceWithEnv("call()", environment, async (span) => { + span.setAttribute("taskId", taskId); + + const idempotencyKey = options.idempotencyKey ?? body.options?.idempotencyKey; + const delayUntil = await parseDelay(body.options?.delay); + + const ttl = + typeof body.options?.ttl === "number" + ? stringifyDuration(body.options?.ttl) + : body.options?.ttl ?? (environment.type === "DEVELOPMENT" ? "10m" : undefined); + + const existingRun = idempotencyKey + ? 
await this._prisma.taskRun.findUnique({ + where: { + runtimeEnvironmentId_taskIdentifier_idempotencyKey: { + runtimeEnvironmentId: environment.id, + idempotencyKey, + taskIdentifier: taskId, + }, + }, + }) + : undefined; + + if (existingRun) { + span.setAttribute("runId", existingRun.friendlyId); + + return existingRun; + } + + if (environment.type !== "DEVELOPMENT") { + const result = await getEntitlement(environment.organizationId); + if (result && result.hasAccess === false) { + throw new OutOfEntitlementError(); + } + } + + if ( + body.options?.tags && + typeof body.options.tags !== "string" && + body.options.tags.length > MAX_TAGS_PER_RUN + ) { + throw new ServiceValidationError( + `Runs can only have ${MAX_TAGS_PER_RUN} tags, you're trying to set ${body.options.tags.length}.` + ); + } + + const runFriendlyId = generateFriendlyId("run"); + + const payloadPacket = await this.#handlePayloadPacket( + body.payload, + body.options?.payloadType ?? "application/json", + runFriendlyId, + environment + ); + + const metadataPacket = body.options?.metadata + ? handleMetadataPacket( + body.options?.metadata, + body.options?.metadataType ?? "application/json" + ) + : undefined; + + const dependentAttempt = body.options?.dependentAttempt + ? 
await this._prisma.taskRunAttempt.findUnique({ + where: { friendlyId: body.options.dependentAttempt }, + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + depth: true, + }, + }, + }, + }) + : undefined; + + if ( + dependentAttempt && + (isFinalAttemptStatus(dependentAttempt.status) || + isFinalRunStatus(dependentAttempt.taskRun.status)) + ) { + logger.debug("Dependent attempt or run is in a terminal state", { + dependentAttempt: dependentAttempt, + }); + + if (isFinalAttemptStatus(dependentAttempt.status)) { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentAttempt.status}` + ); + } else { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent run has a status of ${dependentAttempt.taskRun.status}` + ); + } + } + + const parentAttempt = body.options?.parentAttempt + ? await this._prisma.taskRunAttempt.findUnique({ + where: { friendlyId: body.options.parentAttempt }, + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + depth: true, + }, + }, + }, + }) + : undefined; + + const dependentBatchRun = body.options?.dependentBatch + ? 
await this._prisma.batchTaskRun.findUnique({ + where: { friendlyId: body.options.dependentBatch }, + include: { + dependentTaskAttempt: { + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + depth: true, + }, + }, + }, + }, + }, + }) + : undefined; + + if ( + dependentBatchRun && + dependentBatchRun.dependentTaskAttempt && + (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status) || + isFinalRunStatus(dependentBatchRun.dependentTaskAttempt.taskRun.status)) + ) { + logger.debug("Dependent batch run task attempt or run has been canceled", { + dependentBatchRunId: dependentBatchRun.id, + status: dependentBatchRun.status, + attempt: dependentBatchRun.dependentTaskAttempt, + }); + + if (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status)) { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentBatchRun.dependentTaskAttempt.status}` + ); + } else { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent run has a status of ${dependentBatchRun.dependentTaskAttempt.taskRun.status}` + ); + } + } + + const parentBatchRun = body.options?.parentBatch + ? await this._prisma.batchTaskRun.findUnique({ + where: { friendlyId: body.options.parentBatch }, + include: { + dependentTaskAttempt: { + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + }, + }, + }, + }, + }, + }) + : undefined; + + return await eventRepository.traceEvent( + taskId, + { + context: options.traceContext, + spanParentAsLink: options.spanParentAsLink, + parentAsLinkType: options.parentAsLinkType, + kind: "SERVER", + environment, + taskSlug: taskId, + attributes: { + properties: { + [SemanticInternalAttributes.SHOW_ACTIONS]: true, + }, + style: { + icon: options.customIcon ?? "task", + }, + runIsTest: body.options?.test ?? 
false, + batchId: options.batchId, + idempotencyKey, + }, + incomplete: true, + immediate: true, + }, + async (event, traceContext, traceparent) => { + const run = await autoIncrementCounter.incrementInTransaction( + `v3-run:${environment.id}:${taskId}`, + async (num, tx) => { + const lockedToBackgroundWorker = body.options?.lockToVersion + ? await tx.backgroundWorker.findUnique({ + where: { + projectId_runtimeEnvironmentId_version: { + projectId: environment.projectId, + runtimeEnvironmentId: environment.id, + version: body.options?.lockToVersion, + }, + }, + }) + : undefined; + + let queueName = sanitizeQueueName( + await this.#getQueueName(taskId, environment, body.options?.queue?.name) + ); + + // Check that the queuename is not an empty string + if (!queueName) { + queueName = sanitizeQueueName(`task/${taskId}`); + } + + event.setAttribute("queueName", queueName); + span.setAttribute("queueName", queueName); + + //upsert tags + let tagIds: string[] = []; + const bodyTags = + typeof body.options?.tags === "string" ? [body.options.tags] : body.options?.tags; + if (bodyTags && bodyTags.length > 0) { + for (const tag of bodyTags) { + const tagRecord = await createTag({ + tag, + projectId: environment.projectId, + }); + if (tagRecord) { + tagIds.push(tagRecord.id); + } + } + } + + const depth = dependentAttempt + ? dependentAttempt.taskRun.depth + 1 + : parentAttempt + ? parentAttempt.taskRun.depth + 1 + : dependentBatchRun?.dependentTaskAttempt + ? dependentBatchRun.dependentTaskAttempt.taskRun.depth + 1 + : 0; + + event.setAttribute("runId", runFriendlyId); + span.setAttribute("runId", runFriendlyId); + + const taskRun = await engine.trigger( + { + number: num, + friendlyId: runFriendlyId, + environment: environment, + idempotencyKey, + taskIdentifier: taskId, + payload: payloadPacket.data ?? 
"", + payloadType: payloadPacket.dataType, + context: body.context, + traceContext: traceContext, + traceId: event.traceId, + spanId: event.spanId, + parentSpanId: + options.parentAsLinkType === "replay" ? undefined : traceparent?.spanId, + lockedToVersionId: lockedToBackgroundWorker?.id, + concurrencyKey: body.options?.concurrencyKey, + queueName, + queue: body.options?.queue, + //todo multiple worker pools support + masterQueue: "main", + isTest: body.options?.test ?? false, + delayUntil, + queuedAt: delayUntil ? undefined : new Date(), + maxAttempts: body.options?.maxAttempts, + ttl, + tags: tagIds, + parentTaskRunId: parentAttempt?.taskRun.id, + parentTaskRunAttemptId: parentAttempt?.id, + rootTaskRunId: parentAttempt?.taskRun.rootTaskRunId ?? parentAttempt?.taskRun.id, + batchId: dependentBatchRun?.id ?? parentBatchRun?.id, + resumeParentOnCompletion: !!(dependentAttempt ?? dependentBatchRun), + depth, + metadata: metadataPacket?.data, + metadataType: metadataPacket?.dataType, + seedMetadata: metadataPacket?.data, + seedMetadataType: metadataPacket?.dataType, + }, + this._prisma + ); + + return taskRun; + }, + async (_, tx) => { + const counter = await tx.taskRunNumberCounter.findUnique({ + where: { + taskIdentifier_environmentId: { + taskIdentifier: taskId, + environmentId: environment.id, + }, + }, + select: { lastNumber: true }, + }); + + return counter?.lastNumber; + }, + this._prisma + ); + + return run; + } + ); + }); + } + + async #getQueueName(taskId: string, environment: AuthenticatedEnvironment, queueName?: string) { + if (queueName) { + return queueName; + } + + const defaultQueueName = `task/${taskId}`; + + const worker = await findCurrentWorkerFromEnvironment(environment); + + if (!worker) { + logger.debug("Failed to get queue name: No worker found", { + taskId, + environmentId: environment.id, + }); + + return defaultQueueName; + } + + const task = await this._prisma.backgroundWorkerTask.findUnique({ + where: { + workerId_slug: { + workerId: 
worker.id, + slug: taskId, + }, + }, + }); + + if (!task) { + console.log("Failed to get queue name: No task found", { + taskId, + environmentId: environment.id, + }); + + return defaultQueueName; + } + + const queueConfig = QueueOptions.optional().nullable().safeParse(task.queueConfig); + + if (!queueConfig.success) { + console.log("Failed to get queue name: Invalid queue config", { + taskId, + environmentId: environment.id, + queueConfig: task.queueConfig, + }); + + return defaultQueueName; + } + + return queueConfig.data?.name ?? defaultQueueName; + } + + async #handlePayloadPacket( + payload: any, + payloadType: string, + pathPrefix: string, + environment: AuthenticatedEnvironment + ) { + return await startActiveSpan("handlePayloadPacket()", async (span) => { + const packet = this.#createPayloadPacket(payload, payloadType); + + if (!packet.data) { + return packet; + } + + const { needsOffloading, size } = packetRequiresOffloading( + packet, + env.TASK_PAYLOAD_OFFLOAD_THRESHOLD + ); + + if (!needsOffloading) { + return packet; + } + + const filename = `${pathPrefix}/payload.json`; + + await uploadToObjectStore(filename, packet.data, packet.dataType, environment); + + return { + data: filename, + dataType: "application/store", + }; + }); + } + + #createPayloadPacket(payload: any, payloadType: string): IOPacket { + if (payloadType === "application/json") { + return { data: JSON.stringify(payload), dataType: "application/json" }; + } + + if (typeof payload === "string") { + return { data: payload, dataType: payloadType }; + } + + return { dataType: payloadType }; + } +} + +export async function parseDelay(value?: string | Date): Promise { + if (!value) { + return; + } + + if (value instanceof Date) { + return value; + } + + try { + const date = new Date(value); + + // Check if the date is valid + if (isNaN(date.getTime())) { + return parseNaturalLanguageDuration(value); + } + + if (date.getTime() <= Date.now()) { + return; + } + + return date; + } catch (error) { + 
return parseNaturalLanguageDuration(value); + } +} + +export function parseNaturalLanguageDuration(duration: string): Date | undefined { + const regexPattern = /^(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?$/; + + const result: Date = new Date(); + let hasMatch = false; + + const elements = duration.match(regexPattern); + if (elements) { + if (elements[1]) { + const weeks = Number(elements[1].slice(0, -1)); + if (weeks >= 0) { + result.setDate(result.getDate() + 7 * weeks); + hasMatch = true; + } + } + if (elements[2]) { + const days = Number(elements[2].slice(0, -1)); + if (days >= 0) { + result.setDate(result.getDate() + days); + hasMatch = true; + } + } + if (elements[3]) { + const hours = Number(elements[3].slice(0, -1)); + if (hours >= 0) { + result.setHours(result.getHours() + hours); + hasMatch = true; + } + } + if (elements[4]) { + const minutes = Number(elements[4].slice(0, -1)); + if (minutes >= 0) { + result.setMinutes(result.getMinutes() + minutes); + hasMatch = true; + } + } + if (elements[5]) { + const seconds = Number(elements[5].slice(0, -1)); + if (seconds >= 0) { + result.setSeconds(result.getSeconds() + seconds); + hasMatch = true; + } + } + } + + if (hasMatch) { + return result; + } + + return undefined; +} + +function stringifyDuration(seconds: number): string | undefined { + if (seconds <= 0) { + return; + } + + const units = { + w: Math.floor(seconds / 604800), + d: Math.floor((seconds % 604800) / 86400), + h: Math.floor((seconds % 86400) / 3600), + m: Math.floor((seconds % 3600) / 60), + s: Math.floor(seconds % 60), + }; + + // Filter the units having non-zero values and join them + const result: string = Object.entries(units) + .filter(([unit, val]) => val != 0) + .map(([unit, val]) => `${val}${unit}`) + .join(""); + + return result; +} diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index d346d4584c..37bd8b49a3 100644 --- a/internal-packages/database/prisma/schema.prisma +++ 
b/internal-packages/database/prisma/schema.prisma @@ -470,6 +470,8 @@ model Project { alertStorages ProjectAlertStorage[] bulkActionGroups BulkActionGroup[] BackgroundWorkerFile BackgroundWorkerFile[] + waitpoints Waitpoint[] + taskRunWaitpoints TaskRunWaitpoint[] } enum ProjectVersion { @@ -1111,6 +1113,8 @@ model TaskAttempt { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + executionSnapshot TaskRunExecutionSnapshot[] + @@unique([taskId, number]) } @@ -1654,6 +1658,8 @@ model TaskRun { number Int @default(0) friendlyId String @unique + engine RunEngineVersion @default(V1) + status TaskRunStatus @default(PENDING) idempotencyKey String? @@ -1675,8 +1681,12 @@ model TaskRun { project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) projectId String + // The specific queue this run is in queue String + /// The main queue that this run is part of + masterQueue String @default("main") + createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -1708,9 +1718,16 @@ model TaskRun { expiredAt DateTime? maxAttempts Int? + ///When this run is finished, the waitpoint will be marked as completed + associatedWaitpoint Waitpoint? + + ///If there are any blocked waitpoints, the run won't be executed + blockedByWaitpoints TaskRunWaitpoint[] + batchItems BatchTaskRunItem[] dependency TaskRunDependency? CheckpointRestoreEvent CheckpointRestoreEvent[] + executionSnapshot TaskRunExecutionSnapshot[] alerts ProjectAlert[] @@ -1840,6 +1857,131 @@ enum TaskRunStatus { TIMED_OUT } +enum RunEngineVersion { + /// The original version that uses marqs v1 and Graphile + V1 + V2 +} + +/// Used by the RunEngine during TaskRun execution +/// It has the required information to transactionally progress a run through states, +/// and prevent side effects like heartbeats failing a run that has progressed. 
+/// It is optimised for performance and is designed to be cleared at some point, +/// so there are no cascading relationships to other models. +model TaskRunExecutionSnapshot { + id String @id @default(cuid()) + + /// This should never be V1 + engine RunEngineVersion @default(V2) + + /// The execution status + executionStatus TaskRunExecutionStatus + /// For debugging + description String + + /// Run + runId String + run TaskRun @relation(fields: [runId], references: [id]) + runStatus TaskRunStatus + + /// Attempt + currentAttemptId String? + currentAttempt TaskAttempt? @relation(fields: [currentAttemptId], references: [id]) + currentAttemptStatus TaskAttemptStatus? + + /// todo Checkpoint + + /// These are only ever appended, so we don't need updatedAt + createdAt DateTime @default(now()) + + ///todo machine spec? + + ///todo worker + + /// Used to get the latest state quickly + @@index([runId, createdAt(sort: Desc)]) +} + +enum TaskRunExecutionStatus { + RUN_CREATED + DEQUEUED_FOR_EXECUTION + EXECUTING + BLOCKED_BY_WAITPOINTS + FINISHED +} + +/// A Waitpoint blocks a run from continuing until it's completed +/// If there's a waitpoint blocking a run, it shouldn't be in the queue +model Waitpoint { + id String @id @default(cuid()) + + type WaitpointType + status WaitpointStatus @default(PENDING) + + completedAt DateTime? + + /// If it's an Event type waitpoint, this is the event. It can also be provided for the DATETIME type + idempotencyKey String + userProvidedIdempotencyKey Boolean + + /// If an idempotencyKey is no longer active, we store it here and generate a new one for the idempotencyKey field. + /// This is a workaround because Prisma doesn't support partial indexes. + inactiveIdempotencyKey String? + + /// If it's a RUN type waitpoint, this is the associated run + completedByTaskRunId String? @unique + completedByTaskRun TaskRun? 
@relation(fields: [completedByTaskRunId], references: [id], onDelete: SetNull) + + /// If it's a DATETIME type waitpoint, this is the date + completedAfter DateTime? + + /// The runs this waitpoint is blocking + blockingTaskRuns TaskRunWaitpoint[] + + /// When completed, an output can be stored here + output String? + outputType String @default("application/json") + + project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) + projectId String + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + @@unique([projectId, idempotencyKey]) +} + +enum WaitpointType { + RUN + DATETIME + EVENT +} + +enum WaitpointStatus { + PENDING + COMPLETED +} + +model TaskRunWaitpoint { + id String @id @default(cuid()) + + taskRun TaskRun @relation(fields: [taskRunId], references: [id]) + taskRunId String + + waitpoint Waitpoint @relation(fields: [waitpointId], references: [id]) + waitpointId String + + project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) + projectId String + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + @@unique([taskRunId, waitpointId]) + @@index([taskRunId]) + @@index([waitpointId]) +} + model TaskRunTag { id String @id @default(cuid()) name String @@ -1869,7 +2011,7 @@ model TaskRunDependency { checkpointEvent CheckpointRestoreEvent? @relation(fields: [checkpointEventId], references: [id], onDelete: Cascade, onUpdate: Cascade) checkpointEventId String? @unique - /// An attempt that is dependent on this task run. + /// An attempt that is dependent on this task run. dependentAttempt TaskRunAttempt? @relation(fields: [dependentAttemptId], references: [id]) dependentAttemptId String? 
diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md new file mode 100644 index 0000000000..2960756f71 --- /dev/null +++ b/internal-packages/run-engine/README.md @@ -0,0 +1,324 @@ +# Trigger.dev Run Engine + +The Run Engine process runs from triggering, to executing, and completing them. + +It is responsible for: + +- Creating and updating runs as they progress. +- Operating the run queue, including handling concurrency. + +## Components + +### Run Engine + +This is used to actually process a run and store the state at each step. It coordinates with the other components. + +#### Atomicity + +Operations on the run are "atomic" in the sense that only a single operation can mutate them at a time. We use RedLock to ensure this. + +#### Valid state transitions + +The run engine ensures that the run can only transition to valid states. + +#### State history + +When a run is mutated in any way, we store the state. This data is used for the next step for the run, and also for debugging. + +`TaskRunState` is a decent table name. We should have a "description" column which describes the change, this would be purely for internal use but would be very useful for debugging. + +### Run Queue + +This is used to queue, dequeue, and manage concurrency. It also provides visibility into the concurrency for the env, org, etc. + +Run IDs are enqueued. They're pulled from the queue in a fair way with advanced options for debouncing and visibility. + +### Heartbeats + +Heartbeats are used to determine if a run has stopped responding. If a heartbeat isn't received within a defined period then the run is judged to have become stuck and the attempt is failed. + +### Checkpoints + +Checkpoints allow pausing an executing run and then resuming it later. + +## Waitpoints + +A "Waitpoint" is something that prevents a run from continuing: + +- `wait.for()` a future time. +- `triggerAndWait()` until the run is finished. 
+- `batchTriggerAndWait()` until all runs are finished. +- `wait.forRequest()` wait until a request has been received (not implemented yet). + +They block run execution from continuing until all of them are completed/removed. + +Some of them have data associated with them, e.g. the finished run payload. + +Could a run have multiple at once? That might allow us to support Promise.all wrapped. It would also allow more advanced use cases. + +Could this be how we implement other features like `delay`, `rate limit`, and retries waiting before the next try? + +Could we even open up a direct API/SDK for creating one inside a run (would pause execution)? And then completing one (would continue execution)? It could also be "failed" which the run could act upon differently. + +## Notes from call with Eric + +We could expose the API/SDK for creating/completing Waitpoints. + +> They need to be associated with attempts, because that's what gets continued. And if an attempts fails, we don't want to keep the waitpoints. + +> We should have idempotency keys for `wait.for()` and `wait.until()`, so they wouldn't wait on a second attempt. "Waitpoints" have idempotency keys, and these are used for a `wait.forEvent()` (or whatever we call it). + +> How would debounce use this? When the waitpoint is completed, we would "clear" the "idempotencyKey" which would be the user-provided "debounceKey". It wouldn't literally clear it necessarily. Maybe another column `idempotencyKeyActive` would be set to `false`. Or move the key to another column, which is just for reference. + +> `triggerAndWait`, cancelling a child task run. It would clear the waitpoint `idempotencyKey`, same as above. + +> Copying the output from the run into the waitpoint actually does make sense. It simplifies the API for continuing runs. + +> Inside a run you could wait for another run or runs using the run ID. `const output = await wait.forRunToComplete(runId)`. 
This would basically just get a run by ID, then wait for it's waitpoint to be completed. This means every run would have a waitpoint associated with it. + +```ts +//inside a run function +import { runs } from "@trigger.dev/sdk/v3"; + +// Loop through all runs with the tag "user_123456" that have completed + +for await (const run of runs.list({ tag: "user_123456" })) { + await wait.forRunToComplete(run.id); +} + +//wait for many runs to complete +await wait.forRunToComplete(runId); +await wait.forRunsToComplete({ tag: "user_123456" }); +``` + +Rate limit inside a task. This is much trickier. + +```ts +//simple time-based rate limit +await wait.forRateLimit(`timed-${payload.user.id}`, { per: { minute: 10 } }); + +const openAiResult = await wait.forRateLimit( + `openai-${payload.user.id}`, + { limit: 100, recharge: { seconds: 2 } }, + (rateLimit, refreshes) => { + const result = await openai.createCompletion({ + model: "gpt-3.5-turbo", + prompt: "What is the meaning of life?", + }); + const tokensUsed = result.tokensUsed; + + await rateLimit.used(tokensUsed); + + return result; + } +); + +//do stuff with openAiResult +``` + +#### `triggerAndWait()` implementation + +Inside the SDK + +```ts +function triggerAndWait_internal(data) { + //if you don't pass in a string, it won't have a "key" + const waitpoint = await createWaitpoint(); + const response = await apiClient.triggerTask({ ...data, waitpointId: waitpoint.id }); + + //...do normal stuff + + // wait for the waitpoint to be completed + // in reality this probably needs to happen inside the runtime + const result = await waitpointCompletion(waitpoint.id); +} +``` + +Pseudo-code for completing a run and completing the waitpoint: + +```ts +function completeRun(tx, data) { + //complete the child run + const run = await tx.taskRun.update({ where: { id: runId }, data, include: { waitpoint } }); + if (run.waitpoint) { + await completeWaitpoint(tx, { id: run.waitpoint.id }); + + //todo in completeWaitpoint it would check 
if the blocked runs can now continue + //if they have no more blockers then they can continue + + //batchTriggerAndWait with two items + //blocked_by: ["w_1", "w_2"] + //blocked_by: ["w_2"] + //blocked_by: [] then you can continue + } + + const state = await tx.taskRunState.create({ + where: { runId: id }, + data: { runId, status: run.status }, + }); + + const previousState = await tx.taskRunState.findFirst({ where: { runId: runId, latest: true } }); + const waitingOn = previousState.waitingOn?.filter((w) => w !== waitpoint?.id) ?? []; + + if (waitingOn.length === 0) { + } +} +``` + +#### `batchTriggerAndWait()` implementation + +```ts +//todo +``` + +### Example: User-defined waitpoint + +A user's backend code: + +```ts +import { waitpoint } from "@trigger.dev/sdk/v3"; +import type { NextApiRequest, NextApiResponse } from "next"; + +export default async function handler(req: NextApiRequest, res: NextApiResponse<{ id: string }>) { + const userId = req.query.userId; + const isPaying = req.query.isPaying; + + //internal SDK calls, this would be nicer for users to use + const waitpoint = waitpoint(`${userId}/onboarding-completed`); + await waitpoint.complete({ data: { isPaying } }); + + //todo instead this would be a single call + + res.status(200).json(handle); +} +``` + +Inside a user's run + +```ts +export const myTask = task({ + id: "my-task", + run: async (payload) => { + //it doesn't matter if this was completed before the run started + const result = await wait.forPoint<{ isPaying: boolean }>( + `${payload.userId}/onboarding-completed` + ); + }, +}); +``` + +### How would we implement `batchTriggerAndWait`? + +```ts + +``` + +## How does it work? + +It's very important that a run can only be acted on by one process at a time. We lock runs using RedLock while they're being mutated. This prevents some network-related race conditions like the timing of checkpoints and heartbeats permanently hanging runs. 
+ +# Legacy system + +These are all the TaskRun mutations happening right now: + +## 1. TriggerTaskService + +This is called from: + +- trigger task API +- `BatchTriggerTaskService` for each item +- `ReplayTaskRunService` +- `TestTaskService` +- `TriggerScheduledTaskService` when the CRON fires + +Directly creates a run if it doesn't exist, either in the `PENDING` or `DELAYED` states. +Enqueues the run. + +[TriggerTaskService.call()](/apps//webapp/app/v3/services/triggerTask.server.ts#246) + +## 2. Batch trigger + +## 3. DevQueueConsumer executing a run + +### a. Lock run and set status to `EXECUTING` + +[DevQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts#371) + +### b. If an error is thrown, unlock the run and set status to `PENDING` + +[DevQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts#477) + +## 4. SharedQueueConsumer executing a run + +### a. `EXECUTE`, lock the run + +We lock the run and update some basic metadata (but not status). +[SharedQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#394) + +### b. `EXECUTE`, if an error is thrown, unlock the run + +We unlock the run, but don't change the status. +[SharedQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#552) + +### c. `EXECUTE`, if the run has no deployment set the status to `WAITING_FOR_DEPLOY` + +[SharedQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#876) + +## 5. CompleteAttemptService retrying a run + +### a. When an attempt has failed, we set the status to `RETRYING_AFTER_FAILURE` + +[CompleteAttemptService.#completeAttemptFailed()](/apps/webapp/app/v3/services/completeAttempt.server.ts#239) + +## 6. 
CreateTaskRunAttemptService creating a new attempt, setting the run to `EXECUTING` + +We call this when: + +- [Executing a DEV run from the CLI.](/packages/cli-v3/src/dev/workerRuntime.ts#305) +- [Deprecated: directly from the SharedQueueConsumer when we don't support lazy attempts](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#501) +- [When we receive a `CREATE_TASK_RUN_ATTEMPT` message from the coordinator](/apps/webapp/app/v3/handleSocketIo.server.ts#187) + +This is the actual very simple TaskRun update: +[CreateTaskRunAttemptService.call()](/apps/webapp/app/v3/services/createTaskRunAttempt.server.ts#134) + +## 7. EnqueueDelayedRunService sets a run to `PENDING` when the `delay` has elapsed + +When the run attempt gets created it will be marked as `EXECUTING`. + +[EnqueueDelayedRunService.#call()](/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts#41) + +## 8. FinalizeTaskRunService finalizing a run + +This service is called from many places, when a run is in a "final" state. This means the run can't be acted on anymore. + +We set the status, expiredAt and completedAt fields. 
+ +[FinalizeTaskRunService.#call()](/apps/webapp/app/v3/services/finalizeTaskRun.server.ts#63) + +This function is called from: + +- [`FailedTaskRunService` when a run has SYSTEM_FAILURE](/apps/webapp/app/v3/failedTaskRun.server.ts#41) +- [`CancelAttemptService` when an attempt is canceled](/apps/webapp/app/v3/services/cancelAttempt.server.ts#66) +- [`CancelTaskRunService` when a run is canceled](/apps/webapp/app/v3/services/cancelTaskRun.server.ts#51) +- `CompleteAttemptService` when a SYSTEM_FAILURE happens + - [No attempt](/apps/webapp/app/v3/services/completeAttempt.server.ts#74) + - [`completeAttemptFailed` and there's no checkpoint](/apps/webapp/app/v3/services/completeAttempt.server.ts#280) + - [`completeAttemptFailed` and the error is internal and a graceful exit timeout](/apps/webapp/app/v3/services/completeAttempt.server.ts#321) +- `CompleteTaskRunService` when a run has failed (this isn't a bug) + - [`completeAttemptFailed`](/apps/webapp/app/v3/services/completeAttempt.server.ts#352) +- `CompleteTaskRunService` when a run is completed successfully + - [`completeAttemptSuccessfully`](/apps/webapp/app/v3/services/completeAttempt.server.ts#135) +- `CrashTaskRunService` when a run has crashed + - [`call`](/apps/webapp/app/v3/services/crashTaskRun.server.ts#47) +- `ExpireEnqueuedRunService` when a run has expired + - [`call`](/apps/webapp/app/v3/services/expireEnqueuedRun.server.ts#42) + +## 9. RescheduleTaskRunService (when further delaying a delayed run) + +[RescheduleTaskRunService.#call()](/apps/webapp/app/v3/services/rescheduleTaskRun.server.ts#21) + +## 10. Triggering a scheduled run + +Graphile Worker calls this function based on the schedule. We add the schedule data onto the run, and call `TriggerTaskService.call()`. 
+ +[TriggerScheduledRunService.#call()](/apps/webapp/app/v3/services/triggerScheduledTask.server.ts#131) diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json new file mode 100644 index 0000000000..a2cf01cbdc --- /dev/null +++ b/internal-packages/run-engine/package.json @@ -0,0 +1,27 @@ +{ + "name": "@internal/run-engine", + "private": true, + "version": "0.0.1", + "main": "./src/index.ts", + "types": "./src/index.ts", + "dependencies": { + "@internal/zod-worker": "workspace:*", + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@trigger.dev/core": "workspace:*", + "@trigger.dev/database": "workspace:*", + "ioredis": "^5.3.2", + "nanoid": "^3.3.4", + "redlock": "5.0.0-beta.2", + "typescript": "^5.5.4", + "zod": "3.22.3" + }, + "devDependencies": { + "@internal/testcontainers": "workspace:*", + "vitest": "^1.4.0" + }, + "scripts": { + "typecheck": "tsc --noEmit", + "test": "vitest" + } +} diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts new file mode 100644 index 0000000000..64bfe6c06b --- /dev/null +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -0,0 +1,150 @@ +import { expect } from "vitest"; +import { containerTest } from "@internal/testcontainers"; +import { RunEngine } from "./index.js"; +import { PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; + +describe("RunEngine", () => { + containerTest( + "Trigger a simple run", + { timeout: 15_000 }, + async ({ postgresContainer, prisma, redisContainer }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + zodWorker: { + connectionString: postgresContainer.getConnectionUri(), + 
shutdownTimeoutInMs: 100, + }, + }); + + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier: "test-task", + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + expect(run).toBeDefined(); + expect(run.friendlyId).toBe("run_1234"); + + //check it's actually in the db + const runFromDb = await prisma.taskRun.findUnique({ + where: { + friendlyId: "run_1234", + }, + }); + expect(runFromDb).toBeDefined(); + expect(runFromDb?.id).toBe(run.id); + + //check the waitpoint is created + const runWaitpoint = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpoint.length).toBe(1); + expect(runWaitpoint[0].type).toBe("RUN"); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); + expect(queueLength).toBe(1); + + //concurrency before + const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyBefore).toBe(0); + + //dequeue the run + const dequeued = await engine.runQueue.dequeueMessageInSharedQueue( + "test_12345", + run.masterQueue + ); + expect(dequeued?.messageId).toBe(run.id); + + const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyAfter).toBe(1); + } + ); + + //todo triggerAndWait + + //todo batchTriggerAndWait + + //todo checkpoints + + //todo heartbeats + + //todo failing a run + + //todo cancelling a run + + //todo expiring a run + + //todo delaying a run +}); + +async function setupAuthenticatedEnvironment(prisma: PrismaClient, type: RuntimeEnvironmentType) { + // Your database setup logic here + const org = await 
prisma.organization.create({ + data: { + title: "Test Organization", + slug: "test-organization", + }, + }); + + const project = await prisma.project.create({ + data: { + name: "Test Project", + slug: "test-project", + externalRef: "proj_1234", + organizationId: org.id, + }, + }); + + const environment = await prisma.runtimeEnvironment.create({ + data: { + type, + slug: "slug", + projectId: project.id, + organizationId: org.id, + apiKey: "api_key", + pkApiKey: "pk_api_key", + shortcode: "short_code", + maximumConcurrencyLimit: 10, + }, + }); + + return await prisma.runtimeEnvironment.findUniqueOrThrow({ + where: { + id: environment.id, + }, + include: { + project: true, + organization: true, + orgMember: true, + }, + }); +} diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts new file mode 100644 index 0000000000..f74d11ee91 --- /dev/null +++ b/internal-packages/run-engine/src/engine/index.ts @@ -0,0 +1,541 @@ +import { RunnerOptions, ZodWorker } from "@internal/zod-worker"; +import { trace } from "@opentelemetry/api"; +import { Logger } from "@trigger.dev/core/logger"; +import { QueueOptions } from "@trigger.dev/core/v3"; +import { generateFriendlyId, parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; +import { + $transaction, + Prisma, + PrismaClient, + PrismaClientOrTransaction, + TaskRun, + Waitpoint, +} from "@trigger.dev/database"; +import { Redis, type RedisOptions } from "ioredis"; +import Redlock from "redlock"; +import { z } from "zod"; +import { RunQueue } from "../run-queue"; +import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; +import { MinimalAuthenticatedEnvironment } from "../shared"; + +import { nanoid } from "nanoid"; + +type Options = { + redis: RedisOptions; + prisma: PrismaClient; + zodWorker: RunnerOptions & { + shutdownTimeoutInMs: number; + }; +}; + +type TriggerParams = { + friendlyId: string; + number: number; + environment: 
MinimalAuthenticatedEnvironment; + idempotencyKey?: string; + taskIdentifier: string; + payload: string; + payloadType: string; + context: any; + traceContext: Record; + traceId: string; + spanId: string; + parentSpanId?: string; + lockedToVersionId?: string; + concurrencyKey?: string; + masterQueue: string; + queueName: string; + queue?: QueueOptions; + isTest: boolean; + delayUntil?: Date; + queuedAt?: Date; + maxAttempts?: number; + ttl?: string; + tags: string[]; + parentTaskRunId?: string; + parentTaskRunAttemptId?: string; + rootTaskRunId?: string; + batchId?: string; + resumeParentOnCompletion?: boolean; + depth?: number; + metadata?: string; + metadataType?: string; + seedMetadata?: string; + seedMetadataType?: string; +}; + +const schema = { + "runengine.waitpointCompleteDateTime": z.object({ + waitpointId: z.string(), + }), + "runengine.expireRun": z.object({ + runId: z.string(), + }), +}; + +type EngineWorker = ZodWorker; + +export class RunEngine { + private redis: Redis; + private prisma: PrismaClient; + private redlock: Redlock; + runQueue: RunQueue; + private zodWorker: EngineWorker; + private logger = new Logger("RunEngine", "debug"); + + constructor(private readonly options: Options) { + this.prisma = options.prisma; + this.redis = new Redis(options.redis); + this.redlock = new Redlock([this.redis], { + driftFactor: 0.01, + retryCount: 10, + retryDelay: 200, // time in ms + retryJitter: 200, // time in ms + automaticExtensionThreshold: 500, // time in ms + }); + + this.runQueue = new RunQueue({ + name: "rq", + tracer: trace.getTracer("rq"), + queuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 36 }), + envQueuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 12 }), + workers: 1, + defaultEnvConcurrency: 10, + enableRebalancing: false, + logger: new Logger("RunQueue", "warn"), + redis: options.redis, + }); + + this.zodWorker = new ZodWorker({ + name: "runQueueWorker", + prisma: options.prisma, + 
replica: options.prisma, + logger: new Logger("RunQueueWorker", "debug"), + runnerOptions: options.zodWorker, + shutdownTimeoutInMs: options.zodWorker.shutdownTimeoutInMs, + schema, + tasks: { + "runengine.waitpointCompleteDateTime": { + priority: 0, + maxAttempts: 10, + handler: async (payload, job) => { + await this.#completeWaitpoint(payload.waitpointId); + }, + }, + "runengine.expireRun": { + priority: 0, + maxAttempts: 10, + handler: async (payload, job) => { + await this.expireRun(payload.runId); + }, + }, + }, + }); + } + + //MARK: - Run functions + + /** "Triggers" one run. + */ + async trigger( + { + friendlyId, + number, + environment, + idempotencyKey, + taskIdentifier, + payload, + payloadType, + context, + traceContext, + traceId, + spanId, + parentSpanId, + lockedToVersionId, + concurrencyKey, + masterQueue, + queueName, + queue, + isTest, + delayUntil, + queuedAt, + maxAttempts, + ttl, + tags, + parentTaskRunId, + parentTaskRunAttemptId, + rootTaskRunId, + batchId, + resumeParentOnCompletion, + depth, + metadata, + metadataType, + seedMetadata, + seedMetadataType, + }: TriggerParams, + tx?: PrismaClientOrTransaction + ) { + const prisma = tx ?? this.prisma; + + const status = delayUntil ? "DELAYED" : "PENDING"; + + //create run + const taskRun = await prisma.taskRun.create({ + data: { + status, + number, + friendlyId, + runtimeEnvironmentId: environment.id, + projectId: environment.project.id, + idempotencyKey, + taskIdentifier, + payload, + payloadType, + context, + traceContext, + traceId, + spanId, + parentSpanId, + lockedToVersionId, + concurrencyKey, + queue: queueName, + masterQueue, + isTest, + delayUntil, + queuedAt, + maxAttempts, + ttl, + tags: + tags.length === 0 + ? 
undefined + : { + connect: tags.map((id) => ({ id })), + }, + parentTaskRunId, + parentTaskRunAttemptId, + rootTaskRunId, + batchId, + resumeParentOnCompletion, + depth, + metadata, + metadataType, + seedMetadata, + seedMetadataType, + executionSnapshot: { + create: { + engine: "V2", + executionStatus: "RUN_CREATED", + description: "Run was created", + runStatus: status, + }, + }, + }, + }); + + await this.redlock.using([taskRun.id], 5000, async (signal) => { + //todo add this in some places throughout this code + if (signal.aborted) { + throw signal.error; + } + + //create associated waitpoint (this completes when the run completes) + const associatedWaitpoint = await this.#createRunAssociatedWaitpoint(prisma, { + projectId: environment.project.id, + completedByTaskRunId: taskRun.id, + }); + + //triggerAndWait or batchTriggerAndWait + if (resumeParentOnCompletion && parentTaskRunId) { + //this will block the parent run from continuing until this waitpoint is completed (and removed) + await this.#blockRunWithWaitpoint(prisma, { + orgId: environment.organization.id, + runId: parentTaskRunId, + waitpoint: associatedWaitpoint, + }); + } + + if (queue) { + const concurrencyLimit = + typeof queue.concurrencyLimit === "number" + ? 
Math.max(0, queue.concurrencyLimit) + : undefined; + + let taskQueue = await prisma.taskQueue.findFirst({ + where: { + runtimeEnvironmentId: environment.id, + name: queueName, + }, + }); + + if (taskQueue) { + taskQueue = await prisma.taskQueue.update({ + where: { + id: taskQueue.id, + }, + data: { + concurrencyLimit, + rateLimit: queue.rateLimit, + }, + }); + } else { + taskQueue = await prisma.taskQueue.create({ + data: { + friendlyId: generateFriendlyId("queue"), + name: queueName, + concurrencyLimit, + runtimeEnvironmentId: environment.id, + projectId: environment.project.id, + rateLimit: queue.rateLimit, + type: "NAMED", + }, + }); + } + + if (typeof taskQueue.concurrencyLimit === "number") { + await this.runQueue.updateQueueConcurrencyLimits( + environment, + taskQueue.name, + taskQueue.concurrencyLimit + ); + } else { + await this.runQueue.removeQueueConcurrencyLimits(environment, taskQueue.name); + } + } + + if (taskRun.delayUntil) { + const delayWaitpoint = await this.#createDateTimeWaitpoint(prisma, { + projectId: environment.project.id, + completedAfter: taskRun.delayUntil, + }); + + await this.#blockRunWithWaitpoint(prisma, { + orgId: environment.organization.id, + runId: taskRun.id, + waitpoint: delayWaitpoint, + }); + } + + if (!taskRun.delayUntil && taskRun.ttl) { + const expireAt = parseNaturalLanguageDuration(taskRun.ttl); + + if (expireAt) { + await this.zodWorker.enqueue( + "runengine.expireRun", + { runId: taskRun.id }, + { tx, runAt: expireAt, jobKey: `runengine.expireRun.${taskRun.id}` } + ); + } + } + + await this.enqueueRun(taskRun, environment, prisma); + }); + + //todo release parent concurrency (for the project, task, and environment, but not for the queue?) + //todo if this has been triggered with triggerAndWait or batchTriggerAndWait + + return taskRun; + } + + /** Triggers multiple runs. + * This doesn't start execution, but it will create a batch and schedule them for execution. 
+ */ + async batchTrigger() {} + + /** The run can be added to the queue. When it's pulled from the queue it will be executed. */ + async enqueueRun( + run: TaskRun, + env: MinimalAuthenticatedEnvironment, + tx?: PrismaClientOrTransaction + ) { + await this.runQueue.enqueueMessage({ + env, + masterQueue: run.masterQueue, + message: { + runId: run.id, + taskIdentifier: run.taskIdentifier, + orgId: env.organization.id, + projectId: env.project.id, + environmentId: env.id, + environmentType: env.type, + queue: run.queue, + concurrencyKey: run.concurrencyKey ?? undefined, + timestamp: Date.now(), + }, + }); + + //todo update the TaskRunExecutionSnapshot + } + + async dequeueRun(consumerId: string, masterQueue: string) { + const message = await this.runQueue.dequeueMessageInSharedQueue(consumerId, masterQueue); + //todo update the TaskRunExecutionSnapshot + //todo update the TaskRun status? + return message; + } + + /** We want to actually execute the run, this could be a continuation of a previous execution. + * This is called from the queue, when the run has been pulled. */ + //todo think more about this, when do we create the attempt? + //todo what does this actually do? + //todo how does it get sent to the worker? 
DEV and PROD + async prepareForExecution(runId: string) {} + + async prepareForAttempt(runId: string) {} + + async complete(runId: string, completion: any) {} + + async expireRun(runId: string) {} + + //MARK: - Waitpoints + async #createRunAssociatedWaitpoint( + tx: PrismaClientOrTransaction, + { projectId, completedByTaskRunId }: { projectId: string; completedByTaskRunId: string } + ) { + return tx.waitpoint.create({ + data: { + type: "RUN", + status: "PENDING", + idempotencyKey: nanoid(24), + userProvidedIdempotencyKey: false, + projectId, + completedByTaskRunId, + }, + }); + } + + async #createDateTimeWaitpoint( + tx: PrismaClientOrTransaction, + { projectId, completedAfter }: { projectId: string; completedAfter: Date } + ) { + const waitpoint = await tx.waitpoint.create({ + data: { + type: "DATETIME", + status: "PENDING", + idempotencyKey: nanoid(24), + userProvidedIdempotencyKey: false, + projectId, + completedAfter, + }, + }); + + await this.zodWorker.enqueue( + "runengine.waitpointCompleteDateTime", + { waitpointId: waitpoint.id }, + { tx, runAt: completedAfter, jobKey: `waitpointCompleteDateTime.${waitpoint.id}` } + ); + + return waitpoint; + } + + async #blockRunWithWaitpoint( + tx: PrismaClientOrTransaction, + { orgId, runId, waitpoint }: { orgId: string; runId: string; waitpoint: Waitpoint } + ) { + //todo it would be better if we didn't remove from the queue, because this removes the payload + //todo better would be to have a "block" function which remove it from the queue but doesn't remove the payload + //todo + // await this.runQueue.acknowledgeMessage(orgId, runId); + + //todo release concurrency and make sure the run isn't in the queue + // await this.runQueue.blockMessage(orgId, runId); + + return tx.taskRunWaitpoint.create({ + data: { + taskRunId: runId, + waitpointId: waitpoint.id, + projectId: waitpoint.projectId, + }, + }); + } + + /** This completes a waitpoint and then continues any runs blocked by the waitpoint, + * if they're no longer 
blocked. This doesn't suffer from race conditions. */ + async #completeWaitpoint(id: string) { + const waitpoint = await this.prisma.waitpoint.findUnique({ + where: { id }, + }); + + if (!waitpoint) { + throw new Error(`Waitpoint ${id} not found`); + } + + if (waitpoint.status === "COMPLETED") { + return; + } + + await $transaction( + this.prisma, + async (tx) => { + // 1. Find the TaskRuns associated with this waitpoint + const affectedTaskRuns = await tx.taskRunWaitpoint.findMany({ + where: { waitpointId: id }, + select: { taskRunId: true }, + }); + + if (affectedTaskRuns.length === 0) { + throw new Error(`No TaskRunWaitpoints found for waitpoint ${id}`); + } + + // 2. Delete the TaskRunWaitpoint entries for this specific waitpoint + await tx.taskRunWaitpoint.deleteMany({ + where: { waitpointId: id }, + }); + + // 3. Update the waitpoint status + await tx.waitpoint.update({ + where: { id }, + data: { status: "COMPLETED" }, + }); + + // 4. Check which of the affected TaskRuns now have no waitpoints + const taskRunsToResume = await tx.taskRun.findMany({ + where: { + id: { in: affectedTaskRuns.map((run) => run.taskRunId) }, + blockedByWaitpoints: { none: {} }, + status: { in: ["PENDING", "WAITING_TO_RESUME"] }, + }, + include: { + runtimeEnvironment: { + select: { + id: true, + type: true, + maximumConcurrencyLimit: true, + project: { select: { id: true } }, + organization: { select: { id: true } }, + }, + }, + }, + }); + + // 5. Continue the runs that have no more waitpoints + for (const run of taskRunsToResume) { + await this.enqueueRun(run, run.runtimeEnvironment, tx); + } + }, + (error) => { + this.logger.error(`Error completing waitpoint ${id}, retrying`, { error }); + throw error; + }, + { isolationLevel: Prisma.TransactionIsolationLevel.ReadCommitted } + ); + } +} + +/* +Starting execution flow: + +1. Run id is pulled from a queue +2. Prepare the run for an attempt (returns data to send to the worker) + a. The run is marked as "waiting to start"? + b. 
Create a TaskRunState with the run id, and the state "waiting to start". + c. Start a heartbeat with the TaskRunState id, in case it never starts. +3. The run is sent to the worker +4. When the worker has received the run, it asks the platform for an attempt +5. The attempt is created + a. The attempt is created + b. The TaskRunState is updated to "EXECUTING" + c. Start a heartbeat with the TaskRunState id. + d. The TaskRun is updated to "EXECUTING" +6. A response is sent back to the worker with the attempt data +7. The code executes... +*/ diff --git a/internal-packages/run-engine/src/index.ts b/internal-packages/run-engine/src/index.ts new file mode 100644 index 0000000000..b71175be2a --- /dev/null +++ b/internal-packages/run-engine/src/index.ts @@ -0,0 +1 @@ +export { RunEngine } from "./engine/index"; diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts new file mode 100644 index 0000000000..8a3f40ada9 --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -0,0 +1,453 @@ +import { trace } from "@opentelemetry/api"; +import { Logger } from "@trigger.dev/core/logger"; +import { describe } from "node:test"; +import { redisTest } from "@internal/testcontainers"; +import { RunQueue } from "./index.js"; +import { RunQueueShortKeyProducer } from "./keyProducer.js"; +import { SimpleWeightedChoiceStrategy } from "./simpleWeightedPriorityStrategy.js"; +import { InputPayload } from "./types.js"; +import { abort } from "node:process"; + +const testOptions = { + name: "rq", + tracer: trace.getTracer("rq"), + queuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 36 }), + envQueuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 12 }), + workers: 1, + defaultEnvConcurrency: 10, + enableRebalancing: false, + logger: new Logger("RunQueue", "warn"), +}; + +const authenticatedEnvProd = { + id: "e1234", + type: "PRODUCTION" as const, 
+ maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, +}; + +const authenticatedEnvDev = { + id: "e1234", + type: "DEVELOPMENT" as const, + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, +}; + +const messageProd: InputPayload = { + runId: "r1234", + taskIdentifier: "task/my-task", + orgId: "o1234", + projectId: "p1234", + environmentId: "e1234", + environmentType: "PRODUCTION", + queue: "task/my-task", + timestamp: Date.now(), +}; + +const messageDev: InputPayload = { + runId: "r4321", + taskIdentifier: "task/my-task", + orgId: "o1234", + projectId: "p1234", + environmentId: "e4321", + environmentType: "DEVELOPMENT", + queue: "task/my-task", + timestamp: Date.now(), +}; + +describe("RunQueue", () => { + redisTest( + "Get/set Queue concurrency limit", + { timeout: 5_000 }, + async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + //initial value + const initial = await queue.getQueueConcurrencyLimit(authenticatedEnvProd, "task/my-task"); + expect(initial).toBe(undefined); + + //set 20 + const result = await queue.updateQueueConcurrencyLimits( + authenticatedEnvProd, + "task/my-task", + 20 + ); + expect(result).toBe("OK"); + + //get 20 + const updated = await queue.getQueueConcurrencyLimit(authenticatedEnvProd, "task/my-task"); + expect(updated).toBe(20); + + //remove + const result2 = await queue.removeQueueConcurrencyLimits( + authenticatedEnvProd, + "task/my-task" + ); + expect(result2).toBe(1); + + //get undefined + const removed = await queue.getQueueConcurrencyLimit(authenticatedEnvProd, "task/my-task"); + expect(removed).toBe(undefined); + } finally { + await queue.quit(); + } + } + ); + + redisTest( + "Update env concurrency limits", + { timeout: 5_000 }, + async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { 
host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + //initial value + const initial = await queue.getEnvConcurrencyLimit(authenticatedEnvProd); + expect(initial).toBe(10); + + //set 20 + await queue.updateEnvConcurrencyLimits({ + ...authenticatedEnvProd, + maximumConcurrencyLimit: 20, + }); + + //get 20 + const updated = await queue.getEnvConcurrencyLimit(authenticatedEnvProd); + expect(updated).toBe(20); + } finally { + await queue.quit(); + } + } + ); + + redisTest( + "Enqueue/Dequeue a message in env (DEV run, no concurrency key)", + { timeout: 5_000 }, + async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + //initial queue length + const result = await queue.lengthOfQueue(authenticatedEnvDev, messageDev.queue); + expect(result).toBe(0); + + //initial oldest message + const oldestScore = await queue.oldestMessageInQueue(authenticatedEnvDev, messageDev.queue); + expect(oldestScore).toBe(undefined); + + const envMasterQueue = `env:${authenticatedEnvDev.id}`; + + //enqueue message + await queue.enqueueMessage({ + env: authenticatedEnvDev, + message: messageDev, + masterQueue: `env:${authenticatedEnvDev.id}`, + }); + + //queue length + const result2 = await queue.lengthOfQueue(authenticatedEnvDev, messageDev.queue); + expect(result2).toBe(1); + + //oldest message + const oldestScore2 = await queue.oldestMessageInQueue( + authenticatedEnvDev, + messageDev.queue + ); + expect(oldestScore2).toBe(messageDev.timestamp); + + //concurrencies + const queueConcurrency = await queue.currentConcurrencyOfQueue( + authenticatedEnvDev, + messageDev.queue + ); + expect(queueConcurrency).toBe(0); + const envConcurrency = await queue.currentConcurrencyOfEnvironment(authenticatedEnvDev); + expect(envConcurrency).toBe(0); + const projectConcurrency = await queue.currentConcurrencyOfProject(authenticatedEnvDev); + 
expect(projectConcurrency).toBe(0); + const taskConcurrency = await queue.currentConcurrencyOfTask( + authenticatedEnvDev, + messageDev.taskIdentifier + ); + expect(taskConcurrency).toBe(0); + + const dequeued = await queue.dequeueMessageInSharedQueue("test_12345", envMasterQueue); + expect(dequeued?.messageId).toEqual(messageDev.runId); + expect(dequeued?.message.orgId).toEqual(messageDev.orgId); + expect(dequeued?.message.version).toEqual("1"); + expect(dequeued?.message.masterQueue).toEqual(envMasterQueue); + + //concurrencies + const queueConcurrency2 = await queue.currentConcurrencyOfQueue( + authenticatedEnvDev, + messageDev.queue + ); + expect(queueConcurrency2).toBe(1); + const envConcurrency2 = await queue.currentConcurrencyOfEnvironment(authenticatedEnvDev); + expect(envConcurrency2).toBe(1); + const projectConcurrency2 = await queue.currentConcurrencyOfProject(authenticatedEnvDev); + expect(projectConcurrency2).toBe(1); + const taskConcurrency2 = await queue.currentConcurrencyOfTask( + authenticatedEnvDev, + messageDev.taskIdentifier + ); + expect(taskConcurrency2).toBe(1); + + const dequeued2 = await queue.dequeueMessageInSharedQueue("test_12345", envMasterQueue); + expect(dequeued2).toBe(undefined); + } finally { + await queue.quit(); + } + } + ); + + redisTest( + "Enqueue/Dequeue a message from the shared queue (PROD run, no concurrency key)", + { timeout: 5_000 }, + async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + //initial queue length + const result = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(result).toBe(0); + + //initial oldest message + const oldestScore = await queue.oldestMessageInQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(oldestScore).toBe(undefined); + + //enqueue message + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message: messageProd, 
+ masterQueue: "main", + }); + + //queue length + const result2 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(result2).toBe(1); + + //oldest message + const oldestScore2 = await queue.oldestMessageInQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(oldestScore2).toBe(messageProd.timestamp); + + //concurrencies + const queueConcurrency = await queue.currentConcurrencyOfQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(queueConcurrency).toBe(0); + const envConcurrency = await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd); + expect(envConcurrency).toBe(0); + const projectConcurrency = await queue.currentConcurrencyOfProject(authenticatedEnvProd); + expect(projectConcurrency).toBe(0); + const taskConcurrency = await queue.currentConcurrencyOfTask( + authenticatedEnvProd, + messageProd.taskIdentifier + ); + expect(taskConcurrency).toBe(0); + + //dequeue + const dequeued = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + expect(dequeued?.messageId).toEqual(messageProd.runId); + expect(dequeued?.message.orgId).toEqual(messageProd.orgId); + expect(dequeued?.message.version).toEqual("1"); + expect(dequeued?.message.masterQueue).toEqual("main"); + + //concurrencies + const queueConcurrency2 = await queue.currentConcurrencyOfQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(queueConcurrency2).toBe(1); + const envConcurrency2 = await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd); + expect(envConcurrency2).toBe(1); + const projectConcurrency2 = await queue.currentConcurrencyOfProject(authenticatedEnvProd); + expect(projectConcurrency2).toBe(1); + const taskConcurrency2 = await queue.currentConcurrencyOfTask( + authenticatedEnvProd, + messageProd.taskIdentifier + ); + expect(taskConcurrency2).toBe(1); + + //queue length + const length2 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(length2).toBe(0); + + const dequeued2 = await 
queue.dequeueMessageInSharedQueue("test_12345", "main"); + expect(dequeued2).toBe(undefined); + } finally { + await queue.quit(); + } + } + ); + + redisTest("Get shared queue details", { timeout: 5_000 }, async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + const result = await queue.getSharedQueueDetails("main"); + expect(result.selectionId).toBe("getSharedQueueDetails"); + expect(result.queueCount).toBe(0); + expect(result.queueChoice.choice).toStrictEqual({ abort: true }); + + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message: messageProd, + masterQueue: "main", + }); + + const result2 = await queue.getSharedQueueDetails("main"); + expect(result2.selectionId).toBe("getSharedQueueDetails"); + expect(result2.queueCount).toBe(1); + expect(result2.queues[0].score).toBe(messageProd.timestamp); + expect(result2.queueChoice.choice).toBe( + "{org:o1234}:proj:p1234:env:e1234:queue:task/my-task" + ); + } finally { + await queue.quit(); + } + }); + + redisTest("Acking", { timeout: 5_000 }, async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message: messageProd, + masterQueue: "main", + }); + + const message = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + expect(message).toBeDefined(); + + //check the message is gone + const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const exists = await redis.exists(key); + expect(exists).toBe(1); + + await queue.acknowledgeMessage(message!.message.orgId, message!.messageId); + + //concurrencies + const queueConcurrency = await queue.currentConcurrencyOfQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(queueConcurrency).toBe(0); + const 
envConcurrency = await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd); + expect(envConcurrency).toBe(0); + const projectConcurrency = await queue.currentConcurrencyOfProject(authenticatedEnvProd); + expect(projectConcurrency).toBe(0); + const taskConcurrency = await queue.currentConcurrencyOfTask( + authenticatedEnvProd, + messageProd.taskIdentifier + ); + expect(taskConcurrency).toBe(0); + + //check the message is gone + const exists2 = await redis.exists(key); + expect(exists2).toBe(0); + + //dequeue + const message2 = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + expect(message2).toBeUndefined(); + } finally { + await queue.quit(); + } + }); + + redisTest("Nacking", { timeout: 5_000 }, async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message: messageProd, + masterQueue: "main", + }); + + const message = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + expect(message).toBeDefined(); + + //check the message is there + const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const exists = await redis.exists(key); + expect(exists).toBe(1); + + //concurrencies + const queueConcurrency = await queue.currentConcurrencyOfQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(queueConcurrency).toBe(1); + const envConcurrency = await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd); + expect(envConcurrency).toBe(1); + const projectConcurrency = await queue.currentConcurrencyOfProject(authenticatedEnvProd); + expect(projectConcurrency).toBe(1); + const taskConcurrency = await queue.currentConcurrencyOfTask( + authenticatedEnvProd, + messageProd.taskIdentifier + ); + expect(taskConcurrency).toBe(1); + + await queue.nackMessage(message!.message.orgId, message!.messageId); + + //concurrencies 
+ const queueConcurrency2 = await queue.currentConcurrencyOfQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(queueConcurrency2).toBe(0); + const envConcurrency2 = await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd); + expect(envConcurrency2).toBe(0); + const projectConcurrency2 = await queue.currentConcurrencyOfProject(authenticatedEnvProd); + expect(projectConcurrency2).toBe(0); + const taskConcurrency2 = await queue.currentConcurrencyOfTask( + authenticatedEnvProd, + messageProd.taskIdentifier + ); + expect(taskConcurrency2).toBe(0); + + //check the message is there + const exists2 = await redis.exists(key); + expect(exists2).toBe(1); + + //dequeue + const message2 = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + expect(message2?.messageId).toBe(messageProd.runId); + } finally { + await queue.quit(); + } + }); +}); diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts new file mode 100644 index 0000000000..cb07054051 --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -0,0 +1,1384 @@ +import { context, propagation, Span, SpanKind, SpanOptions, Tracer } from "@opentelemetry/api"; +import { + SEMATTRS_MESSAGE_ID, + SEMATTRS_MESSAGING_OPERATION, + SEMATTRS_MESSAGING_SYSTEM, +} from "@opentelemetry/semantic-conventions"; +import { Logger } from "@trigger.dev/core/logger"; +import { flattenAttributes } from "@trigger.dev/core/v3"; +import { Redis, type Callback, type RedisOptions, type Result } from "ioredis"; +import { AsyncWorker } from "../shared/asyncWorker.js"; +import { + attributesFromAuthenticatedEnv, + MinimalAuthenticatedEnvironment, +} from "../shared/index.js"; +import { + InputPayload, + OutputPayload, + QueueCapacities, + QueueRange, + RunQueueKeyProducer, + RunQueuePriorityStrategy, +} from "./types.js"; +import { RunQueueShortKeyProducer } from "./keyProducer.js"; + +const SemanticAttributes = { + QUEUE: 
"runqueue.queue", + MASTER_QUEUE: "runqueue.masterQueue", + RUN_ID: "runqueue.runId", + CONCURRENCY_KEY: "runqueue.concurrencyKey", + ORG_ID: "runqueue.orgId", +}; + +export type RunQueueOptions = { + name: string; + tracer: Tracer; + redis: RedisOptions; + defaultEnvConcurrency: number; + windowSize?: number; + workers: number; + queuePriorityStrategy: RunQueuePriorityStrategy; + envQueuePriorityStrategy: RunQueuePriorityStrategy; + enableRebalancing?: boolean; + verbose?: boolean; + logger: Logger; +}; + +/** + * RunQueue – the queue that's used to process runs + */ +export class RunQueue { + private logger: Logger; + private redis: Redis; + public keys: RunQueueKeyProducer; + private queuePriorityStrategy: RunQueuePriorityStrategy; + #rebalanceWorkers: Array = []; + + constructor(private readonly options: RunQueueOptions) { + this.redis = new Redis(options.redis); + this.logger = options.logger; + + this.keys = new RunQueueShortKeyProducer("rq:"); + this.queuePriorityStrategy = options.queuePriorityStrategy; + + this.#registerCommands(); + } + + get name() { + return this.options.name; + } + + get tracer() { + return this.options.tracer; + } + + public async updateQueueConcurrencyLimits( + env: MinimalAuthenticatedEnvironment, + queue: string, + concurrency: number + ) { + return this.redis.set(this.keys.queueConcurrencyLimitKey(env, queue), concurrency); + } + + public async removeQueueConcurrencyLimits(env: MinimalAuthenticatedEnvironment, queue: string) { + return this.redis.del(this.keys.queueConcurrencyLimitKey(env, queue)); + } + + public async getQueueConcurrencyLimit(env: MinimalAuthenticatedEnvironment, queue: string) { + const result = await this.redis.get(this.keys.queueConcurrencyLimitKey(env, queue)); + + return result ? 
Number(result) : undefined; + } + + public async updateEnvConcurrencyLimits(env: MinimalAuthenticatedEnvironment) { + await this.#callUpdateGlobalConcurrencyLimits({ + envConcurrencyLimitKey: this.keys.envConcurrencyLimitKey(env), + envConcurrencyLimit: env.maximumConcurrencyLimit, + }); + } + + public async getEnvConcurrencyLimit(env: MinimalAuthenticatedEnvironment) { + const result = await this.redis.get(this.keys.envConcurrencyLimitKey(env)); + + return result ? Number(result) : this.options.defaultEnvConcurrency; + } + + public async lengthOfQueue( + env: MinimalAuthenticatedEnvironment, + queue: string, + concurrencyKey?: string + ) { + return this.redis.zcard(this.keys.queueKey(env, queue, concurrencyKey)); + } + + public async oldestMessageInQueue( + env: MinimalAuthenticatedEnvironment, + queue: string, + concurrencyKey?: string + ) { + // Get the "score" of the sorted set to get the oldest message score + const result = await this.redis.zrange( + this.keys.queueKey(env, queue, concurrencyKey), + 0, + 0, + "WITHSCORES" + ); + + if (result.length === 0) { + return; + } + + return Number(result[1]); + } + + public async currentConcurrencyOfQueue( + env: MinimalAuthenticatedEnvironment, + queue: string, + concurrencyKey?: string + ) { + return this.redis.scard(this.keys.currentConcurrencyKey(env, queue, concurrencyKey)); + } + + public async currentConcurrencyOfEnvironment(env: MinimalAuthenticatedEnvironment) { + return this.redis.scard(this.keys.envCurrentConcurrencyKey(env)); + } + + public async currentConcurrencyOfProject(env: MinimalAuthenticatedEnvironment) { + return this.redis.scard(this.keys.projectCurrentConcurrencyKey(env)); + } + + public async currentConcurrencyOfTask( + env: MinimalAuthenticatedEnvironment, + taskIdentifier: string + ) { + return this.redis.scard(this.keys.taskIdentifierCurrentConcurrencyKey(env, taskIdentifier)); + } + + public async enqueueMessage({ + env, + message, + masterQueue, + }: { + env: 
MinimalAuthenticatedEnvironment; + message: InputPayload; + masterQueue: string; + }) { + return await this.#trace( + "enqueueMessage", + async (span) => { + const { runId, concurrencyKey } = message; + + const queue = this.keys.queueKey(env, message.queue, concurrencyKey); + + propagation.inject(context.active(), message); + + span.setAttributes({ + [SemanticAttributes.QUEUE]: queue, + [SemanticAttributes.RUN_ID]: runId, + [SemanticAttributes.CONCURRENCY_KEY]: concurrencyKey, + [SemanticAttributes.MASTER_QUEUE]: masterQueue, + }); + + const messagePayload: OutputPayload = { + ...message, + version: "1", + queue, + masterQueue, + }; + + await this.#callEnqueueMessage(messagePayload, masterQueue); + }, + { + kind: SpanKind.PRODUCER, + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "publish", + [SEMATTRS_MESSAGE_ID]: message.runId, + [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", + ...attributesFromAuthenticatedEnv(env), + }, + } + ); + } + + public async getSharedQueueDetails(masterQueue: string) { + const { range } = await this.queuePriorityStrategy.nextCandidateSelection( + masterQueue, + "getSharedQueueDetails" + ); + const queues = await this.#getChildQueuesWithScores(masterQueue, range); + + const queuesWithScores = await this.#calculateQueueScores(queues, (queue) => + this.#calculateMessageQueueCapacities(queue) + ); + + // We need to priority shuffle here to ensure all workers aren't just working on the highest priority queue + const choice = this.queuePriorityStrategy.chooseQueue( + queuesWithScores, + masterQueue, + "getSharedQueueDetails", + range + ); + + return { + selectionId: "getSharedQueueDetails", + queues, + queuesWithScores, + nextRange: range, + queueCount: queues.length, + queueChoice: choice, + }; + } + + /** + * Dequeue a message from the shared queue (this should be used in production environments) + */ + public async dequeueMessageInSharedQueue(consumerId: string, masterQueue: string) { + return this.#trace( + "dequeueMessageInSharedQueue", + 
async (span) => { + // Read the parent queue for matching queues + const messageQueue = await this.#getRandomQueueFromParentQueue( + masterQueue, + this.options.queuePriorityStrategy, + (queue) => this.#calculateMessageQueueCapacities(queue, { checkForDisabled: true }), + consumerId + ); + + if (!messageQueue) { + return; + } + + // If the queue includes a concurrency key, we need to remove the ck:concurrencyKey from the queue name + const message = await this.#callDequeueMessage({ + messageQueue, + masterQueue, + concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(messageQueue), + currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(messageQueue), + envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(messageQueue), + envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(messageQueue), + projectCurrentConcurrencyKey: + this.keys.projectCurrentConcurrencyKeyFromQueue(messageQueue), + messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(messageQueue), + taskCurrentConcurrentKeyPrefix: + this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(messageQueue), + }); + + if (!message) { + return; + } + + span.setAttributes({ + [SEMATTRS_MESSAGE_ID]: message.messageId, + [SemanticAttributes.QUEUE]: message.message.queue, + [SemanticAttributes.RUN_ID]: message.message.runId, + [SemanticAttributes.CONCURRENCY_KEY]: message.message.concurrencyKey, + [SemanticAttributes.MASTER_QUEUE]: masterQueue, + }); + + return message; + }, + { + kind: SpanKind.CONSUMER, + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "receive", + [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", + }, + } + ); + } + + /** + * Acknowledge a message, which will: + * - remove all data from the queue + * - release all concurrency + * This is done when the run is in a final state. 
+ * @param messageId + */ + public async acknowledgeMessage(orgId: string, messageId: string) { + return this.#trace( + "acknowledgeMessage", + async (span) => { + const message = await this.#readMessage(orgId, messageId); + + if (!message) { + this.logger.log(`[${this.name}].acknowledgeMessage() message not found`, { + messageId, + service: this.name, + }); + return; + } + + span.setAttributes({ + [SemanticAttributes.QUEUE]: message.queue, + [SemanticAttributes.ORG_ID]: message.orgId, + [SemanticAttributes.RUN_ID]: messageId, + [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey, + }); + + await this.#callAcknowledgeMessage({ + messageId, + messageQueue: message.queue, + masterQueue: message.masterQueue, + messageKey: this.keys.messageKey(orgId, messageId), + concurrencyKey: this.keys.currentConcurrencyKeyFromQueue(message.queue), + envConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(message.queue), + taskConcurrencyKey: this.keys.taskIdentifierCurrentConcurrencyKeyFromQueue( + message.queue, + message.taskIdentifier + ), + projectConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(message.queue), + }); + }, + { + kind: SpanKind.CONSUMER, + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "ack", + [SEMATTRS_MESSAGE_ID]: messageId, + [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", + }, + } + ); + } + + /** + * Negative acknowledge a message, which will requeue the message (with an optional future date) + */ + public async nackMessage(orgId: string, messageId: string, retryAt: number = Date.now()) { + return this.#trace( + "nackMessage", + async (span) => { + const message = await this.#readMessage(orgId, messageId); + if (!message) { + this.logger.log(`[${this.name}].nackMessage() message not found`, { + orgId, + messageId, + retryAt, + service: this.name, + }); + return; + } + + span.setAttributes({ + [SemanticAttributes.QUEUE]: message.queue, + [SemanticAttributes.RUN_ID]: messageId, + [SemanticAttributes.CONCURRENCY_KEY]: 
message.concurrencyKey, + [SemanticAttributes.MASTER_QUEUE]: message.masterQueue, + }); + + const messageKey = this.keys.messageKey(orgId, messageId); + const messageQueue = message.queue; + const parentQueue = message.masterQueue; + const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue); + const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue); + const taskConcurrencyKey = this.keys.taskIdentifierCurrentConcurrencyKeyFromQueue( + message.queue, + message.taskIdentifier + ); + const projectConcurrencyKey = this.keys.projectCurrentConcurrencyKeyFromQueue( + message.queue + ); + + const messageScore = retryAt; + + this.logger.debug("Calling nackMessage", { + messageKey, + messageQueue, + parentQueue, + concurrencyKey, + envConcurrencyKey, + projectConcurrencyKey, + taskConcurrencyKey, + messageId, + messageScore, + service: this.name, + }); + + await this.redis.nackMessage( + //keys + messageKey, + messageQueue, + parentQueue, + concurrencyKey, + envConcurrencyKey, + projectConcurrencyKey, + taskConcurrencyKey, + //args + messageId, + String(messageScore) + ); + }, + { + kind: SpanKind.CONSUMER, + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "nack", + [SEMATTRS_MESSAGE_ID]: messageId, + [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", + }, + } + ); + } + + public async releaseConcurrency(messageId: string, releaseForRun: boolean = false) { + return this.#trace( + "releaseConcurrency", + async (span) => { + // span.setAttributes({ + // [SemanticAttributes.MESSAGE_ID]: messageId, + // }); + // const message = await this.readMessage(messageId); + // if (!message) { + // logger.log(`[${this.name}].releaseConcurrency() message not found`, { + // messageId, + // releaseForRun, + // service: this.name, + // }); + // return; + // } + // span.setAttributes({ + // [SemanticAttributes.QUEUE]: message.queue, + // [SemanticAttributes.MESSAGE_ID]: message.messageId, + // [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey, 
+ // [SemanticAttributes.PARENT_QUEUE]: message.parentQueue, + // }); + // const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue); + // const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue); + // const orgConcurrencyKey = this.keys.orgCurrentConcurrencyKeyFromQueue(message.queue); + // logger.debug("Calling releaseConcurrency", { + // messageId, + // queue: message.queue, + // concurrencyKey, + // envConcurrencyKey, + // orgConcurrencyKey, + // service: this.name, + // releaseForRun, + // }); + // return this.redis.releaseConcurrency( + // //don't release the for the run, it breaks concurrencyLimits + // releaseForRun ? concurrencyKey : "", + // envConcurrencyKey, + // orgConcurrencyKey, + // message.messageId + // ); + }, + { + kind: SpanKind.CONSUMER, + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "releaseConcurrency", + [SEMATTRS_MESSAGE_ID]: messageId, + [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", + }, + } + ); + } + + queueConcurrencyScanStream( + count: number = 100, + onEndCallback?: () => void, + onErrorCallback?: (error: Error) => void + ) { + const pattern = this.keys.queueCurrentConcurrencyScanPattern(); + + this.logger.debug("Starting queue concurrency scan stream", { + pattern, + component: "runqueue", + operation: "queueConcurrencyScanStream", + service: this.name, + count, + }); + + const redis = this.redis.duplicate(); + + const stream = redis.scanStream({ + match: pattern, + type: "set", + count, + }); + + stream.on("end", () => { + onEndCallback?.(); + redis.quit(); + }); + + stream.on("error", (error) => { + onErrorCallback?.(error); + redis.quit(); + }); + + return { stream, redis }; + } + + async quit() { + await Promise.all(this.#rebalanceWorkers.map((worker) => worker.stop())); + await this.redis.quit(); + } + + async #trace( + name: string, + fn: (span: Span) => Promise, + options?: SpanOptions & { sampleRate?: number } + ): Promise { + return this.tracer.startActiveSpan( + name, + { + 
...options, + attributes: { + ...options?.attributes, + }, + }, + async (span) => { + try { + return await fn(span); + } catch (e) { + if (e instanceof Error) { + span.recordException(e); + } else { + span.recordException(new Error(String(e))); + } + + throw e; + } finally { + span.end(); + } + } + ); + } + + async #readMessage(orgId: string, messageId: string) { + return this.#trace( + "readMessage", + async (span) => { + const rawMessage = await this.redis.get(this.keys.messageKey(orgId, messageId)); + + if (!rawMessage) { + return; + } + + const message = OutputPayload.safeParse(JSON.parse(rawMessage)); + + if (!message.success) { + this.logger.error(`[${this.name}] Failed to parse message`, { + messageId, + error: message.error, + service: this.name, + }); + + return; + } + + return message.data; + }, + { + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "receive", + [SEMATTRS_MESSAGE_ID]: messageId, + [SEMATTRS_MESSAGING_SYSTEM]: "marqs", + [SemanticAttributes.RUN_ID]: messageId, + }, + } + ); + } + + async #getRandomQueueFromParentQueue( + parentQueue: string, + queuePriorityStrategy: RunQueuePriorityStrategy, + calculateCapacities: (queue: string) => Promise, + consumerId: string + ) { + return this.#trace( + "getRandomQueueFromParentQueue", + async (span) => { + span.setAttribute("consumerId", consumerId); + + const { range } = await queuePriorityStrategy.nextCandidateSelection( + parentQueue, + consumerId + ); + + const queues = await this.#getChildQueuesWithScores(parentQueue, range, span); + span.setAttribute("queueCount", queues.length); + + const queuesWithScores = await this.#calculateQueueScores(queues, calculateCapacities); + span.setAttribute("queuesWithScoresCount", queuesWithScores.length); + + // We need to priority shuffle here to ensure all workers aren't just working on the highest priority queue + const { choice, nextRange } = this.queuePriorityStrategy.chooseQueue( + queuesWithScores, + parentQueue, + consumerId, + range + ); + + 
span.setAttributes({ + ...flattenAttributes(queues, "runqueue.queues"), + }); + span.setAttributes({ + ...flattenAttributes(queuesWithScores, "runqueue.queuesWithScores"), + }); + span.setAttribute("range.offset", range.offset); + span.setAttribute("range.count", range.count); + span.setAttribute("nextRange.offset", nextRange.offset); + span.setAttribute("nextRange.count", nextRange.count); + + if (this.options.verbose || nextRange.offset > 0) { + if (typeof choice === "string") { + this.logger.debug(`[${this.name}] getRandomQueueFromParentQueue`, { + queues, + queuesWithScores, + range, + nextRange, + queueCount: queues.length, + queuesWithScoresCount: queuesWithScores.length, + queueChoice: choice, + consumerId, + }); + } else { + this.logger.debug(`[${this.name}] getRandomQueueFromParentQueue`, { + queues, + queuesWithScores, + range, + nextRange, + queueCount: queues.length, + queuesWithScoresCount: queuesWithScores.length, + noQueueChoice: true, + consumerId, + }); + } + } + + if (typeof choice !== "string") { + span.setAttribute("noQueueChoice", true); + + return; + } else { + span.setAttribute("queueChoice", choice); + + return choice; + } + }, + { + kind: SpanKind.CONSUMER, + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "receive", + [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", + [SemanticAttributes.MASTER_QUEUE]: parentQueue, + }, + } + ); + } + + // Calculate the weights of the queues based on the age and the capacity + async #calculateQueueScores( + queues: Array<{ value: string; score: number }>, + calculateCapacities: (queue: string) => Promise + ) { + const now = Date.now(); + + const queueScores = await Promise.all( + queues.map(async (queue) => { + return { + queue: queue.value, + capacities: await calculateCapacities(queue.value), + age: now - queue.score, + size: await this.redis.zcard(queue.value), + }; + }) + ); + + return queueScores; + } + + async #calculateMessageQueueCapacities(queue: string, options?: { checkForDisabled?: boolean }) { + 
return await this.#callCalculateMessageCapacities({ + currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), + currentEnvConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), + concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), + envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), + disabledConcurrencyLimitKey: options?.checkForDisabled + ? this.keys.disabledConcurrencyLimitKeyFromQueue(queue) + : undefined, + }); + } + + async #getChildQueuesWithScores( + key: string, + range: QueueRange, + span?: Span + ): Promise> { + const valuesWithScores = await this.redis.zrangebyscore( + key, + "-inf", + Date.now(), + "WITHSCORES", + "LIMIT", + range.offset, + range.count + ); + + span?.setAttribute("zrangebyscore.valuesWithScores.rawLength", valuesWithScores.length); + span?.setAttributes({ + ...flattenAttributes(valuesWithScores, "zrangebyscore.valuesWithScores.rawValues"), + }); + + const result: Array<{ value: string; score: number }> = []; + + for (let i = 0; i < valuesWithScores.length; i += 2) { + result.push({ + value: valuesWithScores[i], + score: Number(valuesWithScores[i + 1]), + }); + } + + return result; + } + + async #callEnqueueMessage(message: OutputPayload, parentQueue: string) { + const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue); + const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue); + const taskConcurrencyKey = this.keys.taskIdentifierCurrentConcurrencyKeyFromQueue( + message.queue, + message.taskIdentifier + ); + const projectConcurrencyKey = this.keys.projectCurrentConcurrencyKeyFromQueue(message.queue); + + this.logger.debug("Calling enqueueMessage", { + messagePayload: message, + concurrencyKey, + envConcurrencyKey, + service: this.name, + }); + + return this.redis.enqueueMessage( + message.queue, + parentQueue, + this.keys.messageKey(message.orgId, message.runId), + concurrencyKey, + envConcurrencyKey, + 
taskConcurrencyKey, + projectConcurrencyKey, + message.queue, + message.runId, + JSON.stringify(message), + String(message.timestamp) + ); + } + + async #callDequeueMessage({ + messageQueue, + masterQueue, + concurrencyLimitKey, + envConcurrencyLimitKey, + currentConcurrencyKey, + envCurrentConcurrencyKey, + projectCurrentConcurrencyKey, + messageKeyPrefix, + taskCurrentConcurrentKeyPrefix, + }: { + messageQueue: string; + masterQueue: string; + concurrencyLimitKey: string; + envConcurrencyLimitKey: string; + currentConcurrencyKey: string; + envCurrentConcurrencyKey: string; + projectCurrentConcurrencyKey: string; + messageKeyPrefix: string; + taskCurrentConcurrentKeyPrefix: string; + }) { + const result = await this.redis.dequeueMessage( + //keys + messageQueue, + masterQueue, + concurrencyLimitKey, + envConcurrencyLimitKey, + currentConcurrencyKey, + envCurrentConcurrencyKey, + projectCurrentConcurrencyKey, + messageKeyPrefix, + taskCurrentConcurrentKeyPrefix, + //args + messageQueue, + String(Date.now()), + String(this.options.defaultEnvConcurrency) + ); + + if (!result) { + return; + } + + this.logger.debug("Dequeue message result", { + result, + service: this.name, + }); + + if (result.length !== 3) { + this.logger.error("Invalid dequeue message result", { + result, + service: this.name, + }); + return; + } + + const [messageId, messageScore, rawMessage] = result; + + //read message + const parsedMessage = OutputPayload.safeParse(JSON.parse(rawMessage)); + if (!parsedMessage.success) { + this.logger.error(`[${this.name}] Failed to parse message`, { + messageId, + error: parsedMessage.error, + service: this.name, + }); + + return; + } + + const message = parsedMessage.data; + + return { + messageId, + messageScore, + message, + }; + } + + async #callAcknowledgeMessage({ + messageId, + masterQueue, + messageKey, + messageQueue, + concurrencyKey, + envConcurrencyKey, + taskConcurrencyKey, + projectConcurrencyKey, + }: { + masterQueue: string; + messageKey: 
string; + messageQueue: string; + concurrencyKey: string; + envConcurrencyKey: string; + taskConcurrencyKey: string; + projectConcurrencyKey: string; + messageId: string; + }) { + this.logger.debug("Calling acknowledgeMessage", { + messageKey, + messageQueue, + concurrencyKey, + envConcurrencyKey, + projectConcurrencyKey, + taskConcurrencyKey, + messageId, + masterQueue, + service: this.name, + }); + + return this.redis.acknowledgeMessage( + masterQueue, + messageKey, + messageQueue, + concurrencyKey, + envConcurrencyKey, + projectConcurrencyKey, + taskConcurrencyKey, + messageId + ); + } + + async #callCalculateMessageCapacities({ + currentConcurrencyKey, + currentEnvConcurrencyKey, + concurrencyLimitKey, + envConcurrencyLimitKey, + disabledConcurrencyLimitKey, + }: { + currentConcurrencyKey: string; + currentEnvConcurrencyKey: string; + concurrencyLimitKey: string; + envConcurrencyLimitKey: string; + disabledConcurrencyLimitKey: string | undefined; + }): Promise { + const capacities = disabledConcurrencyLimitKey + ? await this.redis.calculateMessageQueueCapacitiesWithDisabling( + currentConcurrencyKey, + currentEnvConcurrencyKey, + concurrencyLimitKey, + envConcurrencyLimitKey, + disabledConcurrencyLimitKey, + String(this.options.defaultEnvConcurrency) + ) + : await this.redis.calculateMessageQueueCapacities( + currentConcurrencyKey, + currentEnvConcurrencyKey, + concurrencyLimitKey, + envConcurrencyLimitKey, + String(this.options.defaultEnvConcurrency) + ); + + const queueCurrent = Number(capacities[0]); + const envLimit = Number(capacities[3]); + const isOrgEnabled = Boolean(capacities[4]); + const queueLimit = capacities[1] + ? Number(capacities[1]) + : Math.min(envLimit, isOrgEnabled ? 
Infinity : 0); + const envCurrent = Number(capacities[2]); + + return { + queue: { current: queueCurrent, limit: queueLimit }, + env: { current: envCurrent, limit: envLimit }, + }; + } + + #callUpdateGlobalConcurrencyLimits({ + envConcurrencyLimitKey, + envConcurrencyLimit, + }: { + envConcurrencyLimitKey: string; + envConcurrencyLimit: number; + }) { + return this.redis.updateGlobalConcurrencyLimits( + envConcurrencyLimitKey, + String(envConcurrencyLimit) + ); + } + + async #callRebalanceParentQueueChild({ + parentQueue, + childQueue, + currentScore, + }: { + parentQueue: string; + childQueue: string; + currentScore: string; + }) { + const rebalanceResult = await this.redis.rebalanceParentQueueChild( + childQueue, + parentQueue, + childQueue, + currentScore + ); + + if (rebalanceResult) { + this.logger.debug("Rebalanced parent queue child", { + parentQueue, + childQueue, + currentScore, + rebalanceResult, + operation: "rebalanceParentQueueChild", + service: this.name, + }); + } + + return rebalanceResult; + } + + #registerCommands() { + this.redis.defineCommand("enqueueMessage", { + numberOfKeys: 7, + lua: ` +local queue = KEYS[1] +local parentQueue = KEYS[2] +local messageKey = KEYS[3] +local concurrencyKey = KEYS[4] +local envConcurrencyKey = KEYS[5] +local taskConcurrencyKey = KEYS[6] +local projectConcurrencyKey = KEYS[7] + +local queueName = ARGV[1] +local messageId = ARGV[2] +local messageData = ARGV[3] +local messageScore = ARGV[4] + +-- Write the message to the message key +redis.call('SET', messageKey, messageData) + +-- Add the message to the queue +redis.call('ZADD', queue, messageScore, messageId) + +-- Rebalance the parent queue +local earliestMessage = redis.call('ZRANGE', queue, 0, 0, 'WITHSCORES') +if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, queueName) +else + redis.call('ZADD', parentQueue, earliestMessage[2], queueName) +end + +-- Update the concurrency keys +redis.call('SREM', concurrencyKey, messageId) +redis.call('SREM', 
envConcurrencyKey, messageId) +redis.call('SREM', taskConcurrencyKey, messageId) +redis.call('SREM', projectConcurrencyKey, messageId) + `, + }); + + this.redis.defineCommand("dequeueMessage", { + numberOfKeys: 9, + lua: ` +local childQueue = KEYS[1] +local parentQueue = KEYS[2] +local concurrencyLimitKey = KEYS[3] +local envConcurrencyLimitKey = KEYS[4] +local currentConcurrencyKey = KEYS[5] +local envCurrentConcurrencyKey = KEYS[6] +local projectConcurrencyKey = KEYS[7] +local messageKeyPrefix = KEYS[8] +local taskCurrentConcurrentKeyPrefix = KEYS[9] + +local childQueueName = ARGV[1] +local currentTime = tonumber(ARGV[2]) +local defaultEnvConcurrencyLimit = ARGV[3] + +-- Check current env concurrency against the limit +local envCurrentConcurrency = tonumber(redis.call('SCARD', envCurrentConcurrencyKey) or '0') +local envConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit) + +if envCurrentConcurrency >= envConcurrencyLimit then + return nil +end + +-- Check current queue concurrency against the limit +local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) or '0') +local concurrencyLimit = tonumber(redis.call('GET', concurrencyLimitKey) or '1000000') + +-- Check condition only if concurrencyLimit exists +if currentConcurrency >= concurrencyLimit then + return nil +end + +-- Attempt to dequeue the next message +local messages = redis.call('ZRANGEBYSCORE', childQueue, '-inf', currentTime, 'WITHSCORES', 'LIMIT', 0, 1) + +if #messages == 0 then + return nil +end + +local messageId = messages[1] +local messageScore = tonumber(messages[2]) + +-- Get the message payload +local messageKey = messageKeyPrefix .. 
messageId +local messagePayload = redis.call('GET', messageKey) + +-- Parse JSON payload and extract taskIdentifier +local taskIdentifier = cjson.decode(messagePayload).taskIdentifier + +-- Perform SADD with taskIdentifier and messageId +local taskConcurrencyKey = taskCurrentConcurrentKeyPrefix .. taskIdentifier + +-- Update concurrency +redis.call('ZREM', childQueue, messageId) +redis.call('SADD', currentConcurrencyKey, messageId) +redis.call('SADD', envCurrentConcurrencyKey, messageId) +redis.call('SADD', projectConcurrencyKey, messageId) +redis.call('SADD', taskConcurrencyKey, messageId) + +-- Rebalance the parent queue +local earliestMessage = redis.call('ZRANGE', childQueue, 0, 0, 'WITHSCORES') +if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, childQueueName) +else + redis.call('ZADD', parentQueue, earliestMessage[2], childQueueName) +end + +return {messageId, messageScore, messagePayload} -- Return message details + `, + }); + + this.redis.defineCommand("acknowledgeMessage", { + numberOfKeys: 7, + lua: ` +-- Keys: +local parentQueue = KEYS[1] +local messageKey = KEYS[2] +local messageQueue = KEYS[3] +local concurrencyKey = KEYS[4] +local envCurrentConcurrencyKey = KEYS[5] +local projectCurrentConcurrencyKey = KEYS[6] +local taskCurrentConcurrencyKey = KEYS[7] + +-- Args: +local messageId = ARGV[1] + +-- Remove the message from the message key +redis.call('DEL', messageKey) + +-- Remove the message from the queue +redis.call('ZREM', messageQueue, messageId) + +-- Rebalance the parent queue +local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') +if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, messageQueue) +else + redis.call('ZADD', parentQueue, earliestMessage[2], messageQueue) +end + +-- Update the concurrency keys +redis.call('SREM', concurrencyKey, messageId) +redis.call('SREM', envCurrentConcurrencyKey, messageId) +redis.call('SREM', projectCurrentConcurrencyKey, messageId) +redis.call('SREM', 
taskCurrentConcurrencyKey, messageId) +`, + }); + + this.redis.defineCommand("nackMessage", { + numberOfKeys: 7, + lua: ` +-- Keys: +local messageKey = KEYS[1] +local messageQueueKey = KEYS[2] +local parentQueueKey = KEYS[3] +local concurrencyKey = KEYS[4] +local envConcurrencyKey = KEYS[5] +local projectConcurrencyKey = KEYS[6] +local taskConcurrencyKey = KEYS[7] + +-- Args: +local messageId = ARGV[1] +local messageScore = tonumber(ARGV[2]) + +-- Update the concurrency keys +redis.call('SREM', concurrencyKey, messageId) +redis.call('SREM', envConcurrencyKey, messageId) +redis.call('SREM', projectConcurrencyKey, messageId) +redis.call('SREM', taskConcurrencyKey, messageId) + +-- Enqueue the message into the queue +redis.call('ZADD', messageQueueKey, messageScore, messageId) + +-- Rebalance the parent queue +local earliestMessage = redis.call('ZRANGE', messageQueueKey, 0, 0, 'WITHSCORES') +if #earliestMessage == 0 then + redis.call('ZREM', parentQueueKey, messageQueueKey) +else + redis.call('ZADD', parentQueueKey, earliestMessage[2], messageQueueKey) +end +`, + }); + + this.redis.defineCommand("releaseConcurrency", { + numberOfKeys: 3, + lua: ` +local concurrencyKey = KEYS[1] +local envCurrentConcurrencyKey = KEYS[2] +local orgCurrentConcurrencyKey = KEYS[3] + +local messageId = ARGV[1] + +-- Update the concurrency keys +if concurrencyKey ~= "" then + redis.call('SREM', concurrencyKey, messageId) +end +redis.call('SREM', envCurrentConcurrencyKey, messageId) +redis.call('SREM', orgCurrentConcurrencyKey, messageId) +`, + }); + + this.redis.defineCommand("calculateMessageQueueCapacitiesWithDisabling", { + numberOfKeys: 5, + lua: ` +-- Keys +local currentConcurrencyKey = KEYS[1] +local currentEnvConcurrencyKey = KEYS[2] +local concurrencyLimitKey = KEYS[3] +local envConcurrencyLimitKey = KEYS[4] +local disabledConcurrencyLimitKey = KEYS[5] + +-- Args +local defaultEnvConcurrencyLimit = tonumber(ARGV[1]) + +-- Check if disabledConcurrencyLimitKey exists +local 
orgIsEnabled +if redis.call('EXISTS', disabledConcurrencyLimitKey) == 1 then + orgIsEnabled = false +else + orgIsEnabled = true +end + +local currentEnvConcurrency = tonumber(redis.call('SCARD', currentEnvConcurrencyKey) or '0') +local envConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit) + +local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) or '0') +local concurrencyLimit = redis.call('GET', concurrencyLimitKey) + +-- Return current capacity and concurrency limits for the queue, env, org +return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, orgIsEnabled } + `, + }); + + this.redis.defineCommand("calculateMessageQueueCapacities", { + numberOfKeys: 4, + lua: ` +-- Keys: +local currentConcurrencyKey = KEYS[1] +local currentEnvConcurrencyKey = KEYS[2] +local concurrencyLimitKey = KEYS[3] +local envConcurrencyLimitKey = KEYS[4] + +-- Args +local defaultEnvConcurrencyLimit = tonumber(ARGV[1]) + +local currentEnvConcurrency = tonumber(redis.call('SCARD', currentEnvConcurrencyKey) or '0') +local envConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit) + +local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) or '0') +local concurrencyLimit = redis.call('GET', concurrencyLimitKey) + +-- Return current capacity and concurrency limits for the queue, env, org +return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, true } + `, + }); + + this.redis.defineCommand("updateGlobalConcurrencyLimits", { + numberOfKeys: 1, + lua: ` +-- Keys: envConcurrencyLimitKey, orgConcurrencyLimitKey +local envConcurrencyLimitKey = KEYS[1] + +-- Args: envConcurrencyLimit, orgConcurrencyLimit +local envConcurrencyLimit = ARGV[1] + +redis.call('SET', envConcurrencyLimitKey, envConcurrencyLimit) + `, + }); + + this.redis.defineCommand("rebalanceParentQueueChild", { + numberOfKeys: 
2, + lua: ` +-- Keys: childQueueKey, parentQueueKey +local childQueueKey = KEYS[1] +local parentQueueKey = KEYS[2] + +-- Args: childQueueName, currentScore +local childQueueName = ARGV[1] +local currentScore = ARGV[2] + +-- Rebalance the parent queue +local earliestMessage = redis.call('ZRANGE', childQueueKey, 0, 0, 'WITHSCORES') +if #earliestMessage == 0 then + redis.call('ZREM', parentQueueKey, childQueueName) + + -- Return true because the parent queue was rebalanced + return true +else + -- If the earliest message is different, update the parent queue and return true, else return false + if earliestMessage[2] == currentScore then + return false + end + + redis.call('ZADD', parentQueueKey, earliestMessage[2], childQueueName) + + return earliestMessage[2] +end +`, + }); + } +} + +declare module "ioredis" { + interface RedisCommander { + enqueueMessage( + //keys + queue: string, + parentQueue: string, + messageKey: string, + concurrencyKey: string, + envConcurrencyKey: string, + taskConcurrencyKey: string, + projectConcurrencyKey: string, + //args + queueName: string, + messageId: string, + messageData: string, + messageScore: string, + callback?: Callback + ): Result; + + dequeueMessage( + //keys + childQueue: string, + parentQueue: string, + concurrencyLimitKey: string, + envConcurrencyLimitKey: string, + currentConcurrencyKey: string, + envConcurrencyKey: string, + projectConcurrencyKey: string, + messageKeyPrefix: string, + taskCurrentConcurrentKeyPrefix: string, + //args + childQueueName: string, + currentTime: string, + defaultEnvConcurrencyLimit: string, + callback?: Callback<[string, string]> + ): Result<[string, string, string] | null, Context>; + + acknowledgeMessage( + parentQueue: string, + messageKey: string, + messageQueue: string, + concurrencyKey: string, + envConcurrencyKey: string, + projectConcurrencyKey: string, + taskConcurrencyKey: string, + messageId: string, + callback?: Callback + ): Result; + + nackMessage( + messageKey: string, + 
messageQueue: string, + parentQueueKey: string, + concurrencyKey: string, + envConcurrencyKey: string, + projectConcurrencyKey: string, + taskConcurrencyKey: string, + messageId: string, + messageScore: string, + callback?: Callback + ): Result; + + releaseConcurrency( + concurrencyKey: string, + envConcurrencyKey: string, + orgConcurrencyKey: string, + messageId: string, + callback?: Callback + ): Result; + + calculateMessageQueueCapacities( + currentConcurrencyKey: string, + currentEnvConcurrencyKey: string, + concurrencyLimitKey: string, + envConcurrencyLimitKey: string, + defaultEnvConcurrencyLimit: string, + callback?: Callback + ): Result<[number, number, number, number, boolean], Context>; + + calculateMessageQueueCapacitiesWithDisabling( + currentConcurrencyKey: string, + currentEnvConcurrencyKey: string, + concurrencyLimitKey: string, + envConcurrencyLimitKey: string, + disabledConcurrencyLimitKey: string, + defaultEnvConcurrencyLimit: string, + callback?: Callback + ): Result<[number, number, number, number, boolean], Context>; + + updateGlobalConcurrencyLimits( + envConcurrencyLimitKey: string, + envConcurrencyLimit: string, + callback?: Callback + ): Result; + + rebalanceParentQueueChild( + childQueueKey: string, + parentQueueKey: string, + childQueueName: string, + currentScore: string, + callback?: Callback + ): Result; + } +} + +// Only allow alphanumeric characters, underscores, hyphens, and slashes (and only the first 128 characters) +export function sanitizeQueueName(queueName: string) { + return queueName.replace(/[^a-zA-Z0-9_\-\/]/g, "").substring(0, 128); +} diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.test.ts b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts new file mode 100644 index 0000000000..886d695f59 --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts @@ -0,0 +1,361 @@ +import { describe } from "node:test"; +import { expect, it } from "vitest"; +import { 
RunQueueShortKeyProducer } from "./keyProducer.js"; + +describe("KeyProducer", () => { + it("sharedQueueScanPattern", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const pattern = keyProducer.masterQueueScanPattern("main"); + expect(pattern).toBe("test:*main"); + }); + + it("queueCurrentConcurrencyScanPattern", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const pattern = keyProducer.queueCurrentConcurrencyScanPattern(); + expect(pattern).toBe("test:{org:*}:proj:*:env:*:queue:*:currentConcurrency"); + }); + + it("stripKeyPrefix", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.stripKeyPrefix("test:abc"); + expect(key).toBe("abc"); + }); + + it("queueConcurrencyLimitKey", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.queueConcurrencyLimitKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:concurrency"); + }); + + it("envConcurrencyLimitKey", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.envConcurrencyLimitKey({ + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:concurrency"); + }); + + it("queueKey (no concurrency)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:queue:task/task-name"); + }); + + it("queueKey (w concurrency)", () => { + const keyProducer = new 
RunQueueShortKeyProducer("test:"); + const key = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name", + "c1234" + ); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:ck:c1234"); + }); + + it("concurrencyLimitKeyFromQueue (w concurrency)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name", + "c1234" + ); + const key = keyProducer.concurrencyLimitKeyFromQueue(queueKey); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:concurrency"); + }); + + it("concurrencyLimitKeyFromQueue (no concurrency)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + const key = keyProducer.concurrencyLimitKeyFromQueue(queueKey); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:concurrency"); + }); + + it("currentConcurrencyKeyFromQueue (w concurrency)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name", + "c1234" + ); + const key = keyProducer.currentConcurrencyKeyFromQueue(queueKey); + expect(key).toBe( + "{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:ck:c1234:currentConcurrency" + ); + }); + + it("currentConcurrencyKeyFromQueue (no concurrency)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = 
keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + const key = keyProducer.currentConcurrencyKeyFromQueue(queueKey); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:currentConcurrency"); + }); + + it("currentConcurrencyKey (w concurrency)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.currentConcurrencyKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name", + "c1234" + ); + expect(key).toBe( + "{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:ck:c1234:currentConcurrency" + ); + }); + + it("currentConcurrencyKey (no concurrency)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.currentConcurrencyKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:queue:task/task-name:currentConcurrency"); + }); + + it("taskIdentifierCurrentConcurrencyKeyPrefixFromQueue", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + const key = keyProducer.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queueKey); + expect(key).toBe("{org:o1234}:proj:p1234:task:"); + }); + + it("taskIdentifierCurrentConcurrencyKeyFromQueue", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + 
organization: { id: "o1234" }, + }, + "task/task-name" + ); + const key = keyProducer.taskIdentifierCurrentConcurrencyKeyFromQueue(queueKey, "task-name"); + expect(key).toBe("{org:o1234}:proj:p1234:task:task-name"); + }); + + it("taskIdentifierCurrentConcurrencyKey", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.taskIdentifierCurrentConcurrencyKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task-name" + ); + expect(key).toBe("{org:o1234}:proj:p1234:task:task-name"); + }); + + it("projectCurrentConcurrencyKey", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.projectCurrentConcurrencyKey({ + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }); + expect(key).toBe("{org:o1234}:proj:p1234:currentConcurrency"); + }); + + it("projectCurrentConcurrencyKeyFromQueue", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.projectCurrentConcurrencyKeyFromQueue( + "{org:o1234}:proj:p1234:currentConcurrency" + ); + expect(key).toBe("{org:o1234}:proj:p1234:currentConcurrency"); + }); + + it("disabledConcurrencyLimitKeyFromQueue", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + const key = keyProducer.disabledConcurrencyLimitKeyFromQueue(queueKey); + expect(key).toBe("{org:o1234}:disabledConcurrency"); + }); + + it("envConcurrencyLimitKeyFromQueue", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: 
"p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + const key = keyProducer.envConcurrencyLimitKeyFromQueue(queueKey); + expect(key).toBe("{org:o1234}:env:e1234:concurrency"); + }); + + it("envCurrentConcurrencyKeyFromQueue", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + const key = keyProducer.envCurrentConcurrencyKeyFromQueue(queueKey); + expect(key).toBe("{org:o1234}:env:e1234:currentConcurrency"); + }); + + it("envCurrentConcurrencyKey", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.envCurrentConcurrencyKey({ + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }); + expect(key).toBe("{org:o1234}:env:e1234:currentConcurrency"); + }); + + it("messageKey", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const key = keyProducer.messageKey("o1234", "m1234"); + expect(key).toBe("{org:o1234}:message:m1234"); + }); + + it("extractComponentsFromQueue (no concurrencyKey)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name" + ); + const components = keyProducer.extractComponentsFromQueue(queueKey); + expect(components).toEqual({ + orgId: "o1234", + projectId: "p1234", + envId: "e1234", + queue: "task/task-name", + concurrencyKey: undefined, + }); + }); + + it("extractComponentsFromQueue (w concurrencyKey)", () => { + const keyProducer = new RunQueueShortKeyProducer("test:"); + const queueKey = keyProducer.queueKey( + { + id: "e1234", + type: "PRODUCTION", + 
maximumConcurrencyLimit: 10, + project: { id: "p1234" }, + organization: { id: "o1234" }, + }, + "task/task-name", + "c1234" + ); + const components = keyProducer.extractComponentsFromQueue(queueKey); + expect(components).toEqual({ + orgId: "o1234", + projectId: "p1234", + envId: "e1234", + queue: "task/task-name", + concurrencyKey: "c1234", + }); + }); +}); diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.ts b/internal-packages/run-engine/src/run-queue/keyProducer.ts new file mode 100644 index 0000000000..8c145ce16e --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/keyProducer.ts @@ -0,0 +1,195 @@ +import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; +import { RunQueueKeyProducer } from "./types.js"; + +const constants = { + CURRENT_CONCURRENCY_PART: "currentConcurrency", + CONCURRENCY_LIMIT_PART: "concurrency", + DISABLED_CONCURRENCY_LIMIT_PART: "disabledConcurrency", + ENV_PART: "env", + ORG_PART: "org", + PROJECT_PART: "proj", + QUEUE_PART: "queue", + CONCURRENCY_KEY_PART: "ck", + TASK_PART: "task", + MESSAGE_PART: "message", +} as const; + +export class RunQueueShortKeyProducer implements RunQueueKeyProducer { + constructor(private _prefix: string) {} + + masterQueueScanPattern(masterQueue: string) { + return `${this._prefix}*${masterQueue}`; + } + + queueCurrentConcurrencyScanPattern() { + return `${this._prefix}{${constants.ORG_PART}:*}:${constants.PROJECT_PART}:*:${constants.ENV_PART}:*:${constants.QUEUE_PART}:*:${constants.CURRENT_CONCURRENCY_PART}`; + } + + stripKeyPrefix(key: string): string { + if (key.startsWith(this._prefix)) { + return key.slice(this._prefix.length); + } + + return key; + } + + queueConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment, queue: string) { + return [this.queueKey(env, queue), constants.CONCURRENCY_LIMIT_PART].join(":"); + } + + envConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment) { + return [ + this.orgKeySection(env.organization.id), + 
this.projKeySection(env.project.id), + this.envKeySection(env.id), + constants.CONCURRENCY_LIMIT_PART, + ].join(":"); + } + + queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string) { + return [ + this.orgKeySection(env.organization.id), + this.projKeySection(env.project.id), + this.envKeySection(env.id), + this.queueSection(queue), + ] + .concat(concurrencyKey ? this.concurrencyKeySection(concurrencyKey) : []) + .join(":"); + } + + concurrencyLimitKeyFromQueue(queue: string) { + const concurrencyQueueName = queue.replace(/:ck:.+$/, ""); + return `${concurrencyQueueName}:${constants.CONCURRENCY_LIMIT_PART}`; + } + + currentConcurrencyKeyFromQueue(queue: string) { + return `${queue}:${constants.CURRENT_CONCURRENCY_PART}`; + } + + currentConcurrencyKey( + env: MinimalAuthenticatedEnvironment, + queue: string, + concurrencyKey?: string + ): string { + return [this.queueKey(env, queue, concurrencyKey), constants.CURRENT_CONCURRENCY_PART].join( + ":" + ); + } + + disabledConcurrencyLimitKeyFromQueue(queue: string) { + const { orgId } = this.extractComponentsFromQueue(queue); + return `{${constants.ORG_PART}:${orgId}}:${constants.DISABLED_CONCURRENCY_LIMIT_PART}`; + } + + envConcurrencyLimitKeyFromQueue(queue: string) { + const { orgId, envId } = this.extractComponentsFromQueue(queue); + return `{${constants.ORG_PART}:${orgId}}:${constants.ENV_PART}:${envId}:${constants.CONCURRENCY_LIMIT_PART}`; + } + + envCurrentConcurrencyKeyFromQueue(queue: string) { + const { orgId, envId } = this.extractComponentsFromQueue(queue); + return `{${constants.ORG_PART}:${orgId}}:${constants.ENV_PART}:${envId}:${constants.CURRENT_CONCURRENCY_PART}`; + } + + envCurrentConcurrencyKey(env: MinimalAuthenticatedEnvironment): string { + return [ + this.orgKeySection(env.organization.id), + this.envKeySection(env.id), + constants.CURRENT_CONCURRENCY_PART, + ].join(":"); + } + + taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue: string) { + const { orgId, 
projectId } = this.extractComponentsFromQueue(queue); + + return `${[this.orgKeySection(orgId), this.projKeySection(projectId), constants.TASK_PART] + .filter(Boolean) + .join(":")}:`; + } + + taskIdentifierCurrentConcurrencyKeyFromQueue(queue: string, taskIdentifier: string) { + return `${this.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue)}${taskIdentifier}`; + } + + taskIdentifierCurrentConcurrencyKey( + env: MinimalAuthenticatedEnvironment, + taskIdentifier: string + ): string { + return [ + this.orgKeySection(env.organization.id), + this.projKeySection(env.project.id), + constants.TASK_PART, + taskIdentifier, + ].join(":"); + } + + projectCurrentConcurrencyKey(env: MinimalAuthenticatedEnvironment): string { + return [ + this.orgKeySection(env.organization.id), + this.projKeySection(env.project.id), + constants.CURRENT_CONCURRENCY_PART, + ].join(":"); + } + + projectCurrentConcurrencyKeyFromQueue(queue: string): string { + const { orgId, projectId } = this.extractComponentsFromQueue(queue); + return `${this.orgKeySection(orgId)}:${this.projKeySection(projectId)}:${ + constants.CURRENT_CONCURRENCY_PART + }`; + } + + messageKeyPrefixFromQueue(queue: string) { + const { orgId } = this.extractComponentsFromQueue(queue); + return `${this.orgKeySection(orgId)}:${constants.MESSAGE_PART}:`; + } + + messageKey(orgId: string, messageId: string) { + return [this.orgKeySection(orgId), `${constants.MESSAGE_PART}:${messageId}`] + .filter(Boolean) + .join(":"); + } + + extractComponentsFromQueue(queue: string) { + const parts = this.normalizeQueue(queue).split(":"); + return { + orgId: parts[1].replace("{", "").replace("}", ""), + projectId: parts[3], + envId: parts[5], + queue: parts[7], + concurrencyKey: parts.at(9), + }; + } + + private envKeySection(envId: string) { + return `${constants.ENV_PART}:${envId}`; + } + + private projKeySection(projId: string) { + return `${constants.PROJECT_PART}:${projId}`; + } + + private orgKeySection(orgId: string) { + return 
`{${constants.ORG_PART}:${orgId}}`; + } + + private queueSection(queue: string) { + return `${constants.QUEUE_PART}:${queue}`; + } + + private concurrencyKeySection(concurrencyKey: string) { + return `${constants.CONCURRENCY_KEY_PART}:${concurrencyKey}`; + } + + private taskIdentifierSection(taskIdentifier: string) { + return `${constants.TASK_PART}:${taskIdentifier}`; + } + + // This removes the leading prefix from the queue name if it exists + private normalizeQueue(queue: string) { + if (queue.startsWith(this._prefix)) { + return queue.slice(this._prefix.length); + } + + return queue; + } +} diff --git a/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts b/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts new file mode 100644 index 0000000000..b85019e449 --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts @@ -0,0 +1,119 @@ +import { + RunQueuePriorityStrategy, + PriorityStrategyChoice, + QueueRange, + QueueWithScores, +} from "./types.js"; + +export type SimpleWeightedChoiceStrategyOptions = { + queueSelectionCount: number; + randomSeed?: string; + excludeEnvCapacity?: boolean; +}; + +export class SimpleWeightedChoiceStrategy implements RunQueuePriorityStrategy { + private _nextRangesByParentQueue: Map = new Map(); + + constructor(private options: SimpleWeightedChoiceStrategyOptions) {} + + private nextRangeForParentQueue(parentQueue: string, consumerId: string): QueueRange { + return ( + this._nextRangesByParentQueue.get(`${consumerId}:${parentQueue}`) ?? 
{ + offset: 0, + count: this.options.queueSelectionCount, + } + ); + } + + chooseQueue( + queues: QueueWithScores[], + parentQueue: string, + consumerId: string, + previousRange: QueueRange + ): { choice: PriorityStrategyChoice; nextRange: QueueRange } { + const filteredQueues = filterQueuesAtCapacity(queues); + + if (queues.length === this.options.queueSelectionCount) { + const nextRange: QueueRange = { + offset: previousRange.offset + this.options.queueSelectionCount, + count: this.options.queueSelectionCount, + }; + + // If all queues are at capacity, and we were passed the max number of queues, then we will slide the window "to the right" + this._nextRangesByParentQueue.set(`${consumerId}:${parentQueue}`, nextRange); + } else { + this._nextRangesByParentQueue.delete(`${consumerId}:${parentQueue}`); + } + + if (filteredQueues.length === 0) { + return { + choice: { abort: true }, + nextRange: this.nextRangeForParentQueue(parentQueue, consumerId), + }; + } + + const queueWeights = this.#calculateQueueWeights(filteredQueues); + + const choice = weightedRandomChoice(queueWeights); + + return { + choice, + nextRange: this.nextRangeForParentQueue(parentQueue, consumerId), + }; + } + + async nextCandidateSelection( + parentQueue: string, + consumerId: string + ): Promise<{ range: QueueRange }> { + return { + range: this.nextRangeForParentQueue(parentQueue, consumerId), + }; + } + + #calculateQueueWeights(queues: QueueWithScores[]) { + const avgQueueSize = queues.reduce((acc, { size }) => acc + size, 0) / queues.length; + const avgMessageAge = queues.reduce((acc, { age }) => acc + age, 0) / queues.length; + + return queues.map(({ capacities, age, queue, size }) => { + let totalWeight = 1; + + if (size > avgQueueSize) { + totalWeight += Math.min(size / avgQueueSize, 4); + } + + if (age > avgMessageAge) { + totalWeight += Math.min(age / avgMessageAge, 4); + } + + return { + queue, + totalWeight: age, + }; + }); + } +} + +function filterQueuesAtCapacity(queues: 
QueueWithScores[]) { + return queues.filter( + (queue) => + queue.capacities.queue.current < queue.capacities.queue.limit && + queue.capacities.env.current < queue.capacities.env.limit + ); +} + +function weightedRandomChoice(queues: Array<{ queue: string; totalWeight: number }>) { + const totalWeight = queues.reduce((acc, queue) => acc + queue.totalWeight, 0); + let randomNum = Math.random() * totalWeight; + + for (const queue of queues) { + if (randomNum < queue.totalWeight) { + return queue.queue; + } + + randomNum -= queue.totalWeight; + } + + // If we get here, we should just return a random queue + return queues[Math.floor(Math.random() * queues.length)].queue; +} diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts new file mode 100644 index 0000000000..914193fb85 --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -0,0 +1,116 @@ +import { z } from "zod"; +import { RuntimeEnvironmentType } from "../../../database/src/index.js"; +import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; + +export const InputPayload = z.object({ + runId: z.string(), + taskIdentifier: z.string(), + orgId: z.string(), + projectId: z.string(), + environmentId: z.string(), + environmentType: z.nativeEnum(RuntimeEnvironmentType), + queue: z.string(), + concurrencyKey: z.string().optional(), + timestamp: z.number(), +}); +export type InputPayload = z.infer; + +export const OutputPayload = InputPayload.extend({ + version: z.literal("1"), + masterQueue: z.string(), +}); +export type OutputPayload = z.infer; + +export type QueueCapacity = { + current: number; + limit: number; +}; + +export type QueueCapacities = { + queue: QueueCapacity; + env: QueueCapacity; +}; + +export type QueueWithScores = { + queue: string; + capacities: QueueCapacities; + age: number; + size: number; +}; + +export type QueueRange = { offset: number; count: number }; + +export interface RunQueueKeyProducer { + 
masterQueueScanPattern(masterQueue: string): string; + queueCurrentConcurrencyScanPattern(): string; + //queue + queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string): string; + queueConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment, queue: string): string; + concurrencyLimitKeyFromQueue(queue: string): string; + currentConcurrencyKeyFromQueue(queue: string): string; + currentConcurrencyKey( + env: MinimalAuthenticatedEnvironment, + queue: string, + concurrencyKey?: string + ): string; + disabledConcurrencyLimitKeyFromQueue(queue: string): string; + //env oncurrency + envCurrentConcurrencyKey(env: MinimalAuthenticatedEnvironment): string; + envConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment): string; + envConcurrencyLimitKeyFromQueue(queue: string): string; + envCurrentConcurrencyKeyFromQueue(queue: string): string; + //task concurrency + taskIdentifierCurrentConcurrencyKey( + env: MinimalAuthenticatedEnvironment, + taskIdentifier: string + ): string; + taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue: string): string; + taskIdentifierCurrentConcurrencyKeyFromQueue(queue: string, taskIdentifier: string): string; + //project concurrency + projectCurrentConcurrencyKey(env: MinimalAuthenticatedEnvironment): string; + projectCurrentConcurrencyKeyFromQueue(queue: string): string; + //message payload + messageKeyPrefixFromQueue(queue: string): string; + messageKey(orgId: string, messageId: string): string; + //utils + stripKeyPrefix(key: string): string; + extractComponentsFromQueue(queue: string): { + orgId: string; + projectId: string; + envId: string; + queue: string; + concurrencyKey: string | undefined; + }; +} + +export type PriorityStrategyChoice = string | { abort: true }; + +export interface RunQueuePriorityStrategy { + /** + * chooseQueue is called to select the next queue to process a message from + * + * @param queues + * @param parentQueue + * @param consumerId + * + * @returns The queue to process the 
message from, or an object with `abort: true` if no queue is available + */ + chooseQueue( + queues: Array, + parentQueue: string, + consumerId: string, + previousRange: QueueRange + ): { choice: PriorityStrategyChoice; nextRange: QueueRange }; + + /** + * This function is called to get the next candidate selection for the queue + * The `range` is used to select the set of queues that will be considered for the next selection (passed to chooseQueue) + * The `selectionId` is used to identify the selection and should be passed to chooseQueue + * + * @param parentQueue The parent queue that holds the candidate queues + * @param consumerId The consumerId that is making the request + * + * @returns The scores and the selectionId for the next candidate selection + */ + nextCandidateSelection(parentQueue: string, consumerId: string): Promise<{ range: QueueRange }>; +} diff --git a/internal-packages/run-engine/src/shared/asyncWorker.ts b/internal-packages/run-engine/src/shared/asyncWorker.ts new file mode 100644 index 0000000000..016662e1d5 --- /dev/null +++ b/internal-packages/run-engine/src/shared/asyncWorker.ts @@ -0,0 +1,34 @@ +export class AsyncWorker { + private running = false; + private timeout?: NodeJS.Timeout; + + constructor(private readonly fn: () => Promise, private readonly interval: number) {} + + start() { + if (this.running) { + return; + } + + this.running = true; + + this.#run(); + } + + stop() { + this.running = false; + } + + async #run() { + if (!this.running) { + return; + } + + try { + await this.fn(); + } catch (e) { + console.error(e); + } + + this.timeout = setTimeout(this.#run.bind(this), this.interval); + } +} diff --git a/internal-packages/run-engine/src/shared/index.ts b/internal-packages/run-engine/src/shared/index.ts new file mode 100644 index 0000000000..6bd3e304e3 --- /dev/null +++ b/internal-packages/run-engine/src/shared/index.ts @@ -0,0 +1,39 @@ +import { Attributes } from "@opentelemetry/api"; +import { Prisma } from 
"../../../database/src"; + +export type AuthenticatedEnvironment = Prisma.RuntimeEnvironmentGetPayload<{ + include: { project: true; organization: true; orgMember: true }; +}>; + +export type MinimalAuthenticatedEnvironment = { + id: AuthenticatedEnvironment["id"]; + type: AuthenticatedEnvironment["type"]; + maximumConcurrencyLimit: AuthenticatedEnvironment["maximumConcurrencyLimit"]; + project: { + id: AuthenticatedEnvironment["project"]["id"]; + }; + organization: { + id: AuthenticatedEnvironment["organization"]["id"]; + }; +}; + +const SemanticEnvResources = { + ENV_ID: "$trigger.env.id", + ENV_TYPE: "$trigger.env.type", + ENV_SLUG: "$trigger.env.slug", + ORG_ID: "$trigger.org.id", + ORG_SLUG: "$trigger.org.slug", + ORG_TITLE: "$trigger.org.title", + PROJECT_ID: "$trigger.project.id", + PROJECT_NAME: "$trigger.project.name", + USER_ID: "$trigger.user.id", +}; + +export function attributesFromAuthenticatedEnv(env: MinimalAuthenticatedEnvironment): Attributes { + return { + [SemanticEnvResources.ENV_ID]: env.id, + [SemanticEnvResources.ENV_TYPE]: env.type, + [SemanticEnvResources.ORG_ID]: env.organization.id, + [SemanticEnvResources.PROJECT_ID]: env.project.id, + }; +} diff --git a/internal-packages/run-engine/tsconfig.json b/internal-packages/run-engine/tsconfig.json new file mode 100644 index 0000000000..515b521967 --- /dev/null +++ b/internal-packages/run-engine/tsconfig.json @@ -0,0 +1,29 @@ +{ + "compilerOptions": { + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable"], + "module": "CommonJS", + "moduleResolution": "Node", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "types": ["vitest/globals"], + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": true, + "skipLibCheck": true, + "noEmit": true, + "strict": true, + "paths": { + "@internal/testcontainers": ["../../internal-packages/testcontainers/src/index"], + "@internal/testcontainers/*": 
["../../internal-packages/testcontainers/src/*"], + "@internal/zod-worker": ["../../internal-packages/zod-worker/src/index"], + "@internal/zod-worker/*": ["../../internal-packages/zod-worker/src/*"], + "@trigger.dev/core": ["../../packages/core/src/index"], + "@trigger.dev/core/*": ["../../packages/core/src/*"], + "@trigger.dev/database": ["../database/src/index"], + "@trigger.dev/database/*": ["../database/src/*"] + } + }, + "exclude": ["node_modules"] +} diff --git a/internal-packages/run-engine/vitest.config.ts b/internal-packages/run-engine/vitest.config.ts new file mode 100644 index 0000000000..4afd926425 --- /dev/null +++ b/internal-packages/run-engine/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + test: { + include: ["**/*.test.ts"], + globals: true, + }, +}); From b01ecadfb6cb250156ae5bc6831da1d23cd23cd2 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 8 Oct 2024 17:47:29 +0100 Subject: [PATCH 002/485] Remove todos from the worker.test --- internal-packages/redis-worker/src/worker.test.ts | 7 ------- 1 file changed, 7 deletions(-) diff --git a/internal-packages/redis-worker/src/worker.test.ts b/internal-packages/redis-worker/src/worker.test.ts index a55a653887..de2e78a7b0 100644 --- a/internal-packages/redis-worker/src/worker.test.ts +++ b/internal-packages/redis-worker/src/worker.test.ts @@ -274,11 +274,4 @@ describe("Worker", () => { } } ); - - //todo test that throwing an error doesn't screw up the other items - //todo process more items when finished - - //todo add a Dead Letter Queue when items are failed, with the error - //todo add a function on the worker to redrive them - //todo add an API endpoint to redrive with an ID }); From e1fc0e96df9cccbf2d64c518b8d5301594847ad2 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 8 Oct 2024 18:04:25 +0100 Subject: [PATCH 003/485] Switch to using redis-worker in the run-engine --- internal-packages/run-engine/package.json | 2 +- 
internal-packages/run-engine/tsconfig.json | 4 ++-- pnpm-lock.yaml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index a2cf01cbdc..82fca6f032 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -5,7 +5,7 @@ "main": "./src/index.ts", "types": "./src/index.ts", "dependencies": { - "@internal/zod-worker": "workspace:*", + "@internal/redis-worker": "workspace:*", "@opentelemetry/api": "^1.9.0", "@opentelemetry/semantic-conventions": "^1.27.0", "@trigger.dev/core": "workspace:*", diff --git a/internal-packages/run-engine/tsconfig.json b/internal-packages/run-engine/tsconfig.json index 515b521967..0ac9414b19 100644 --- a/internal-packages/run-engine/tsconfig.json +++ b/internal-packages/run-engine/tsconfig.json @@ -17,8 +17,8 @@ "paths": { "@internal/testcontainers": ["../../internal-packages/testcontainers/src/index"], "@internal/testcontainers/*": ["../../internal-packages/testcontainers/src/*"], - "@internal/zod-worker": ["../../internal-packages/zod-worker/src/index"], - "@internal/zod-worker/*": ["../../internal-packages/zod-worker/src/*"], + "@internal/redis-worker": ["../../internal-packages/redis-worker/src/index"], + "@internal/redis-worker/*": ["../../internal-packages/redis-worker/src/*"], "@trigger.dev/core": ["../../packages/core/src/index"], "@trigger.dev/core/*": ["../../packages/core/src/*"], "@trigger.dev/database": ["../database/src/index"], diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 2152dd8537..ef8ab777c3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -928,9 +928,9 @@ importers: internal-packages/run-engine: dependencies: - '@internal/zod-worker': + '@internal/redis-worker': specifier: workspace:* - version: link:../zod-worker + version: link:../redis-worker '@opentelemetry/api': specifier: ^1.9.0 version: 1.9.0 From cc7a326328aef57bd988b44cb712c6c50a489bf5 Mon Sep 17 00:00:00 
2001 From: Matt Aitken Date: Tue, 8 Oct 2024 18:49:33 +0100 Subject: [PATCH 004/485] Added @internal/run-engine back to the webapp --- apps/webapp/package.json | 1 + apps/webapp/tsconfig.json | 4 +++- pnpm-lock.yaml | 49 +++++++++++++++++++++++++++++++++++---- 3 files changed, 48 insertions(+), 6 deletions(-) diff --git a/apps/webapp/package.json b/apps/webapp/package.json index 403e19b88a..089176358c 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -50,6 +50,7 @@ "@headlessui/react": "^1.7.8", "@heroicons/react": "^2.0.12", "@internal/zod-worker": "workspace:*", + "@internal/run-engine": "workspace:*", "@internationalized/date": "^3.5.1", "@lezer/highlight": "^1.1.6", "@opentelemetry/api": "1.9.0", diff --git a/apps/webapp/tsconfig.json b/apps/webapp/tsconfig.json index 176d59f8b2..2f08d8ea35 100644 --- a/apps/webapp/tsconfig.json +++ b/apps/webapp/tsconfig.json @@ -33,7 +33,9 @@ "emails": ["../../internal-packages/emails/src/index"], "emails/*": ["../../internal-packages/emails/src/*"], "@internal/zod-worker": ["../../internal-packages/zod-worker/src/index"], - "@internal/zod-worker/*": ["../../internal-packages/zod-worker/src/*"] + "@internal/zod-worker/*": ["../../internal-packages/zod-worker/src/*"], + "@internal/run-engine": ["../../internal-packages/run-engine/src/index"], + "@internal/run-engine/*": ["../../internal-packages/run-engine/src/*"] }, "noEmit": true } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7e4f2a7088..116e016e10 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -243,6 +243,9 @@ importers: '@heroicons/react': specifier: ^2.0.12 version: 2.0.13(react@18.2.0) + '@internal/run-engine': + specifier: workspace:* + version: link:../../internal-packages/run-engine '@internal/zod-worker': specifier: workspace:* version: link:../../internal-packages/zod-worker @@ -923,6 +926,46 @@ importers: specifier: ^1.4.0 version: 1.6.0(@types/node@20.14.14) + internal-packages/run-engine: + dependencies: + 
'@internal/redis-worker': + specifier: workspace:* + version: link:../redis-worker + '@opentelemetry/api': + specifier: ^1.9.0 + version: 1.9.0 + '@opentelemetry/semantic-conventions': + specifier: ^1.27.0 + version: 1.27.0 + '@trigger.dev/core': + specifier: workspace:* + version: link:../../packages/core + '@trigger.dev/database': + specifier: workspace:* + version: link:../database + ioredis: + specifier: ^5.3.2 + version: 5.3.2 + nanoid: + specifier: ^3.3.4 + version: 3.3.7 + redlock: + specifier: 5.0.0-beta.2 + version: 5.0.0-beta.2(patch_hash=rwyegdki7iserrd7fgjwxkhnlu) + typescript: + specifier: ^5.5.4 + version: 5.5.4 + zod: + specifier: 3.22.3 + version: 3.22.3 + devDependencies: + '@internal/testcontainers': + specifier: workspace:* + version: link:../testcontainers + vitest: + specifier: ^1.4.0 + version: 1.6.0(@types/node@20.14.14) + internal-packages/testcontainers: dependencies: '@opentelemetry/api': @@ -16825,7 +16868,7 @@ packages: assertion-error: 1.1.0 check-error: 1.0.3 deep-eql: 4.1.3 - get-func-name: 2.0.0 + get-func-name: 2.0.2 loupe: 2.3.7 pathval: 1.1.1 type-detect: 4.0.8 @@ -20217,10 +20260,6 @@ packages: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} - /get-func-name@2.0.0: - resolution: {integrity: sha512-Hm0ixYtaSZ/V7C8FJrtZIuBBI+iSgL+1Aq82zSu8VQNB4S3Gk8e7Qs3VwBDJAhmRZcFqkl3tQu36g/Foh5I5ig==} - dev: true - /get-func-name@2.0.2: resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} dev: true From 170135c8350ef52810235fa50d01ee4f5f675d14 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 8 Oct 2024 19:08:22 +0100 Subject: [PATCH 005/485] Sensible default retry settings --- internal-packages/redis-worker/src/worker.ts | 32 +++++++++++++++----- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/internal-packages/redis-worker/src/worker.ts 
b/internal-packages/redis-worker/src/worker.ts index 601f5e1708..50f61fdd55 100644 --- a/internal-packages/redis-worker/src/worker.ts +++ b/internal-packages/redis-worker/src/worker.ts @@ -9,11 +9,11 @@ import { SimpleQueue } from "./queue.js"; import Redis from "ioredis"; -type WorkerCatalog = { +export type WorkerCatalog = { [key: string]: { schema: z.ZodFirstPartySchemaTypes | z.ZodDiscriminatedUnion; visibilityTimeoutMs: number; - retry: RetryOptions; + retry?: RetryOptions; }; }; @@ -28,6 +28,11 @@ type JobHandler = (param attempt: number; }) => Promise; +export type WorkerConcurrencyOptions = { + workers?: number; + tasksPerWorker?: number; +}; + type WorkerOptions = { name: string; redisOptions: RedisOptions; @@ -35,14 +40,22 @@ type WorkerOptions = { jobs: { [K in keyof TCatalog]: JobHandler; }; - concurrency?: { - workers?: number; - tasksPerWorker?: number; - }; + concurrency?: WorkerConcurrencyOptions; pollIntervalMs?: number; logger?: Logger; }; +// This results in attempt 12 being a delay of 1 hour +const defaultRetrySettings = { + maxAttempts: 12, + factor: 2, + //one second + minTimeoutInMs: 1_000, + //one hour + maxTimeoutInMs: 3_600_000, + randomize: true, +}; + class Worker { private subscriber: Redis; @@ -182,7 +195,12 @@ class Worker { try { attempt = attempt + 1; - const retryDelay = calculateNextRetryDelay(catalogItem.retry, attempt); + const retrySettings = { + ...defaultRetrySettings, + ...catalogItem.retry, + }; + + const retryDelay = calculateNextRetryDelay(retrySettings, attempt); if (!retryDelay) { this.logger.error( From 27ffb2d1ed9c4e6d244305093c0c118ba64c3031 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 8 Oct 2024 19:23:37 +0100 Subject: [PATCH 006/485] Use the new Redis Worker instead of the ZodWorker --- internal-packages/redis-worker/src/index.ts | 2 + .../run-engine/src/engine/index.ts | 81 +++++++++---------- 2 files changed, 39 insertions(+), 44 deletions(-) create mode 100644 internal-packages/redis-worker/src/index.ts 
diff --git a/internal-packages/redis-worker/src/index.ts b/internal-packages/redis-worker/src/index.ts new file mode 100644 index 0000000000..a5893efc83 --- /dev/null +++ b/internal-packages/redis-worker/src/index.ts @@ -0,0 +1,2 @@ +export * from "./queue"; +export * from "./worker"; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index f74d11ee91..44996e7b17 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1,4 +1,4 @@ -import { RunnerOptions, ZodWorker } from "@internal/zod-worker"; +import { Worker, type WorkerCatalog, type WorkerConcurrencyOptions } from "@internal/redis-worker"; import { trace } from "@opentelemetry/api"; import { Logger } from "@trigger.dev/core/logger"; import { QueueOptions } from "@trigger.dev/core/v3"; @@ -23,8 +23,8 @@ import { nanoid } from "nanoid"; type Options = { redis: RedisOptions; prisma: PrismaClient; - zodWorker: RunnerOptions & { - shutdownTimeoutInMs: number; + worker: WorkerConcurrencyOptions & { + pollIntervalMs?: number; }; }; @@ -64,23 +64,29 @@ type TriggerParams = { seedMetadataType?: string; }; -const schema = { - "runengine.waitpointCompleteDateTime": z.object({ - waitpointId: z.string(), - }), - "runengine.expireRun": z.object({ - runId: z.string(), - }), +const workerCatalog = { + waitpointCompleteDateTime: { + schema: z.object({ + waitpointId: z.string(), + }), + visibilityTimeoutMs: 5000, + }, + expireRun: { + schema: z.object({ + runId: z.string(), + }), + visibilityTimeoutMs: 5000, + }, }; -type EngineWorker = ZodWorker; +type EngineWorker = Worker; export class RunEngine { private redis: Redis; private prisma: PrismaClient; private redlock: Redlock; runQueue: RunQueue; - private zodWorker: EngineWorker; + private worker: EngineWorker; private logger = new Logger("RunEngine", "debug"); constructor(private readonly options: Options) { @@ -106,28 +112,19 @@ export class RunEngine 
{ redis: options.redis, }); - this.zodWorker = new ZodWorker({ - name: "runQueueWorker", - prisma: options.prisma, - replica: options.prisma, - logger: new Logger("RunQueueWorker", "debug"), - runnerOptions: options.zodWorker, - shutdownTimeoutInMs: options.zodWorker.shutdownTimeoutInMs, - schema, - tasks: { - "runengine.waitpointCompleteDateTime": { - priority: 0, - maxAttempts: 10, - handler: async (payload, job) => { - await this.#completeWaitpoint(payload.waitpointId); - }, + this.worker = new Worker({ + name: "runengineworker", + redisOptions: options.redis, + catalog: workerCatalog, + concurrency: options.worker, + pollIntervalMs: options.worker.pollIntervalMs, + logger: new Logger("RunEngineWorker", "debug"), + jobs: { + waitpointCompleteDateTime: async ({ payload }) => { + await this.#completeWaitpoint(payload.waitpointId); }, - "runengine.expireRun": { - priority: 0, - maxAttempts: 10, - handler: async (payload, job) => { - await this.expireRun(payload.runId); - }, + expireRun: async ({ payload }) => { + await this.expireRun(payload.runId); }, }, }); @@ -319,11 +316,7 @@ export class RunEngine { const expireAt = parseNaturalLanguageDuration(taskRun.ttl); if (expireAt) { - await this.zodWorker.enqueue( - "runengine.expireRun", - { runId: taskRun.id }, - { tx, runAt: expireAt, jobKey: `runengine.expireRun.${taskRun.id}` } - ); + await this.worker.enqueue({ job: "expireRun", payload: { runId: taskRun.id } }); } } @@ -418,11 +411,11 @@ export class RunEngine { }, }); - await this.zodWorker.enqueue( - "runengine.waitpointCompleteDateTime", - { waitpointId: waitpoint.id }, - { tx, runAt: completedAfter, jobKey: `waitpointCompleteDateTime.${waitpoint.id}` } - ); + await this.worker.enqueue({ + id: `waitpointCompleteDateTime.${waitpoint.id}`, + job: "waitpointCompleteDateTime", + payload: { waitpointId: waitpoint.id }, + }); return waitpoint; } @@ -521,7 +514,7 @@ export class RunEngine { } } -/* +/* Starting execution flow: 1. 
Run id is pulled from a queue From 17e4c42f60d6bfa9e5db727799262cfd1b73c99e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 9 Oct 2024 12:32:18 +0100 Subject: [PATCH 007/485] Fix for test using old zod-worker options --- internal-packages/run-engine/src/engine/index.test.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 64bfe6c06b..b379b68fa8 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -18,9 +18,10 @@ describe("RunEngine", () => { password: redisContainer.getPassword(), enableAutoPipelining: true, }, - zodWorker: { - connectionString: postgresContainer.getConnectionUri(), - shutdownTimeoutInMs: 100, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, }, }); From 8b5f1c65b7a4aba5acf3ec879cde66a356d4a59a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 9 Oct 2024 15:07:03 +0100 Subject: [PATCH 008/485] Added acking and availableAt to the worker --- internal-packages/redis-worker/src/worker.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal-packages/redis-worker/src/worker.ts b/internal-packages/redis-worker/src/worker.ts index 50f61fdd55..91f85440f5 100644 --- a/internal-packages/redis-worker/src/worker.ts +++ b/internal-packages/redis-worker/src/worker.ts @@ -101,11 +101,13 @@ class Worker { job, payload, visibilityTimeoutMs, + availableAt, }: { id?: string; job: K; payload: z.infer; visibilityTimeoutMs?: number; + availableAt?: Date; }) { const timeout = visibilityTimeoutMs ?? 
this.options.catalog[job].visibilityTimeoutMs; return this.queue.enqueue({ @@ -113,9 +115,14 @@ class Worker { job, item: payload, visibilityTimeoutMs: timeout, + availableAt, }); } + ack(id: string) { + return this.queue.ack(id); + } + private createWorker(tasksPerWorker: number) { const worker = new NodeWorker( ` From f705a77a35e22907883cb98c412d1bd4ef271b98 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 9 Oct 2024 15:07:33 +0100 Subject: [PATCH 009/485] Basic heartbeating added to the RunEngine --- .../database/prisma/schema.prisma | 7 +- .../run-engine/src/engine/index.test.ts | 19 +- .../run-engine/src/engine/index.ts | 162 ++++++++++++++++-- 3 files changed, 169 insertions(+), 19 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 37bd8b49a3..33d80e8298 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1866,7 +1866,7 @@ enum RunEngineVersion { /// Used by the RunEngine during TaskRun execution /// It has the required information to transactionally progress a run through states, /// and prevent side effects like heartbeats failing a run that has progressed. -/// It is optimised for performance and is designed to be cleared at some point, +/// It is optimised for performance and is designed to be cleared at some point, /// so there are no cascading relationships to other models. model TaskRunExecutionSnapshot { id String @id @default(cuid()) @@ -1904,6 +1904,7 @@ model TaskRunExecutionSnapshot { enum TaskRunExecutionStatus { RUN_CREATED + ENQUEUED DEQUEUED_FOR_EXECUTION EXECUTING BLOCKED_BY_WAITPOINTS @@ -1924,7 +1925,7 @@ model Waitpoint { idempotencyKey String userProvidedIdempotencyKey Boolean - /// If an idempotencyKey is no longer active, we store it here and generate a new one for the idempotencyKey field. 
+ /// If an idempotencyKey is no longer active, we store it here and generate a new one for the idempotencyKey field. /// This is a workaround because Prisma doesn't support partial indexes. inactiveIdempotencyKey String? @@ -2011,7 +2012,7 @@ model TaskRunDependency { checkpointEvent CheckpointRestoreEvent? @relation(fields: [checkpointEventId], references: [id], onDelete: Cascade, onUpdate: Cascade) checkpointEventId String? @unique - /// An attempt that is dependent on this task run. + /// An attempt that is dependent on this task run. dependentAttempt TaskRunAttempt? @relation(fields: [dependentAttemptId], references: [id]) dependentAttemptId String? diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index b379b68fa8..892d2500c1 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -57,6 +57,17 @@ describe("RunEngine", () => { expect(runFromDb).toBeDefined(); expect(runFromDb?.id).toBe(run.id); + const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: run.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + expect(snapshot).toBeDefined(); + expect(snapshot?.executionStatus).toBe("ENQUEUED"); + //check the waitpoint is created const runWaitpoint = await prisma.waitpoint.findMany({ where: { @@ -77,11 +88,9 @@ describe("RunEngine", () => { expect(envConcurrencyBefore).toBe(0); //dequeue the run - const dequeued = await engine.runQueue.dequeueMessageInSharedQueue( - "test_12345", - run.masterQueue - ); - expect(dequeued?.messageId).toBe(run.id); + const dequeued = await engine.dequeueFromMasterQueue("test_12345", run.masterQueue); + expect(dequeued?.runId).toBe(run.id); + expect(dequeued?.executionStatus).toBe("DEQUEUED_FOR_EXECUTION"); const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( authenticatedEnvironment diff --git 
a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 44996e7b17..1fc37b0361 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -71,6 +71,12 @@ const workerCatalog = { }), visibilityTimeoutMs: 5000, }, + heartbeatSnapshot: { + schema: z.object({ + snapshotId: z.string(), + }), + visibilityTimeoutMs: 5000, + }, expireRun: { schema: z.object({ runId: z.string(), @@ -123,6 +129,7 @@ export class RunEngine { waitpointCompleteDateTime: async ({ payload }) => { await this.#completeWaitpoint(payload.waitpointId); }, + heartbeatSnapshot: async ({ payload }) => {}, expireRun: async ({ payload }) => { await this.expireRun(payload.runId); }, @@ -230,11 +237,6 @@ export class RunEngine { }); await this.redlock.using([taskRun.id], 5000, async (signal) => { - //todo add this in some places throughout this code - if (signal.aborted) { - throw signal.error; - } - //create associated waitpoint (this completes when the run completes) const associatedWaitpoint = await this.#createRunAssociatedWaitpoint(prisma, { projectId: environment.project.id, @@ -251,6 +253,11 @@ export class RunEngine { }); } + //Make sure lock extension succeeded + if (signal.aborted) { + throw signal.error; + } + if (queue) { const concurrencyLimit = typeof queue.concurrencyLimit === "number" @@ -320,6 +327,11 @@ export class RunEngine { } } + //Make sure lock extension succeeded + if (signal.aborted) { + throw signal.error; + } + await this.enqueueRun(taskRun, environment, prisma); }); @@ -340,6 +352,18 @@ export class RunEngine { env: MinimalAuthenticatedEnvironment, tx?: PrismaClientOrTransaction ) { + const prisma = tx ?? 
this.prisma; + + await prisma.taskRunExecutionSnapshot.create({ + data: { + runId: run.id, + engine: "V2", + executionStatus: "ENQUEUED", + description: "Run was enqueued", + runStatus: run.status, + }, + }); + await this.runQueue.enqueueMessage({ env, masterQueue: run.masterQueue, @@ -359,11 +383,54 @@ export class RunEngine { //todo update the TaskRunExecutionSnapshot } - async dequeueRun(consumerId: string, masterQueue: string) { + /** + * Gets a fairly selected run from the specified master queue, returning the information required to run it. + * @param consumerId: The consumer that is pulling, allows multiple consumers to pull from the same queue + * @param masterQueue: The shared queue to pull from, can be an individual environment (for dev) + * @returns + */ + async dequeueFromMasterQueue(consumerId: string, masterQueue: string) { const message = await this.runQueue.dequeueMessageInSharedQueue(consumerId, masterQueue); - //todo update the TaskRunExecutionSnapshot - //todo update the TaskRun status? 
- return message; + if (!message) { + return null; + } + + const newSnapshot = await this.redlock.using([message.messageId], 5000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(message.messageId); + if (!snapshot) { + throw new Error( + `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${message.messageId}` + ); + } + + if (!["ENQUEUED", "BLOCKED_BY_WAITPOINTS"].includes(snapshot.executionStatus)) { + throw new Error( + `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${message.messageId}\n ${snapshot.id}:${snapshot.executionStatus}` + ); + } + + //create new snapshot + const newSnapshot = await this.prisma.taskRunExecutionSnapshot.create({ + data: { + runId: message.messageId, + engine: "V2", + executionStatus: "DEQUEUED_FOR_EXECUTION", + description: "Run was dequeued for execution", + runStatus: snapshot.runStatus, + }, + }); + + //todo create heartbeat, associated with this snapshot + await this.#startHeartbeating({ + runId: message.messageId, + snapshotId: newSnapshot.id, + intervalSeconds: 60, + }); + + return newSnapshot; + }); + + return newSnapshot; } /** We want to actually execute the run, this could be a continuation of a previous execution. 
@@ -426,12 +493,12 @@ export class RunEngine { ) { //todo it would be better if we didn't remove from the queue, because this removes the payload //todo better would be to have a "block" function which remove it from the queue but doesn't remove the payload - //todo - // await this.runQueue.acknowledgeMessage(orgId, runId); //todo release concurrency and make sure the run isn't in the queue // await this.runQueue.blockMessage(orgId, runId); + throw new Error("Not implemented #blockRunWithWaitpoint"); + return tx.taskRunWaitpoint.create({ data: { taskRunId: runId, @@ -512,6 +579,79 @@ export class RunEngine { { isolationLevel: Prisma.TransactionIsolationLevel.ReadCommitted } ); } + + //MARK: - TaskRunExecutionSnapshots + async #getLatestExecutionSnapshot(runId: string) { + return this.prisma.taskRunExecutionSnapshot.findFirst({ + where: { runId }, + orderBy: { createdAt: "desc" }, + }); + } + + //MARK: - Heartbeat + async #startHeartbeating({ + runId, + snapshotId, + intervalSeconds, + }: { + runId: string; + snapshotId: string; + intervalSeconds: number; + }) { + await this.worker.enqueue({ + id: `heartbeatSnapshot.${snapshotId}`, + job: "heartbeatSnapshot", + payload: { snapshotId, runId }, + availableAt: new Date(Date.now() + intervalSeconds * 1000), + }); + } + + async #extendHeartbeatTimeout({ + runId, + snapshotId, + intervalSeconds, + }: { + runId: string; + snapshotId: string; + intervalSeconds: number; + }) { + const latestSnapshot = await this.#getLatestExecutionSnapshot(runId); + if (latestSnapshot?.id !== snapshotId) { + this.logger.log( + "RunEngine.#extendHeartbeatTimeout() no longer the latest snapshot, stopping the heartbeat.", + { + runId, + snapshotId, + latestSnapshot: latestSnapshot, + } + ); + + await this.worker.ack(`heartbeatSnapshot.${snapshotId}`); + return; + } + + //it's the same as creating a new heartbeat + await this.#startHeartbeating({ runId, snapshotId, intervalSeconds }); + } + + async #handleStalledSnapshot({ runId, snapshotId }: { 
runId: string; snapshotId: string }) { + const latestSnapshot = await this.#getLatestExecutionSnapshot(runId); + if (latestSnapshot?.id !== snapshotId) { + this.logger.log( + "RunEngine.#handleStalledSnapshot() no longer the latest snapshot, stopping the heartbeat.", + { + runId, + snapshotId, + latestSnapshot: latestSnapshot, + } + ); + + await this.worker.ack(`heartbeatSnapshot.${snapshotId}`); + return; + } + + //todo we need to return the run to the queue in the correct state. + } } /* From 0f11508e6713bb5028242e218bd9bf80fc1c1682 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 9 Oct 2024 15:13:33 +0100 Subject: [PATCH 010/485] Heartbeat handling stubs --- internal-packages/run-engine/package.json | 1 + .../run-engine/src/engine/index.ts | 46 ++++++++++++++++++- pnpm-lock.yaml | 3 ++ 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index 82fca6f032..767a4469c9 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -10,6 +10,7 @@ "@opentelemetry/semantic-conventions": "^1.27.0", "@trigger.dev/core": "workspace:*", "@trigger.dev/database": "workspace:*", + "assert-never": "^1.2.1", "ioredis": "^5.3.2", "nanoid": "^3.3.4", "redlock": "5.0.0-beta.2", diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 1fc37b0361..724a4e3fce 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1,4 +1,4 @@ -import { Worker, type WorkerCatalog, type WorkerConcurrencyOptions } from "@internal/redis-worker"; +import { Worker, type WorkerConcurrencyOptions } from "@internal/redis-worker"; import { trace } from "@opentelemetry/api"; import { Logger } from "@trigger.dev/core/logger"; import { QueueOptions } from "@trigger.dev/core/v3"; @@ -11,6 +11,7 @@ import { TaskRun, Waitpoint, } from 
"@trigger.dev/database"; +import assertNever from "assert-never"; import { Redis, type RedisOptions } from "ioredis"; import Redlock from "redlock"; import { z } from "zod"; @@ -73,6 +74,7 @@ const workerCatalog = { }, heartbeatSnapshot: { schema: z.object({ + runId: z.string(), snapshotId: z.string(), }), visibilityTimeoutMs: 5000, @@ -129,7 +131,9 @@ export class RunEngine { waitpointCompleteDateTime: async ({ payload }) => { await this.#completeWaitpoint(payload.waitpointId); }, - heartbeatSnapshot: async ({ payload }) => {}, + heartbeatSnapshot: async ({ payload }) => { + await this.#handleStalledSnapshot(payload); + }, expireRun: async ({ payload }) => { await this.expireRun(payload.runId); }, @@ -636,6 +640,14 @@ export class RunEngine { async #handleStalledSnapshot({ runId, snapshotId }: { runId: string; snapshotId: string }) { const latestSnapshot = await this.#getLatestExecutionSnapshot(runId); + if (!latestSnapshot) { + this.logger.error("RunEngine.#handleStalledSnapshot() no latest snapshot found", { + runId, + snapshotId, + }); + return; + } + if (latestSnapshot?.id !== snapshotId) { this.logger.log( "RunEngine.#handleStalledSnapshot() no longer the latest snapshot, stopping the heartbeat.", @@ -650,6 +662,36 @@ export class RunEngine { return; } + switch (latestSnapshot.executionStatus) { + case "BLOCKED_BY_WAITPOINTS": { + //we need to check if the waitpoints are still blocking the run + throw new Error("Not implemented BLOCKED_BY_WAITPOINTS"); + } + case "DEQUEUED_FOR_EXECUTION": { + //we need to check if the run is still dequeued + throw new Error("Not implemented DEQUEUED_FOR_EXECUTION"); + } + case "ENQUEUED": { + //we need to check if the run is still enqueued + throw new Error("Not implemented ENQUEUED"); + } + case "EXECUTING": { + //we need to check if the run is still executing + throw new Error("Not implemented EXECUTING"); + } + case "FINISHED": { + //we need to check if the run is still finished + throw new Error("Not implemented 
FINISHED"); + } + case "RUN_CREATED": { + //we need to check if the run is still created + throw new Error("Not implemented RUN_CREATED"); + } + default: { + assertNever(latestSnapshot.executionStatus); + } + } + //todo we need to return the run to the queue in the correct state. } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 116e016e10..054848c05d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -943,6 +943,9 @@ importers: '@trigger.dev/database': specifier: workspace:* version: link:../database + assert-never: + specifier: ^1.2.1 + version: 1.2.1 ioredis: specifier: ^5.3.2 version: 5.3.2 From 91535ce5f064ce1632ca6532c7e3262dcc5d66fe Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 9 Oct 2024 16:13:05 +0100 Subject: [PATCH 011/485] Changed a snapshot status --- .../database/prisma/schema.prisma | 2 +- .../run-engine/src/engine/index.ts | 143 +++++++++++------- 2 files changed, 91 insertions(+), 54 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 33d80e8298..da8cf94747 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1904,7 +1904,7 @@ model TaskRunExecutionSnapshot { enum TaskRunExecutionStatus { RUN_CREATED - ENQUEUED + QUEUED DEQUEUED_FOR_EXECUTION EXECUTING BLOCKED_BY_WAITPOINTS diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 724a4e3fce..59a6b32414 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -18,7 +18,6 @@ import { z } from "zod"; import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; - import { nanoid } from "nanoid"; type Options = { @@ -143,8 +142,7 @@ export class RunEngine { //MARK: - Run functions - /** 
"Triggers" one run. - */ + /** "Triggers" one run. */ async trigger( { friendlyId, @@ -336,7 +334,7 @@ export class RunEngine { throw signal.error; } - await this.enqueueRun(taskRun, environment, prisma); + await this.#enqueueRun(taskRun, environment, prisma); }); //todo release parent concurrency (for the project, task, and environment, but not for the queue?) @@ -350,43 +348,6 @@ export class RunEngine { */ async batchTrigger() {} - /** The run can be added to the queue. When it's pulled from the queue it will be executed. */ - async enqueueRun( - run: TaskRun, - env: MinimalAuthenticatedEnvironment, - tx?: PrismaClientOrTransaction - ) { - const prisma = tx ?? this.prisma; - - await prisma.taskRunExecutionSnapshot.create({ - data: { - runId: run.id, - engine: "V2", - executionStatus: "ENQUEUED", - description: "Run was enqueued", - runStatus: run.status, - }, - }); - - await this.runQueue.enqueueMessage({ - env, - masterQueue: run.masterQueue, - message: { - runId: run.id, - taskIdentifier: run.taskIdentifier, - orgId: env.organization.id, - projectId: env.project.id, - environmentId: env.id, - environmentType: env.type, - queue: run.queue, - concurrencyKey: run.concurrencyKey ?? undefined, - timestamp: Date.now(), - }, - }); - - //todo update the TaskRunExecutionSnapshot - } - /** * Gets a fairly selected run from the specified master queue, returning the information required to run it. 
* @param consumerId: The consumer that is pulling, allows multiple consumers to pull from the same queue @@ -407,7 +368,7 @@ export class RunEngine { ); } - if (!["ENQUEUED", "BLOCKED_BY_WAITPOINTS"].includes(snapshot.executionStatus)) { + if (!["QUEUED", "BLOCKED_BY_WAITPOINTS"].includes(snapshot.executionStatus)) { throw new Error( `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${message.messageId}\n ${snapshot.id}:${snapshot.executionStatus}` ); @@ -437,19 +398,89 @@ export class RunEngine { return newSnapshot; } - /** We want to actually execute the run, this could be a continuation of a previous execution. - * This is called from the queue, when the run has been pulled. */ - //todo think more about this, when do we create the attempt? - //todo what does this actually do? - //todo how does it get sent to the worker? DEV and PROD - async prepareForExecution(runId: string) {} + async createRunAttempt(runId: string, snapshotId: string) { + //todo create the run attempt, update the execution status, start a heartbeat + } - async prepareForAttempt(runId: string) {} + async waitForDuration() {} async complete(runId: string, completion: any) {} async expireRun(runId: string) {} + //MARK: RunQueue + + /** The run can be added to the queue. When it's pulled from the queue it will be executed. */ + async #enqueueRun( + run: TaskRun, + env: MinimalAuthenticatedEnvironment, + tx?: PrismaClientOrTransaction + ) { + const prisma = tx ?? 
this.prisma; + + await prisma.taskRunExecutionSnapshot.create({ + data: { + runId: run.id, + engine: "V2", + executionStatus: "QUEUED", + description: "Run was QUEUED", + runStatus: run.status, + }, + }); + + await this.runQueue.enqueueMessage({ + env, + masterQueue: run.masterQueue, + message: { + runId: run.id, + taskIdentifier: run.taskIdentifier, + orgId: env.organization.id, + projectId: env.project.id, + environmentId: env.id, + environmentType: env.type, + queue: run.queue, + concurrencyKey: run.concurrencyKey ?? undefined, + timestamp: Date.now(), + }, + }); + } + + async #continueRun( + run: TaskRun, + env: MinimalAuthenticatedEnvironment, + tx?: PrismaClientOrTransaction + ) { + const prisma = tx ?? this.prisma; + + await this.redlock.using([run.id], 5000, async (signal) => { + await prisma.taskRunExecutionSnapshot.create({ + data: { + runId: run.id, + engine: "V2", + executionStatus: "QUEUED", + description: "Run was QUEUED", + runStatus: run.status, + }, + }); + + await this.runQueue.enqueueMessage({ + env, + masterQueue: run.masterQueue, + message: { + runId: run.id, + taskIdentifier: run.taskIdentifier, + orgId: env.organization.id, + projectId: env.project.id, + environmentId: env.id, + environmentType: env.type, + queue: run.queue, + concurrencyKey: run.concurrencyKey ?? undefined, + timestamp: Date.now(), + }, + }); + }); + } + //MARK: - Waitpoints async #createRunAssociatedWaitpoint( tx: PrismaClientOrTransaction, @@ -573,7 +604,7 @@ export class RunEngine { // 5. 
Continue the runs that have no more waitpoints for (const run of taskRunsToResume) { - await this.enqueueRun(run, run.runtimeEnvironment, tx); + await this.#continueRun(run, run.runtimeEnvironment, tx); } }, (error) => { @@ -662,18 +693,24 @@ export class RunEngine { return; } + this.logger.log("RunEngine.#handleStalledSnapshot() handling stalled snapshot", { + runId, + snapshot: latestSnapshot, + }); + switch (latestSnapshot.executionStatus) { case "BLOCKED_BY_WAITPOINTS": { //we need to check if the waitpoints are still blocking the run throw new Error("Not implemented BLOCKED_BY_WAITPOINTS"); } case "DEQUEUED_FOR_EXECUTION": { + //todo probably put it back in the queue //we need to check if the run is still dequeued throw new Error("Not implemented DEQUEUED_FOR_EXECUTION"); } - case "ENQUEUED": { - //we need to check if the run is still enqueued - throw new Error("Not implemented ENQUEUED"); + case "QUEUED": { + //we need to check if the run is still QUEUED + throw new Error("Not implemented QUEUED"); } case "EXECUTING": { //we need to check if the run is still executing From 674c29ba19f0e1c66658d352b705dfa458ec4e62 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 9 Oct 2024 16:43:18 +0100 Subject: [PATCH 012/485] DateTime waitpoints queued --- .../run-engine/src/engine/index.ts | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 59a6b32414..09dd7ac3f1 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -314,10 +314,12 @@ export class RunEngine { completedAfter: taskRun.delayUntil, }); - await this.#blockRunWithWaitpoint(prisma, { - orgId: environment.organization.id, - runId: taskRun.id, - waitpoint: delayWaitpoint, + await prisma.taskRunWaitpoint.create({ + data: { + taskRunId: taskRun.id, + waitpointId: delayWaitpoint.id, + projectId: 
delayWaitpoint.projectId, + }, }); } @@ -334,7 +336,10 @@ export class RunEngine { throw signal.error; } - await this.#enqueueRun(taskRun, environment, prisma); + //enqueue the run if it's not delayed + if (!taskRun.delayUntil) { + await this.#enqueueRun(taskRun, environment, prisma); + } }); //todo release parent concurrency (for the project, task, and environment, but not for the queue?) @@ -453,6 +458,15 @@ export class RunEngine { const prisma = tx ?? this.prisma; await this.redlock.using([run.id], 5000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(run.id); + if (!snapshot) { + throw new Error(`RunEngine.#continueRun(): No snapshot found for run: ${run.id}`); + } + + if (snapshot.executionStatus === "EXECUTING") { + throw new Error("RunEngine.#continueRun(): continue executing run, not implemented yet"); + } + await prisma.taskRunExecutionSnapshot.create({ data: { runId: run.id, @@ -517,6 +531,7 @@ export class RunEngine { id: `waitpointCompleteDateTime.${waitpoint.id}`, job: "waitpointCompleteDateTime", payload: { waitpointId: waitpoint.id }, + availableAt: completedAfter, }); return waitpoint; @@ -704,7 +719,6 @@ export class RunEngine { throw new Error("Not implemented BLOCKED_BY_WAITPOINTS"); } case "DEQUEUED_FOR_EXECUTION": { - //todo probably put it back in the queue //we need to check if the run is still dequeued throw new Error("Not implemented DEQUEUED_FOR_EXECUTION"); } From a23ed33a14f18b933121ced0087ba54429c61090 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 10 Oct 2024 09:48:59 +0100 Subject: [PATCH 013/485] Removed unused method --- .../run-engine/src/run-queue/index.ts | 40 +++---------------- 1 file changed, 5 insertions(+), 35 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index cb07054051..293218ab17 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ 
b/internal-packages/run-engine/src/run-queue/index.ts @@ -940,36 +940,6 @@ export class RunQueue { ); } - async #callRebalanceParentQueueChild({ - parentQueue, - childQueue, - currentScore, - }: { - parentQueue: string; - childQueue: string; - currentScore: string; - }) { - const rebalanceResult = await this.redis.rebalanceParentQueueChild( - childQueue, - parentQueue, - childQueue, - currentScore - ); - - if (rebalanceResult) { - this.logger.debug("Rebalanced parent queue child", { - parentQueue, - childQueue, - currentScore, - rebalanceResult, - operation: "rebalanceParentQueueChild", - service: this.name, - }); - } - - return rebalanceResult; - } - #registerCommands() { this.redis.defineCommand("enqueueMessage", { numberOfKeys: 7, @@ -1085,7 +1055,7 @@ return {messageId, messageScore, messagePayload} -- Return message details this.redis.defineCommand("acknowledgeMessage", { numberOfKeys: 7, lua: ` --- Keys: +-- Keys: local parentQueue = KEYS[1] local messageKey = KEYS[2] local messageQueue = KEYS[3] @@ -1131,7 +1101,7 @@ local envConcurrencyKey = KEYS[5] local projectConcurrencyKey = KEYS[6] local taskConcurrencyKey = KEYS[7] --- Args: +-- Args: local messageId = ARGV[1] local messageScore = tonumber(ARGV[2]) @@ -1200,7 +1170,7 @@ local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) o local concurrencyLimit = redis.call('GET', concurrencyLimitKey) -- Return current capacity and concurrency limits for the queue, env, org -return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, orgIsEnabled } +return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, orgIsEnabled } `, }); @@ -1213,7 +1183,7 @@ local currentEnvConcurrencyKey = KEYS[2] local concurrencyLimitKey = KEYS[3] local envConcurrencyLimitKey = KEYS[4] --- Args +-- Args local defaultEnvConcurrencyLimit = tonumber(ARGV[1]) local currentEnvConcurrency = tonumber(redis.call('SCARD', currentEnvConcurrencyKey) or '0') @@ 
-1223,7 +1193,7 @@ local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) o local concurrencyLimit = redis.call('GET', concurrencyLimitKey) -- Return current capacity and concurrency limits for the queue, env, org -return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, true } +return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, true } `, }); From 1c64f87ace7d934edb8e14e6099afee58105e1f9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 10 Oct 2024 09:49:55 +0100 Subject: [PATCH 014/485] Fix for renamed execution status --- internal-packages/run-engine/src/engine/index.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 892d2500c1..c9dc6b3d62 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -66,7 +66,7 @@ describe("RunEngine", () => { }, }); expect(snapshot).toBeDefined(); - expect(snapshot?.executionStatus).toBe("ENQUEUED"); + expect(snapshot?.executionStatus).toBe("QUEUED"); //check the waitpoint is created const runWaitpoint = await prisma.waitpoint.findMany({ From 6769fd1ad8beb84a335d9ddfd37b1e365021c768 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 10 Oct 2024 11:06:56 +0100 Subject: [PATCH 015/485] When creating a snapshot, add heartbeats --- .../database/prisma/schema.prisma | 15 +- .../run-engine/src/engine/index.ts | 138 +++++++++++++----- packages/core/src/v3/schemas/common.ts | 1 + 3 files changed, 119 insertions(+), 35 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index da8cf94747..84182aaf65 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1896,7 +1896,9 @@ model 
TaskRunExecutionSnapshot { ///todo machine spec? - ///todo worker + ///todo add worker, we'll use it to call back out to the worker + workerId String? + worker Worker? @relation(fields: [workerId], references: [id]) /// Used to get the latest state quickly @@index([runId, createdAt(sort: Desc)]) @@ -1983,6 +1985,17 @@ model TaskRunWaitpoint { @@index([waitpointId]) } +model Worker { + id String @id @default(cuid()) + executionSnapshots TaskRunExecutionSnapshot[] +} + +model WorkerGroup { + id String @id @default(cuid()) + + masterQueue String +} + model TaskRunTag { id String @id @default(cuid()) name String diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 09dd7ac3f1..8366d366a3 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1,7 +1,7 @@ import { Worker, type WorkerConcurrencyOptions } from "@internal/redis-worker"; import { trace } from "@opentelemetry/api"; import { Logger } from "@trigger.dev/core/logger"; -import { QueueOptions } from "@trigger.dev/core/v3"; +import { QueueOptions, TaskRunInternalError } from "@trigger.dev/core/v3"; import { generateFriendlyId, parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; import { $transaction, @@ -9,6 +9,8 @@ import { PrismaClient, PrismaClientOrTransaction, TaskRun, + TaskRunExecutionStatus, + TaskRunStatus, Waitpoint, } from "@trigger.dev/database"; import assertNever from "assert-never"; @@ -19,6 +21,7 @@ import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; import { nanoid } from "nanoid"; +import { error } from "node:console"; type Options = { redis: RedisOptions; @@ -134,7 +137,7 @@ export class RunEngine { await this.#handleStalledSnapshot(payload); }, expireRun: async ({ payload }) => { - await 
this.expireRun(payload.runId); + await this.expire(payload.runId); }, }, }); @@ -366,7 +369,7 @@ export class RunEngine { } const newSnapshot = await this.redlock.using([message.messageId], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(message.messageId); + const snapshot = await this.#getLatestExecutionSnapshot(this.prisma, message.messageId); if (!snapshot) { throw new Error( `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${message.messageId}` @@ -374,44 +377,51 @@ export class RunEngine { } if (!["QUEUED", "BLOCKED_BY_WAITPOINTS"].includes(snapshot.executionStatus)) { + //todo put run in a system failure state throw new Error( `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${message.messageId}\n ${snapshot.id}:${snapshot.executionStatus}` ); } - //create new snapshot - const newSnapshot = await this.prisma.taskRunExecutionSnapshot.create({ - data: { - runId: message.messageId, - engine: "V2", + const newSnapshot = await this.#createExecutionSnapshot(this.prisma, { + run: { + id: message.messageId, + status: snapshot.runStatus, + }, + snapshot: { executionStatus: "DEQUEUED_FOR_EXECUTION", description: "Run was dequeued for execution", - runStatus: snapshot.runStatus, }, }); - //todo create heartbeat, associated with this snapshot - await this.#startHeartbeating({ - runId: message.messageId, - snapshotId: newSnapshot.id, - intervalSeconds: 60, - }); - return newSnapshot; }); return newSnapshot; } - async createRunAttempt(runId: string, snapshotId: string) { - //todo create the run attempt, update the execution status, start a heartbeat + async createRunAttempt(runId: string, snapshotId: string, tx?: PrismaClientOrTransaction) { + const prisma = tx ?? 
this.prisma; + + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (!latestSnapshot) { + return this.systemFailure(runId, { + type: "INTERNAL_ERROR", + code: "TASK_HAS_N0_EXECUTION_SNAPSHOT", + message: "Task had no execution snapshot when trying to create a run attempt", + }); + } + + //todo check if the snapshot is the latest one } async waitForDuration() {} async complete(runId: string, completion: any) {} - async expireRun(runId: string) {} + async expire(runId: string) {} + + async systemFailure(runId: string, error: TaskRunInternalError) {} //MARK: RunQueue @@ -423,13 +433,11 @@ export class RunEngine { ) { const prisma = tx ?? this.prisma; - await prisma.taskRunExecutionSnapshot.create({ - data: { - runId: run.id, - engine: "V2", + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: run, + snapshot: { executionStatus: "QUEUED", description: "Run was QUEUED", - runStatus: run.status, }, }); @@ -458,22 +466,31 @@ export class RunEngine { const prisma = tx ?? 
this.prisma; await this.redlock.using([run.id], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(run.id); + const snapshot = await this.#getLatestExecutionSnapshot(prisma, run.id); if (!snapshot) { throw new Error(`RunEngine.#continueRun(): No snapshot found for run: ${run.id}`); } - if (snapshot.executionStatus === "EXECUTING") { + //run is still executing, send a message to the worker + if (snapshot.executionStatus === "EXECUTING" && snapshot.worker) { + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: run, + snapshot: { + executionStatus: "EXECUTING", + description: "Run was continued, whilst still executing.", + }, + }); + + //todo send a message to the worker somehow + // await this.#sendMessageToWorker(); throw new Error("RunEngine.#continueRun(): continue executing run, not implemented yet"); } - await prisma.taskRunExecutionSnapshot.create({ - data: { - runId: run.id, - engine: "V2", + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: run, + snapshot: { executionStatus: "QUEUED", - description: "Run was QUEUED", - runStatus: run.status, + description: "Run was QUEUED, because it needs to be continued.", }, }); @@ -631,8 +648,59 @@ export class RunEngine { } //MARK: - TaskRunExecutionSnapshots - async #getLatestExecutionSnapshot(runId: string) { - return this.prisma.taskRunExecutionSnapshot.findFirst({ + async #createExecutionSnapshot( + prisma: PrismaClientOrTransaction, + { + run, + snapshot, + }: { + run: { id: string; status: TaskRunStatus }; + snapshot: { + executionStatus: TaskRunExecutionStatus; + description: string; + }; + } + ) { + const newSnapshot = await prisma.taskRunExecutionSnapshot.create({ + data: { + runId: run.id, + engine: "V2", + executionStatus: snapshot.executionStatus, + description: snapshot.description, + runStatus: run.status, + }, + }); + + //create heartbeat (if relevant) + switch (snapshot.executionStatus) { + case "RUN_CREATED": + case "QUEUED": 
+ case "BLOCKED_BY_WAITPOINTS": + case "FINISHED": + case "DEQUEUED_FOR_EXECUTION": { + await this.#startHeartbeating({ + runId: run.id, + snapshotId: newSnapshot.id, + intervalSeconds: 60, + }); + break; + } + case "EXECUTING": { + await this.#startHeartbeating({ + runId: run.id, + snapshotId: newSnapshot.id, + intervalSeconds: 60 * 15, + }); + break; + } + } + + return newSnapshot; + } + + async #getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { + return prisma.taskRunExecutionSnapshot.findFirst({ + include: { worker: true }, where: { runId }, orderBy: { createdAt: "desc" }, }); @@ -713,6 +781,8 @@ export class RunEngine { snapshot: latestSnapshot, }); + //todo fail attempt if there is one? + switch (latestSnapshot.executionStatus) { case "BLOCKED_BY_WAITPOINTS": { //we need to check if the waitpoints are still blocking the run diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index c7e2bd8f77..221d99dbd5 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -124,6 +124,7 @@ export const TaskRunInternalError = z.object({ "DISK_SPACE_EXCEEDED", "POD_EVICTED", "POD_UNKNOWN_ERROR", + "TASK_HAS_N0_EXECUTION_SNAPSHOT", ]), message: z.string().optional(), stackTrace: z.string().optional(), From dd0f84250520220a2eee4942e01dc1c01d919fed Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 10 Oct 2024 12:49:29 +0100 Subject: [PATCH 016/485] Small changes --- .../run-engine/src/engine/index.ts | 74 ++++++++++++++----- 1 file changed, 55 insertions(+), 19 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 8366d366a3..ca5bfe210f 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -15,13 +15,18 @@ import { } from "@trigger.dev/database"; import assertNever from "assert-never"; import { Redis, type RedisOptions } 
from "ioredis"; +import { nanoid } from "nanoid"; import Redlock from "redlock"; import { z } from "zod"; import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; -import { nanoid } from "nanoid"; -import { error } from "node:console"; + +class NotImplementedError extends Error { + constructor(message: string) { + super(message); + } +} type Options = { redis: RedisOptions; @@ -362,14 +367,23 @@ export class RunEngine { * @param masterQueue: The shared queue to pull from, can be an individual environment (for dev) * @returns */ - async dequeueFromMasterQueue(consumerId: string, masterQueue: string) { + async dequeueFromMasterQueue({ + consumerId, + masterQueue, + tx, + }: { + consumerId: string; + masterQueue: string; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? this.prisma; const message = await this.runQueue.dequeueMessageInSharedQueue(consumerId, masterQueue); if (!message) { return null; } const newSnapshot = await this.redlock.using([message.messageId], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(this.prisma, message.messageId); + const snapshot = await this.#getLatestExecutionSnapshot(prisma, message.messageId); if (!snapshot) { throw new Error( `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${message.messageId}` @@ -383,7 +397,7 @@ export class RunEngine { ); } - const newSnapshot = await this.#createExecutionSnapshot(this.prisma, { + const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: { id: message.messageId, status: snapshot.runStatus, @@ -400,15 +414,27 @@ export class RunEngine { return newSnapshot; } - async createRunAttempt(runId: string, snapshotId: string, tx?: PrismaClientOrTransaction) { + async createRunAttempt({ + runId, + snapshotId, + tx, + }: { + runId: string; + snapshotId: string; + tx?: PrismaClientOrTransaction; 
+ }) { const prisma = tx ?? this.prisma; const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (!latestSnapshot) { - return this.systemFailure(runId, { - type: "INTERNAL_ERROR", - code: "TASK_HAS_N0_EXECUTION_SNAPSHOT", - message: "Task had no execution snapshot when trying to create a run attempt", + return this.systemFailure({ + runId, + error: { + type: "INTERNAL_ERROR", + code: "TASK_HAS_N0_EXECUTION_SNAPSHOT", + message: "Task had no execution snapshot when trying to create a run attempt", + }, + tx: prisma, }); } @@ -421,7 +447,15 @@ export class RunEngine { async expire(runId: string) {} - async systemFailure(runId: string, error: TaskRunInternalError) {} + async systemFailure({ + runId, + error, + tx, + }: { + runId: string; + error: TaskRunInternalError; + tx?: PrismaClientOrTransaction; + }) {} //MARK: RunQueue @@ -483,7 +517,9 @@ export class RunEngine { //todo send a message to the worker somehow // await this.#sendMessageToWorker(); - throw new Error("RunEngine.#continueRun(): continue executing run, not implemented yet"); + throw new NotImplementedError( + "RunEngine.#continueRun(): continue executing run, not implemented yet" + ); } const newSnapshot = await this.#createExecutionSnapshot(prisma, { @@ -564,7 +600,7 @@ export class RunEngine { //todo release concurrency and make sure the run isn't in the queue // await this.runQueue.blockMessage(orgId, runId); - throw new Error("Not implemented #blockRunWithWaitpoint"); + throw new NotImplementedError("Not implemented #blockRunWithWaitpoint"); return tx.taskRunWaitpoint.create({ data: { @@ -786,27 +822,27 @@ export class RunEngine { switch (latestSnapshot.executionStatus) { case "BLOCKED_BY_WAITPOINTS": { //we need to check if the waitpoints are still blocking the run - throw new Error("Not implemented BLOCKED_BY_WAITPOINTS"); + throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); } case "DEQUEUED_FOR_EXECUTION": { //we need to check if the run is still 
dequeued - throw new Error("Not implemented DEQUEUED_FOR_EXECUTION"); + throw new NotImplementedError("Not implemented DEQUEUED_FOR_EXECUTION"); } case "QUEUED": { //we need to check if the run is still QUEUED - throw new Error("Not implemented QUEUED"); + throw new NotImplementedError("Not implemented QUEUED"); } case "EXECUTING": { //we need to check if the run is still executing - throw new Error("Not implemented EXECUTING"); + throw new NotImplementedError("Not implemented EXECUTING"); } case "FINISHED": { //we need to check if the run is still finished - throw new Error("Not implemented FINISHED"); + throw new NotImplementedError("Not implemented FINISHED"); } case "RUN_CREATED": { //we need to check if the run is still created - throw new Error("Not implemented RUN_CREATED"); + throw new NotImplementedError("Not implemented RUN_CREATED"); } default: { assertNever(latestSnapshot.executionStatus); From 24d18e2311c04d732b7312b638874c9232cbcae7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 10 Oct 2024 12:49:41 +0100 Subject: [PATCH 017/485] Added a section to the readme about sending messages to workers --- internal-packages/run-engine/README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 2960756f71..049ad4e42d 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -217,6 +217,13 @@ export const myTask = task({ It's very important that a run can only be acted on by one process at a time. We lock runs using RedLock while they're being mutated. This prevents some network-related race conditions like the timing of checkpoints and heartbeats permanently hanging runs. +# Sending messages to the worker + +Sending messages to the worker is challenging because we have many servers and we're going to have many workers. We need to make sure that the message is sent to the correct worker. 
+ +## #continueRun +When all waitpoints are finished, we need to continue a run. Sometimes they're still running in the cluster. + # Legacy system These are all the TaskRun mutations happening right now: From ad73223bee921283eb9f9ac6b0b9c20713fd27f8 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 10 Oct 2024 12:52:40 +0100 Subject: [PATCH 018/485] Pass prisma/tx to everything --- internal-packages/run-engine/README.md | 2 ++ .../run-engine/src/engine/index.ts | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 049ad4e42d..a8d535b297 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -221,6 +221,8 @@ It's very important that a run can only be acted on by one process at a time. We Sending messages to the worker is challenging because we have many servers and we're going to have many workers. We need to make sure that the message is sent to the correct worker. +We could add timeouts using the heartbeat system ## #continueRun When all waitpoints are finished, we need to continue a run. Sometimes they're still running in the cluster. diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index ca5bfe210f..5fb9a15713 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -764,12 +764,14 @@ export class RunEngine { runId, snapshotId, intervalSeconds, + tx, }: { runId: string; snapshotId: string; intervalSeconds: number; + tx?: PrismaClientOrTransaction; }) { - const latestSnapshot = await this.#getLatestExecutionSnapshot(runId); + const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? 
this.prisma, runId); if (latestSnapshot?.id !== snapshotId) { this.logger.log( "RunEngine.#extendHeartbeatTimeout() no longer the latest snapshot, stopping the heartbeat.", @@ -788,8 +790,16 @@ export class RunEngine { await this.#startHeartbeating({ runId, snapshotId, intervalSeconds }); } - async #handleStalledSnapshot({ runId, snapshotId }: { runId: string; snapshotId: string }) { - const latestSnapshot = await this.#getLatestExecutionSnapshot(runId); + async #handleStalledSnapshot({ + runId, + snapshotId, + tx, + }: { + runId: string; + snapshotId: string; + tx?: PrismaClientOrTransaction; + }) { + const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? this.prisma, runId); if (!latestSnapshot) { this.logger.error("RunEngine.#handleStalledSnapshot() no latest snapshot found", { runId, @@ -848,8 +858,6 @@ export class RunEngine { assertNever(latestSnapshot.executionStatus); } } - - //todo we need to return the run to the queue in the correct state. } } From b67da9e196a1734ebbf6e4e4287fd9f1ef6b7256 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 11 Oct 2024 17:04:49 +0100 Subject: [PATCH 019/485] Fix for the TaskRunExecutionSnapshot accidentally being on v2 Tasks --- internal-packages/database/prisma/schema.prisma | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 84182aaf65..2848baa512 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1113,8 +1113,6 @@ model TaskAttempt { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - executionSnapshot TaskRunExecutionSnapshot[] - @@unique([taskId, number]) } @@ -1727,7 +1725,7 @@ model TaskRun { batchItems BatchTaskRunItem[] dependency TaskRunDependency? 
CheckpointRestoreEvent CheckpointRestoreEvent[] - executionSnapshot TaskRunExecutionSnapshot[] + executionSnapshots TaskRunExecutionSnapshot[] alerts ProjectAlert[] @@ -1886,8 +1884,8 @@ model TaskRunExecutionSnapshot { /// Attempt currentAttemptId String? - currentAttempt TaskAttempt? @relation(fields: [currentAttemptId], references: [id]) - currentAttemptStatus TaskAttemptStatus? + currentAttempt TaskRunAttempt? @relation(fields: [currentAttemptId], references: [id]) + currentAttemptStatus TaskRunAttemptStatus? /// todo Checkpoint @@ -2098,7 +2096,8 @@ model TaskRunAttempt { batchTaskRunItems BatchTaskRunItem[] CheckpointRestoreEvent CheckpointRestoreEvent[] alerts ProjectAlert[] - childRuns TaskRun[] @relation("TaskParentRunAttempt") + childRuns TaskRun[] @relation("TaskParentRunAttempt") + executionSnapshots TaskRunExecutionSnapshot[] @@unique([taskRunId, number]) @@index([taskRunId]) From 4c20d438a7b9d3f4bdf8ed2192b1e6ede2f1dcaa Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 11 Oct 2024 17:12:41 +0100 Subject: [PATCH 020/485] Added tracing to the run engine and create task run attempt --- .../run-engine/src/engine/consts.ts | 1 + .../run-engine/src/engine/index.test.ts | 18 +- .../run-engine/src/engine/index.ts | 769 +++++++++++++----- .../run-engine/src/engine/machinePresets.ts | 63 ++ packages/core/src/v3/schemas/common.ts | 1 + 5 files changed, 650 insertions(+), 202 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/consts.ts create mode 100644 internal-packages/run-engine/src/engine/machinePresets.ts diff --git a/internal-packages/run-engine/src/engine/consts.ts b/internal-packages/run-engine/src/engine/consts.ts new file mode 100644 index 0000000000..6ea6f54c38 --- /dev/null +++ b/internal-packages/run-engine/src/engine/consts.ts @@ -0,0 +1 @@ +export const MAX_TASK_RUN_ATTEMPTS = 250; diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 
c9dc6b3d62..796163f6e6 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -2,6 +2,7 @@ import { expect } from "vitest"; import { containerTest } from "@internal/testcontainers"; import { RunEngine } from "./index.js"; import { PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; +import { trace } from "@opentelemetry/api"; describe("RunEngine", () => { containerTest( @@ -23,6 +24,18 @@ describe("RunEngine", () => { tasksPerWorker: 10, pollIntervalMs: 100, }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + }, + tracer: trace.getTracer("test", "0.0.0"), }); const run = await engine.trigger( @@ -88,7 +101,10 @@ describe("RunEngine", () => { expect(envConcurrencyBefore).toBe(0); //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue("test_12345", run.masterQueue); + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + }); expect(dequeued?.runId).toBe(run.id); expect(dequeued?.executionStatus).toBe("DEQUEUED_FOR_EXECUTION"); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 5fb9a15713..5dba293d01 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1,7 +1,14 @@ import { Worker, type WorkerConcurrencyOptions } from "@internal/redis-worker"; -import { trace } from "@opentelemetry/api"; +import { Attributes, Span, SpanKind, trace, Tracer } from "@opentelemetry/api"; import { Logger } from "@trigger.dev/core/logger"; -import { QueueOptions, TaskRunInternalError } from "@trigger.dev/core/v3"; +import { + MachinePreset, + MachinePresetName, + parsePacket, + QueueOptions, + TaskRunExecution, + TaskRunInternalError, +} from "@trigger.dev/core/v3"; import { 
generateFriendlyId, parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; import { $transaction, @@ -9,6 +16,7 @@ import { PrismaClient, PrismaClientOrTransaction, TaskRun, + TaskRunAttemptStatus, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, @@ -21,12 +29,8 @@ import { z } from "zod"; import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; - -class NotImplementedError extends Error { - constructor(message: string) { - super(message); - } -} +import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; +import { machinePresetFromConfig } from "./machinePresets"; type Options = { redis: RedisOptions; @@ -34,6 +38,11 @@ type Options = { worker: WorkerConcurrencyOptions & { pollIntervalMs?: number; }; + machines: { + defaultMachine: MachinePresetName; + machines: Record; + }; + tracer: Tracer; }; type TriggerParams = { @@ -103,6 +112,7 @@ export class RunEngine { runQueue: RunQueue; private worker: EngineWorker; private logger = new Logger("RunEngine", "debug"); + private tracer: Tracer; constructor(private readonly options: Options) { this.prisma = options.prisma; @@ -142,10 +152,12 @@ export class RunEngine { await this.#handleStalledSnapshot(payload); }, expireRun: async ({ payload }) => { - await this.expire(payload.runId); + await this.expire({ runId: payload.runId }); }, }, }); + + this.tracer = options.tracer; } //MARK: - Run functions @@ -191,169 +203,186 @@ export class RunEngine { ) { const prisma = tx ?? this.prisma; - const status = delayUntil ? 
"DELAYED" : "PENDING"; - - //create run - const taskRun = await prisma.taskRun.create({ - data: { - status, - number, + return this.#trace( + "createRunAttempt", + { friendlyId, - runtimeEnvironmentId: environment.id, + environmentId: environment.id, projectId: environment.project.id, - idempotencyKey, taskIdentifier, - payload, - payloadType, - context, - traceContext, - traceId, - spanId, - parentSpanId, - lockedToVersionId, - concurrencyKey, - queue: queueName, - masterQueue, - isTest, - delayUntil, - queuedAt, - maxAttempts, - ttl, - tags: - tags.length === 0 - ? undefined - : { - connect: tags.map((id) => ({ id })), - }, - parentTaskRunId, - parentTaskRunAttemptId, - rootTaskRunId, - batchId, - resumeParentOnCompletion, - depth, - metadata, - metadataType, - seedMetadata, - seedMetadataType, - executionSnapshot: { - create: { - engine: "V2", - executionStatus: "RUN_CREATED", - description: "Run was created", - runStatus: status, - }, - }, }, - }); - - await this.redlock.using([taskRun.id], 5000, async (signal) => { - //create associated waitpoint (this completes when the run completes) - const associatedWaitpoint = await this.#createRunAssociatedWaitpoint(prisma, { - projectId: environment.project.id, - completedByTaskRunId: taskRun.id, - }); - - //triggerAndWait or batchTriggerAndWait - if (resumeParentOnCompletion && parentTaskRunId) { - //this will block the parent run from continuing until this waitpoint is completed (and removed) - await this.#blockRunWithWaitpoint(prisma, { - orgId: environment.organization.id, - runId: parentTaskRunId, - waitpoint: associatedWaitpoint, - }); - } - - //Make sure lock extension succeeded - if (signal.aborted) { - throw signal.error; - } - - if (queue) { - const concurrencyLimit = - typeof queue.concurrencyLimit === "number" - ? Math.max(0, queue.concurrencyLimit) - : undefined; + async (span) => { + const status = delayUntil ? 
"DELAYED" : "PENDING"; - let taskQueue = await prisma.taskQueue.findFirst({ - where: { + //create run + const taskRun = await prisma.taskRun.create({ + data: { + status, + number, + friendlyId, runtimeEnvironmentId: environment.id, - name: queueName, + projectId: environment.project.id, + idempotencyKey, + taskIdentifier, + payload, + payloadType, + context, + traceContext, + traceId, + spanId, + parentSpanId, + lockedToVersionId, + concurrencyKey, + queue: queueName, + masterQueue, + isTest, + delayUntil, + queuedAt, + maxAttempts, + ttl, + tags: + tags.length === 0 + ? undefined + : { + connect: tags.map((id) => ({ id })), + }, + parentTaskRunId, + parentTaskRunAttemptId, + rootTaskRunId, + batchId, + resumeParentOnCompletion, + depth, + metadata, + metadataType, + seedMetadata, + seedMetadataType, + executionSnapshot: { + create: { + engine: "V2", + executionStatus: "RUN_CREATED", + description: "Run was created", + runStatus: status, + }, + }, }, }); - if (taskQueue) { - taskQueue = await prisma.taskQueue.update({ - where: { - id: taskQueue.id, - }, - data: { - concurrencyLimit, - rateLimit: queue.rateLimit, - }, - }); - } else { - taskQueue = await prisma.taskQueue.create({ - data: { - friendlyId: generateFriendlyId("queue"), - name: queueName, - concurrencyLimit, - runtimeEnvironmentId: environment.id, - projectId: environment.project.id, - rateLimit: queue.rateLimit, - type: "NAMED", - }, - }); - } + span.setAttribute("runId", taskRun.id); - if (typeof taskQueue.concurrencyLimit === "number") { - await this.runQueue.updateQueueConcurrencyLimits( - environment, - taskQueue.name, - taskQueue.concurrencyLimit - ); - } else { - await this.runQueue.removeQueueConcurrencyLimits(environment, taskQueue.name); - } - } - - if (taskRun.delayUntil) { - const delayWaitpoint = await this.#createDateTimeWaitpoint(prisma, { - projectId: environment.project.id, - completedAfter: taskRun.delayUntil, - }); + await this.redlock.using([taskRun.id], 5000, async (signal) => { + 
//create associated waitpoint (this completes when the run completes) + const associatedWaitpoint = await this.#createRunAssociatedWaitpoint(prisma, { + projectId: environment.project.id, + completedByTaskRunId: taskRun.id, + }); - await prisma.taskRunWaitpoint.create({ - data: { - taskRunId: taskRun.id, - waitpointId: delayWaitpoint.id, - projectId: delayWaitpoint.projectId, - }, + //triggerAndWait or batchTriggerAndWait + if (resumeParentOnCompletion && parentTaskRunId) { + //this will block the parent run from continuing until this waitpoint is completed (and removed) + await this.#blockRunWithWaitpoint(prisma, { + orgId: environment.organization.id, + runId: parentTaskRunId, + waitpoint: associatedWaitpoint, + }); + } + + //Make sure lock extension succeeded + if (signal.aborted) { + throw signal.error; + } + + if (queue) { + const concurrencyLimit = + typeof queue.concurrencyLimit === "number" + ? Math.max(0, queue.concurrencyLimit) + : undefined; + + let taskQueue = await prisma.taskQueue.findFirst({ + where: { + runtimeEnvironmentId: environment.id, + name: queueName, + }, + }); + + if (taskQueue) { + taskQueue = await prisma.taskQueue.update({ + where: { + id: taskQueue.id, + }, + data: { + concurrencyLimit, + rateLimit: queue.rateLimit, + }, + }); + } else { + taskQueue = await prisma.taskQueue.create({ + data: { + friendlyId: generateFriendlyId("queue"), + name: queueName, + concurrencyLimit, + runtimeEnvironmentId: environment.id, + projectId: environment.project.id, + rateLimit: queue.rateLimit, + type: "NAMED", + }, + }); + } + + if (typeof taskQueue.concurrencyLimit === "number") { + await this.runQueue.updateQueueConcurrencyLimits( + environment, + taskQueue.name, + taskQueue.concurrencyLimit + ); + } else { + await this.runQueue.removeQueueConcurrencyLimits(environment, taskQueue.name); + } + } + + if (taskRun.delayUntil) { + const delayWaitpoint = await this.#createDateTimeWaitpoint(prisma, { + projectId: environment.project.id, + completedAfter: 
taskRun.delayUntil, + }); + + await prisma.taskRunWaitpoint.create({ + data: { + taskRunId: taskRun.id, + waitpointId: delayWaitpoint.id, + projectId: delayWaitpoint.projectId, + }, + }); + } + + if (!taskRun.delayUntil && taskRun.ttl) { + const expireAt = parseNaturalLanguageDuration(taskRun.ttl); + + if (expireAt) { + await this.worker.enqueue({ + id: `expireRun:${taskRun.id}`, + job: "expireRun", + payload: { runId: taskRun.id }, + }); + } + } + + //Make sure lock extension succeeded + if (signal.aborted) { + throw signal.error; + } + + //enqueue the run if it's not delayed + if (!taskRun.delayUntil) { + await this.#enqueueRun(taskRun, environment, prisma); + } }); - } - if (!taskRun.delayUntil && taskRun.ttl) { - const expireAt = parseNaturalLanguageDuration(taskRun.ttl); + //todo release parent concurrency (for the project, task, and environment, but not for the queue?) + //todo if this has been triggered with triggerAndWait or batchTriggerAndWait - if (expireAt) { - await this.worker.enqueue({ job: "expireRun", payload: { runId: taskRun.id } }); - } + return taskRun; } - - //Make sure lock extension succeeded - if (signal.aborted) { - throw signal.error; - } - - //enqueue the run if it's not delayed - if (!taskRun.delayUntil) { - await this.#enqueueRun(taskRun, environment, prisma); - } - }); - - //todo release parent concurrency (for the project, task, and environment, but not for the queue?) - //todo if this has been triggered with triggerAndWait or batchTriggerAndWait - - return taskRun; + ); } /** Triggers multiple runs. @@ -377,41 +406,53 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }) { const prisma = tx ?? 
this.prisma; - const message = await this.runQueue.dequeueMessageInSharedQueue(consumerId, masterQueue); - if (!message) { - return null; - } - - const newSnapshot = await this.redlock.using([message.messageId], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, message.messageId); - if (!snapshot) { - throw new Error( - `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${message.messageId}` - ); + return this.#trace("createRunAttempt", { consumerId, masterQueue }, async (span) => { + const message = await this.runQueue.dequeueMessageInSharedQueue(consumerId, masterQueue); + if (!message) { + return null; } - if (!["QUEUED", "BLOCKED_BY_WAITPOINTS"].includes(snapshot.executionStatus)) { - //todo put run in a system failure state - throw new Error( - `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${message.messageId}\n ${snapshot.id}:${snapshot.executionStatus}` - ); - } + span.setAttribute("runId", message.messageId); - const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run: { - id: message.messageId, - status: snapshot.runStatus, - }, - snapshot: { - executionStatus: "DEQUEUED_FOR_EXECUTION", - description: "Run was dequeued for execution", - }, + const newSnapshot = await this.redlock.using([message.messageId], 5000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, message.messageId); + if (!snapshot) { + throw new Error( + `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${message.messageId}` + ); + } + + if (!["QUEUED", "BLOCKED_BY_WAITPOINTS"].includes(snapshot.executionStatus)) { + await this.#systemFailure({ + runId: message.messageId, + error: { + type: "INTERNAL_ERROR", + code: "TASK_DEQUEUED_INVALID_STATE", + message: `Task was in the ${snapshot.executionStatus} state when it was dequeued for execution.`, + }, + tx: prisma, + }); + throw new Error( + `RunEngine.dequeueFromMasterQueue(): Run is not in 
a valid state to be dequeued: ${message.messageId}\n ${snapshot.id}:${snapshot.executionStatus}` + ); + } + + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: { + id: message.messageId, + status: snapshot.runStatus, + }, + snapshot: { + executionStatus: "DEQUEUED_FOR_EXECUTION", + description: "Run was dequeued for execution", + }, + }); + + return newSnapshot; }); return newSnapshot; }); - - return newSnapshot; } async createRunAttempt({ @@ -425,29 +466,282 @@ export class RunEngine { }) { const prisma = tx ?? this.prisma; - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (!latestSnapshot) { - return this.systemFailure({ - runId, - error: { - type: "INTERNAL_ERROR", - code: "TASK_HAS_N0_EXECUTION_SNAPSHOT", - message: "Task had no execution snapshot when trying to create a run attempt", - }, - tx: prisma, - }); - } + return this.#trace("createRunAttempt", { runId, snapshotId }, async (span) => { + return this.redlock.using([runId], 5000, async (signal) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (!latestSnapshot) { + await this.#systemFailure({ + runId, + error: { + type: "INTERNAL_ERROR", + code: "TASK_HAS_N0_EXECUTION_SNAPSHOT", + message: "Task had no execution snapshot when trying to create a run attempt", + }, + tx: prisma, + }); + throw new ServiceValidationError("No snapshot", 404); + } + + if (latestSnapshot.id !== snapshotId) { + //if there is a big delay between the snapshot and the attempt, the snapshot might have changed + //we just want to log because elsewhere it should have been put back into a state where it can be attempted + this.logger.warn( + "RunEngine.createRunAttempt(): snapshot has changed since the attempt was created, ignoring." 
+ ); + throw new ServiceValidationError("Snapshot changed", 409); + } + + const environment = await this.#getAuthenticatedEnvironmentFromRun(runId, prisma); + if (!environment) { + throw new ServiceValidationError("Environment not found", 404); + } - //todo check if the snapshot is the latest one + const taskRun = await prisma.taskRun.findFirst({ + where: { + id: runId, + }, + include: { + tags: true, + attempts: { + take: 1, + orderBy: { + number: "desc", + }, + }, + lockedBy: { + include: { + worker: { + select: { + id: true, + version: true, + sdkVersion: true, + cliVersion: true, + supportsLazyAttempts: true, + }, + }, + }, + }, + batchItems: { + include: { + batchTaskRun: true, + }, + }, + }, + }); + + this.logger.debug("Creating a task run attempt", { taskRun }); + + if (!taskRun) { + throw new ServiceValidationError("Task run not found", 404); + } + + span.setAttribute("projectId", taskRun.projectId); + span.setAttribute("environmentId", taskRun.runtimeEnvironmentId); + span.setAttribute("taskRunId", taskRun.id); + span.setAttribute("taskRunFriendlyId", taskRun.friendlyId); + + if (taskRun.status === "CANCELED") { + throw new ServiceValidationError("Task run is cancelled", 400); + } + + if (!taskRun.lockedBy) { + throw new ServiceValidationError("Task run is not locked", 400); + } + + const queue = await prisma.taskQueue.findUnique({ + where: { + runtimeEnvironmentId_name: { + runtimeEnvironmentId: environment.id, + name: taskRun.queue, + }, + }, + }); + + if (!queue) { + throw new ServiceValidationError("Queue not found", 404); + } + + const nextAttemptNumber = taskRun.attempts[0] ? taskRun.attempts[0].number + 1 : 1; + + if (nextAttemptNumber > MAX_TASK_RUN_ATTEMPTS) { + await this.#crash({ + runId: taskRun.id, + error: { + type: "INTERNAL_ERROR", + code: "TASK_RUN_CRASHED", + message: taskRun.lockedBy.worker.supportsLazyAttempts + ? "Max attempts reached." + : "Max attempts reached. 
Please upgrade your CLI and SDK.", + }, + }); + + throw new ServiceValidationError("Max attempts reached", 400); + } + + const result = await $transaction( + prisma, + async (tx) => { + const attempt = await tx.taskRunAttempt.create({ + data: { + number: nextAttemptNumber, + friendlyId: generateFriendlyId("attempt"), + taskRunId: taskRun.id, + startedAt: new Date(), + backgroundWorkerId: taskRun.lockedBy!.worker.id, + backgroundWorkerTaskId: taskRun.lockedBy!.id, + status: "EXECUTING", + queueId: queue.id, + runtimeEnvironmentId: environment.id, + }, + }); + + const run = await tx.taskRun.update({ + where: { + id: taskRun.id, + }, + data: { + status: "EXECUTING", + }, + include: { + tags: true, + lockedBy: { + include: { worker: true }, + }, + }, + }); + + const newSnapshot = await this.#createExecutionSnapshot(tx, { + run, + attempt, + snapshot: { + executionStatus: "EXECUTING", + description: "Attempt created, starting execution", + }, + }); + + if (taskRun.ttl) { + //don't expire the run, it's going to execute + await this.worker.ack(`expireRun:${taskRun.id}`); + } + + return { run, attempt, snapshot: newSnapshot }; + }, + (error) => { + this.logger.error("RunEngine.createRunAttempt(): prisma.$transaction error", { + code: error.code, + meta: error.meta, + stack: error.stack, + message: error.message, + name: error.name, + }); + } + ); + + if (!result) { + this.logger.error("RunEngine.createRunAttempt(): failed to create task run attempt", { + runId: taskRun.id, + nextAttemptNumber, + }); + throw new ServiceValidationError("Failed to create task run attempt", 500); + } + + const { run, attempt, snapshot } = result; + + const machinePreset = machinePresetFromConfig({ + machines: this.options.machines.machines, + defaultMachine: this.options.machines.defaultMachine, + config: taskRun.lockedBy.machineConfig ?? {}, + }); + + const metadata = await parsePacket({ + data: taskRun.metadata ?? 
undefined, + dataType: taskRun.metadataType, + }); + + const execution: TaskRunExecution = { + task: { + id: run.lockedBy!.slug, + filePath: run.lockedBy!.filePath, + exportName: run.lockedBy!.exportName, + }, + attempt: { + id: attempt.friendlyId, + number: attempt.number, + startedAt: attempt.startedAt ?? attempt.createdAt, + backgroundWorkerId: run.lockedBy!.worker.id, + backgroundWorkerTaskId: run.lockedBy!.id, + status: "EXECUTING" as const, + }, + run: { + id: run.friendlyId, + payload: run.payload, + payloadType: run.payloadType, + context: run.context, + createdAt: run.createdAt, + tags: run.tags.map((tag) => tag.name), + isTest: run.isTest, + idempotencyKey: run.idempotencyKey ?? undefined, + startedAt: run.startedAt ?? run.createdAt, + durationMs: run.usageDurationMs, + costInCents: run.costInCents, + baseCostInCents: run.baseCostInCents, + maxAttempts: run.maxAttempts ?? undefined, + version: run.lockedBy!.worker.version, + metadata, + maxDuration: run.maxDurationInSeconds ?? undefined, + }, + queue: { + id: queue.friendlyId, + name: queue.name, + }, + environment: { + id: environment.id, + slug: environment.slug, + type: environment.type, + }, + organization: { + id: environment.organization.id, + slug: environment.organization.slug, + name: environment.organization.title, + }, + project: { + id: environment.project.id, + ref: environment.project.externalRef, + slug: environment.project.slug, + name: environment.project.name, + }, + batch: + taskRun.batchItems[0] && taskRun.batchItems[0].batchTaskRun + ? 
{ id: taskRun.batchItems[0].batchTaskRun.friendlyId } + : undefined, + }; + + return { + run, + attempt, + snapshot, + }; + }); + }); } async waitForDuration() {} async complete(runId: string, completion: any) {} - async expire(runId: string) {} + async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} - async systemFailure({ + async #systemFailure({ + runId, + error, + tx, + }: { + runId: string; + error: TaskRunInternalError; + tx?: PrismaClientOrTransaction; + }) {} + + async #crash({ runId, error, tx, @@ -688,9 +982,11 @@ export class RunEngine { prisma: PrismaClientOrTransaction, { run, + attempt, snapshot, }: { run: { id: string; status: TaskRunStatus }; + attempt?: { id: string; status: TaskRunAttemptStatus }; snapshot: { executionStatus: TaskRunExecutionStatus; description: string; @@ -699,11 +995,13 @@ export class RunEngine { ) { const newSnapshot = await prisma.taskRunExecutionSnapshot.create({ data: { - runId: run.id, engine: "V2", executionStatus: snapshot.executionStatus, description: snapshot.description, + runId: run.id, runStatus: run.status, + currentAttemptId: attempt?.id, + currentAttemptStatus: attempt?.status, }, }); @@ -859,6 +1157,75 @@ export class RunEngine { } } } + + async #getAuthenticatedEnvironmentFromRun(runId: string, tx?: PrismaClientOrTransaction) { + const prisma = tx ?? 
this.prisma; + const taskRun = await prisma.taskRun.findUnique({ + where: { + id: runId, + }, + include: { + runtimeEnvironment: { + include: { + organization: true, + project: true, + }, + }, + }, + }); + + if (!taskRun) { + return; + } + + return taskRun?.runtimeEnvironment; + } + + async #trace( + trace: string, + attributes: Attributes | undefined, + fn: (span: Span) => Promise + ): Promise { + return this.tracer.startActiveSpan( + `${this.constructor.name}.${trace}`, + { attributes, kind: SpanKind.SERVER }, + async (span) => { + try { + return await fn(span); + } catch (e) { + if (e instanceof ServiceValidationError) { + throw e; + } + + if (e instanceof Error) { + span.recordException(e); + } else { + span.recordException(new Error(String(e))); + } + + throw e; + } finally { + span.end(); + } + } + ); + } +} + +export class ServiceValidationError extends Error { + constructor( + message: string, + public status?: number + ) { + super(message); + this.name = "ServiceValidationError"; + } +} + +class NotImplementedError extends Error { + constructor(message: string) { + super(message); + } } /* diff --git a/internal-packages/run-engine/src/engine/machinePresets.ts b/internal-packages/run-engine/src/engine/machinePresets.ts new file mode 100644 index 0000000000..7e794fdcf1 --- /dev/null +++ b/internal-packages/run-engine/src/engine/machinePresets.ts @@ -0,0 +1,63 @@ +import { MachineConfig, MachinePreset, MachinePresetName } from "@trigger.dev/core/v3"; +import { Logger } from "@trigger.dev/core/logger"; + +const logger = new Logger("machinePresetFromConfig"); + +export function machinePresetFromConfig({ + defaultMachine, + machines, + config, +}: { + defaultMachine: MachinePresetName; + machines: Record; + config: unknown; +}): MachinePreset { + const parsedConfig = MachineConfig.safeParse(config); + + if (!parsedConfig.success) { + logger.error("Failed to parse machine config", { config }); + + return machinePresetFromName(machines, "small-1x"); + } + + if 
(parsedConfig.data.preset) { + return machinePresetFromName(machines, parsedConfig.data.preset); + } + + if (parsedConfig.data.cpu && parsedConfig.data.memory) { + const name = derivePresetNameFromValues( + machines, + parsedConfig.data.cpu, + parsedConfig.data.memory + ); + if (!name) { + return machinePresetFromName(machines, defaultMachine); + } + + return machinePresetFromName(machines, name); + } + + return machinePresetFromName(machines, "small-1x"); +} + +export function machinePresetFromName( + machines: Record, + name: MachinePresetName +): MachinePreset { + return { + ...machines[name], + }; +} + +// Finds the smallest machine preset name that satisfies the given CPU and memory requirements +function derivePresetNameFromValues( + machines: Record, + cpu: number, + memory: number +): MachinePresetName | undefined { + for (const [name, preset] of Object.entries(machines)) { + if (preset.cpu >= cpu && preset.memory >= memory) { + return name as MachinePresetName; + } + } +} diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 221d99dbd5..17d5e12ab5 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -125,6 +125,7 @@ export const TaskRunInternalError = z.object({ "POD_EVICTED", "POD_UNKNOWN_ERROR", "TASK_HAS_N0_EXECUTION_SNAPSHOT", + "TASK_DEQUEUED_INVALID_STATE", ]), message: z.string().optional(), stackTrace: z.string().optional(), From 7960aa5472960b0c6319f6743038bf8c701c12b3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 14 Oct 2024 15:56:55 +0100 Subject: [PATCH 021/485] Added creatingAttempt test (currently failing) --- .../run-engine/src/engine/index.test.ts | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 796163f6e6..25c4ba4054 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ 
b/internal-packages/run-engine/src/engine/index.test.ts @@ -38,6 +38,7 @@ describe("RunEngine", () => { tracer: trace.getTracer("test", "0.0.0"), }); + //trigger the run const run = await engine.trigger( { number: 1, @@ -57,7 +58,6 @@ describe("RunEngine", () => { }, prisma ); - expect(run).toBeDefined(); expect(run.friendlyId).toBe("run_1234"); @@ -112,6 +112,16 @@ describe("RunEngine", () => { authenticatedEnvironment ); expect(envConcurrencyAfter).toBe(1); + + //create an attempt + const attemptResult = await engine.createRunAttempt({ + runId: dequeued!.runId, + snapshotId: dequeued!.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.attempt.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); } ); From e8a57232c2230635eb8f90485f1b19ad3b48fe9b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 14 Oct 2024 15:57:24 +0100 Subject: [PATCH 022/485] =?UTF-8?q?Added=20the=20messages=20in=20the=20Run?= =?UTF-8?q?Engine=20(temporary=20as=20they=E2=80=99ll=20need=20to=20move?= =?UTF-8?q?=20to=20core=20so=20the=20new=20Workers=20can=20use=20them=20to?= =?UTF-8?q?o)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../run-engine/src/engine/messages.ts | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 internal-packages/run-engine/src/engine/messages.ts diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts new file mode 100644 index 0000000000..b0bbeacf13 --- /dev/null +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -0,0 +1,60 @@ +import { MachinePreset, EnvironmentType } from "@trigger.dev/core/v3"; +import { z } from "zod"; + +//turn this into a zod schema discriminated union, like the messages we have in core. 
+//it will need to move into core + +const StartRunMessage = z.object({ + action: z.literal("START_RUN"), + // The payload allows us to a discriminated union with the version + payload: z.object({ + version: z.literal("1"), + execution: z.object({ + id: z.string(), + status: z.literal("DEQUEUED_FOR_EXECUTION"), + }), + image: z.string().optional(), + checkpoint: z + .object({ + id: z.string(), + type: z.string(), + location: z.string(), + reason: z.string().optional(), + }) + .optional(), + backgroundWorker: z.object({ + id: z.string(), + version: z.string(), + }), + run: z.object({ + id: z.string(), + friendlyId: z.string(), + isTest: z.boolean(), + machine: MachinePreset, + attemptNumber: z.number(), + masterQueue: z.string(), + }), + environment: z.object({ + id: z.string(), + type: EnvironmentType, + }), + organization: z.object({ + id: z.string(), + }), + project: z.object({ + id: z.string(), + }), + traceContext: z.record(z.unknown()), + }), +}); +export type StartRunMessage = z.infer; + +export const ContinueRunMessage = z.object({ + action: z.literal("CONTINUE_RUN"), + payload: z.object({ + version: z.literal("1"), + }), +}); +export type ContinueRunMessage = z.infer; + +export const Messages = z.discriminatedUnion("action", [StartRunMessage]); From 7c149a9f5a27ee1c13339aa48d2f6056a3d6bdc4 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 11:55:22 +0100 Subject: [PATCH 023/485] Work on getting the best worker, task, deployment from a run (that works for dev and deploy) --- .../run-engine/src/engine/consts.ts | 1 + .../run-engine/src/engine/db/worker.ts | 204 ++++++++++++++++++ .../run-engine/src/engine/index.ts | 33 ++- 3 files changed, 234 insertions(+), 4 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/db/worker.ts diff --git a/internal-packages/run-engine/src/engine/consts.ts b/internal-packages/run-engine/src/engine/consts.ts index 6ea6f54c38..ee183d1c5c 100644 --- 
a/internal-packages/run-engine/src/engine/consts.ts +++ b/internal-packages/run-engine/src/engine/consts.ts @@ -1 +1,2 @@ export const MAX_TASK_RUN_ATTEMPTS = 250; +export const CURRENT_DEPLOYMENT_LABEL = "current"; diff --git a/internal-packages/run-engine/src/engine/db/worker.ts b/internal-packages/run-engine/src/engine/db/worker.ts new file mode 100644 index 0000000000..bde2f16142 --- /dev/null +++ b/internal-packages/run-engine/src/engine/db/worker.ts @@ -0,0 +1,204 @@ +import { + BackgroundWorker, + BackgroundWorkerTask, + Prisma, + PrismaClientOrTransaction, + WorkerDeployment, +} from "@trigger.dev/database"; +import { CURRENT_DEPLOYMENT_LABEL } from "../consts"; + +type RunWithMininimalEnvironment = Prisma.TaskRunGetPayload<{ + include: { + runtimeEnvironment: { + select: { + id: true; + type: true; + }; + }; + }; +}>; + +type RunWithBackgroundWorkerTasksResult = + | { + success: false; + code: "NO_RUN" | "NO_WORKER" | "TASK_NOT_IN_LATEST" | "TASK_NEVER_REGISTERED"; + message: string; + } + | { + success: true; + run: RunWithMininimalEnvironment; + worker: BackgroundWorker; + task: BackgroundWorkerTask; + deployment: WorkerDeployment | null; + }; + +export async function getRunWithBackgroundWorkerTasks( + prisma: PrismaClientOrTransaction, + runId: string +): Promise { + const run = await prisma.taskRun.findFirst({ + where: { + id: runId, + }, + include: { + runtimeEnvironment: { + select: { + id: true, + type: true, + }, + }, + lockedToVersion: { + include: { + deployment: true, + tasks: true, + }, + }, + }, + }); + + if (!run) { + return { + success: false as const, + code: "NO_RUN", + message: `No run found with id: ${runId}`, + }; + } + + //get the relevant BackgroundWorker with tasks and deployment (if not DEV) + const workerWithTasks = run.lockedToVersionId + ? await getWorkerDeploymentFromWorker(prisma, run.lockedToVersionId) + : run.runtimeEnvironment.type === "DEVELOPMENT" + ? 
await getMostRecentWorker(prisma, run.runtimeEnvironmentId) + : await getWorkerFromCurrentlyPromotedDeployment(prisma, run.runtimeEnvironmentId); + + if (!workerWithTasks) { + return { + success: false as const, + code: "NO_WORKER", + message: `No worker found for run: ${run.id}`, + }; + } + + const backgroundTask = workerWithTasks.tasks.find((task) => task.slug === run.taskIdentifier); + + if (!backgroundTask) { + const nonCurrentTask = await prisma.backgroundWorkerTask.findFirst({ + where: { + slug: run.taskIdentifier, + projectId: run.projectId, + runtimeEnvironmentId: run.runtimeEnvironmentId, + }, + include: { + worker: true, + }, + }); + + if (nonCurrentTask) { + return { + success: false as const, + code: "TASK_NOT_IN_LATEST", + message: `Task not found in latest version: ${run.taskIdentifier}. Found in ${nonCurrentTask.worker.version}`, + }; + } else { + return { + success: false as const, + code: "TASK_NEVER_REGISTERED", + message: `Task has never been registered (in dev or deployed): ${run.taskIdentifier}`, + }; + } + } + + return { + success: true as const, + run, + worker: workerWithTasks.worker, + task: backgroundTask, + deployment: workerWithTasks.deployment, + }; +} + +type WorkerDeploymentWithWorkerTasks = { + worker: BackgroundWorker; + tasks: BackgroundWorkerTask[]; + deployment: WorkerDeployment | null; +}; + +export async function getWorkerDeploymentFromWorker( + prisma: PrismaClientOrTransaction, + workerId: string +): Promise { + const worker = await prisma.backgroundWorker.findUnique({ + where: { + id: workerId, + }, + include: { + deployment: true, + tasks: true, + }, + }); + + if (!worker) { + return null; + } + + return { worker, tasks: worker.tasks, deployment: worker.deployment }; +} + +export async function getMostRecentWorker( + prisma: PrismaClientOrTransaction, + environmentId: string +): Promise { + const worker = await prisma.backgroundWorker.findFirst({ + where: { + runtimeEnvironmentId: environmentId, + }, + include: { + 
deployment: true, + tasks: true, + }, + orderBy: { + id: "desc", + }, + }); + + if (!worker) { + return null; + } + + return { worker, tasks: worker.tasks, deployment: worker.deployment }; +} + +export async function getWorkerFromCurrentlyPromotedDeployment( + prisma: PrismaClientOrTransaction, + environmentId: string +): Promise { + const promotion = await prisma.workerDeploymentPromotion.findUnique({ + where: { + environmentId_label: { + environmentId, + label: CURRENT_DEPLOYMENT_LABEL, + }, + }, + include: { + deployment: { + include: { + worker: { + include: { + tasks: true, + }, + }, + }, + }, + }, + }); + + if (!promotion || !promotion.deployment.worker) { + return null; + } + + return { + worker: promotion.deployment.worker, + tasks: promotion.deployment.worker.tasks, + deployment: promotion.deployment, + }; +} diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 5dba293d01..dc1c1fb464 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -8,18 +8,22 @@ import { QueueOptions, TaskRunExecution, TaskRunInternalError, + EnvironmentType, } from "@trigger.dev/core/v3"; import { generateFriendlyId, parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; import { $transaction, + BackgroundWorkerTask, Prisma, PrismaClient, PrismaClientOrTransaction, + PrismaTransactionClient, TaskRun, TaskRunAttemptStatus, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, + WorkerDeployment, } from "@trigger.dev/database"; import assertNever from "assert-never"; import { Redis, type RedisOptions } from "ioredis"; @@ -29,8 +33,17 @@ import { z } from "zod"; import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; -import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; +import { CURRENT_DEPLOYMENT_LABEL, 
MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { machinePresetFromConfig } from "./machinePresets"; +import { ContinueRunMessage, StartRunMessage } from "./messages"; +import { + getMostRecentWorker, + getWorkerDeploymentFromWorker, + getWorkerFromCurrentlyPromotedDeployment, +} from "./queries"; +import { getRunWithBackgroundWorkerTasks } from "./db/worker"; + +const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = ["QUEUED", "BLOCKED_BY_WAITPOINTS"]; type Options = { redis: RedisOptions; @@ -256,7 +269,7 @@ export class RunEngine { metadataType, seedMetadata, seedMetadataType, - executionSnapshot: { + executionSnapshots: { create: { engine: "V2", executionStatus: "RUN_CREATED", @@ -404,9 +417,10 @@ export class RunEngine { consumerId: string; masterQueue: string; tx?: PrismaClientOrTransaction; - }) { + }): Promise { const prisma = tx ?? this.prisma; return this.#trace("createRunAttempt", { consumerId, masterQueue }, async (span) => { + //gets a fair run from this shared queue const message = await this.runQueue.dequeueMessageInSharedQueue(consumerId, masterQueue); if (!message) { return null; @@ -414,6 +428,7 @@ export class RunEngine { span.setAttribute("runId", message.messageId); + //lock the run so nothing else can modify it const newSnapshot = await this.redlock.using([message.messageId], 5000, async (signal) => { const snapshot = await this.#getLatestExecutionSnapshot(prisma, message.messageId); if (!snapshot) { @@ -422,7 +437,8 @@ export class RunEngine { ); } - if (!["QUEUED", "BLOCKED_BY_WAITPOINTS"].includes(snapshot.executionStatus)) { + if (!dequeuableExecutionStatuses.includes(snapshot.executionStatus)) { + //todo is there a way to recover this, so the run can be retried? 
await this.#systemFailure({ runId: message.messageId, error: { @@ -437,6 +453,14 @@ export class RunEngine { ); } + const result = await getRunWithBackgroundWorkerTasks(prisma, message.messageId); + + //todo create an internal function that is called to prepare a run for execution + // - get the worker and task (including deployment) + // - get the queue + // - deal with waiting for deploy + // - update the run, get attempts + const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: { id: message.messageId, @@ -714,6 +738,7 @@ export class RunEngine { taskRun.batchItems[0] && taskRun.batchItems[0].batchTaskRun ? { id: taskRun.batchItems[0].batchTaskRun.friendlyId } : undefined, + machine: machinePreset, }; return { From edb71eeada96d9483f34ef4b8fe75963396ae87d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 14:23:54 +0100 Subject: [PATCH 024/485] Move maxDuration and sanitizeQueue to @trigger.dev/core/v3/apps --- .../app/presenters/v3/SpanPresenter.server.ts | 2 +- .../app/v3/marqs/devQueueConsumer.server.ts | 2 +- apps/webapp/app/v3/marqs/index.server.ts | 10 +++------ .../v3/marqs/sharedQueueConsumer.server.ts | 2 +- packages/core/src/v3/apps/index.ts | 2 ++ packages/core/src/v3/apps/maxDuration.ts | 22 +++++++++++++++++++ packages/core/src/v3/apps/queueName.ts | 4 ++++ 7 files changed, 34 insertions(+), 10 deletions(-) create mode 100644 packages/core/src/v3/apps/maxDuration.ts create mode 100644 packages/core/src/v3/apps/queueName.ts diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index 6af0f09de1..4af491a692 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -9,7 +9,7 @@ import { eventRepository } from "~/v3/eventRepository.server"; import { machinePresetFromName } from "~/v3/machinePresets.server"; import { FINAL_ATTEMPT_STATUSES, isFinalRunStatus } from "~/v3/taskStatus"; 
import { BasePresenter } from "./basePresenter.server"; -import { getMaxDuration } from "~/v3/utils/maxDuration"; +import { getMaxDuration } from "@trigger.dev/core/v3/apps"; type Result = Awaited>; export type Span = NonNullable["span"]>; diff --git a/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts b/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts index 0dc162c638..f146f0ddfb 100644 --- a/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts +++ b/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts @@ -24,7 +24,7 @@ import { tracer, } from "../tracer.server"; import { DevSubscriber, devPubSub } from "./devPubSub.server"; -import { getMaxDuration } from "../utils/maxDuration"; +import { getMaxDuration } from "@trigger.dev/core/v3/apps"; const MessageBody = z.discriminatedUnion("type", [ z.object({ diff --git a/apps/webapp/app/v3/marqs/index.server.ts b/apps/webapp/app/v3/marqs/index.server.ts index 6839f7761b..e2ef37ea30 100644 --- a/apps/webapp/app/v3/marqs/index.server.ts +++ b/apps/webapp/app/v3/marqs/index.server.ts @@ -33,6 +33,7 @@ import { } from "./types"; import { V3VisibilityTimeout } from "./v3VisibilityTimeout.server"; import { concurrencyTracker } from "../services/taskRunConcurrencyTracker.server"; +export { sanitizeQueueName } from "@trigger.dev/core/v3/apps"; const KEY_PREFIX = "marqs:"; @@ -1660,7 +1661,7 @@ local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) o local concurrencyLimit = redis.call('GET', concurrencyLimitKey) -- Return current capacity and concurrency limits for the queue, env, org -return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, currentOrgConcurrency, orgConcurrencyLimit } +return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, currentOrgConcurrency, orgConcurrencyLimit } `, }); @@ -1689,7 +1690,7 @@ local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) o local concurrencyLimit = redis.call('GET', 
concurrencyLimitKey) -- Return current capacity and concurrency limits for the queue, env, org -return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, currentOrgConcurrency, orgConcurrencyLimit } +return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, currentOrgConcurrency, orgConcurrencyLimit } `, }); @@ -1908,8 +1909,3 @@ function getMarQSClient() { } } } - -// Only allow alphanumeric characters, underscores, hyphens, and slashes (and only the first 128 characters) -export function sanitizeQueueName(queueName: string) { - return queueName.replace(/[^a-zA-Z0-9_\-\/]/g, "").substring(0, 128); -} diff --git a/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts b/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts index a663011e4a..b580419cd0 100644 --- a/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts +++ b/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts @@ -44,7 +44,7 @@ import { EnvironmentVariable } from "../environmentVariables/repository"; import { machinePresetFromConfig } from "../machinePresets.server"; import { env } from "~/env.server"; import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; -import { getMaxDuration } from "../utils/maxDuration"; +import { getMaxDuration } from "@trigger.dev/core/v3/apps"; const WithTraceContext = z.object({ traceparent: z.string().optional(), diff --git a/packages/core/src/v3/apps/index.ts b/packages/core/src/v3/apps/index.ts index 97266f729f..a2a1f47020 100644 --- a/packages/core/src/v3/apps/index.ts +++ b/packages/core/src/v3/apps/index.ts @@ -7,3 +7,5 @@ export * from "./provider.js"; export * from "./isExecaChildProcess.js"; export * from "./friendlyId.js"; export * from "./duration.js"; +export * from "./maxDuration.js"; +export * from "./queueName.js"; diff --git a/packages/core/src/v3/apps/maxDuration.ts b/packages/core/src/v3/apps/maxDuration.ts new file mode 100644 index 0000000000..b19d2786fd --- /dev/null 
+++ b/packages/core/src/v3/apps/maxDuration.ts @@ -0,0 +1,22 @@ +const MINIMUM_MAX_DURATION = 5; +const MAXIMUM_MAX_DURATION = 2_147_483_647; // largest 32-bit signed integer + +export function clampMaxDuration(maxDuration: number): number { + return Math.min(Math.max(maxDuration, MINIMUM_MAX_DURATION), MAXIMUM_MAX_DURATION); +} + +export function getMaxDuration( + maxDuration?: number | null, + defaultMaxDuration?: number | null +): number | undefined { + if (!maxDuration) { + return defaultMaxDuration ?? undefined; + } + + // Setting the maxDuration to MAXIMUM_MAX_DURATION means we don't want to use the default maxDuration + if (maxDuration === MAXIMUM_MAX_DURATION) { + return; + } + + return maxDuration; +} diff --git a/packages/core/src/v3/apps/queueName.ts b/packages/core/src/v3/apps/queueName.ts new file mode 100644 index 0000000000..1416148978 --- /dev/null +++ b/packages/core/src/v3/apps/queueName.ts @@ -0,0 +1,4 @@ +// Only allow alphanumeric characters, underscores, hyphens, and slashes (and only the first 128 characters) +export function sanitizeQueueName(queueName: string) { + return queueName.replace(/[^a-zA-Z0-9_\-\/]/g, "").substring(0, 128); +} From fec5931a91f6f57463fe62eb300dc5c60a0e3913 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 14:27:56 +0100 Subject: [PATCH 025/485] RunEngine dequeueFromMasterQueue now prepares the run (WIP) --- .../run-engine/src/engine/db/worker.ts | 12 +- .../run-engine/src/engine/index.ts | 188 +++++++++++++++--- .../run-engine/src/run-queue/index.ts | 5 - 3 files changed, 173 insertions(+), 32 deletions(-) diff --git a/internal-packages/run-engine/src/engine/db/worker.ts b/internal-packages/run-engine/src/engine/db/worker.ts index bde2f16142..f721f244d7 100644 --- a/internal-packages/run-engine/src/engine/db/worker.ts +++ b/internal-packages/run-engine/src/engine/db/worker.ts @@ -3,6 +3,7 @@ import { BackgroundWorkerTask, Prisma, PrismaClientOrTransaction, + RuntimeEnvironmentType, WorkerDeployment, 
} from "@trigger.dev/database"; import { CURRENT_DEPLOYMENT_LABEL } from "../consts"; @@ -21,9 +22,15 @@ type RunWithMininimalEnvironment = Prisma.TaskRunGetPayload<{ type RunWithBackgroundWorkerTasksResult = | { success: false; - code: "NO_RUN" | "NO_WORKER" | "TASK_NOT_IN_LATEST" | "TASK_NEVER_REGISTERED"; + code: "NO_RUN"; message: string; } + | { + success: false; + code: "NO_WORKER" | "TASK_NOT_IN_LATEST" | "TASK_NEVER_REGISTERED"; + message: string; + run: RunWithMininimalEnvironment; + } | { success: true; run: RunWithMininimalEnvironment; @@ -76,6 +83,7 @@ export async function getRunWithBackgroundWorkerTasks( success: false as const, code: "NO_WORKER", message: `No worker found for run: ${run.id}`, + run, }; } @@ -98,12 +106,14 @@ export async function getRunWithBackgroundWorkerTasks( success: false as const, code: "TASK_NOT_IN_LATEST", message: `Task not found in latest version: ${run.taskIdentifier}. Found in ${nonCurrentTask.worker.version}`, + run, }; } else { return { success: false as const, code: "TASK_NEVER_REGISTERED", message: `Task has never been registered (in dev or deployed): ${run.taskIdentifier}`, + run, }; } } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index dc1c1fb464..23a700d06d 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -8,22 +8,23 @@ import { QueueOptions, TaskRunExecution, TaskRunInternalError, - EnvironmentType, } from "@trigger.dev/core/v3"; -import { generateFriendlyId, parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; +import { + generateFriendlyId, + parseNaturalLanguageDuration, + getMaxDuration, + sanitizeQueueName, +} from "@trigger.dev/core/v3/apps"; import { $transaction, - BackgroundWorkerTask, Prisma, PrismaClient, PrismaClientOrTransaction, - PrismaTransactionClient, TaskRun, TaskRunAttemptStatus, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, - 
WorkerDeployment, } from "@trigger.dev/database"; import assertNever from "assert-never"; import { Redis, type RedisOptions } from "ioredis"; @@ -33,15 +34,10 @@ import { z } from "zod"; import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; -import { CURRENT_DEPLOYMENT_LABEL, MAX_TASK_RUN_ATTEMPTS } from "./consts"; +import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; +import { getRunWithBackgroundWorkerTasks } from "./db/worker"; import { machinePresetFromConfig } from "./machinePresets"; import { ContinueRunMessage, StartRunMessage } from "./messages"; -import { - getMostRecentWorker, - getWorkerDeploymentFromWorker, - getWorkerFromCurrentlyPromotedDeployment, -} from "./queries"; -import { getRunWithBackgroundWorkerTasks } from "./db/worker"; const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = ["QUEUED", "BLOCKED_BY_WAITPOINTS"]; @@ -54,6 +50,7 @@ type Options = { machines: { defaultMachine: MachinePresetName; machines: Record; + baseCostInCents: number; }; tracer: Tracer; }; @@ -426,21 +423,24 @@ export class RunEngine { return null; } - span.setAttribute("runId", message.messageId); + const orgId = message.message.orgId; + const runId = message.messageId; + + span.setAttribute("runId", runId); //lock the run so nothing else can modify it - const newSnapshot = await this.redlock.using([message.messageId], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, message.messageId); + const newSnapshot = await this.redlock.using([runId], 5000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (!snapshot) { throw new Error( - `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${message.messageId}` + `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${runId}` ); } if 
(!dequeuableExecutionStatuses.includes(snapshot.executionStatus)) { //todo is there a way to recover this, so the run can be retried? await this.#systemFailure({ - runId: message.messageId, + runId, error: { type: "INTERNAL_ERROR", code: "TASK_DEQUEUED_INVALID_STATE", @@ -448,22 +448,156 @@ export class RunEngine { }, tx: prisma, }); - throw new Error( - `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${message.messageId}\n ${snapshot.id}:${snapshot.executionStatus}` + this.logger.error( + `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${runId}\n ${snapshot.id}:${snapshot.executionStatus}` ); + return null; + } + + const result = await getRunWithBackgroundWorkerTasks(prisma, runId); + + if (!result.success) { + switch (result.code) { + case "NO_RUN": { + //this should not happen, the run is unrecoverable so we'll ack it + this.logger.error("RunEngine.dequeueFromMasterQueue(): No run found", { + runId, + latestSnapshot: snapshot.id, + }); + await this.runQueue.acknowledgeMessage(orgId, runId); + return null; + } + case "NO_WORKER": + case "TASK_NEVER_REGISTERED": + case "TASK_NOT_IN_LATEST": { + this.logger.warn(`RunEngine.dequeueFromMasterQueue(): ${result.code}`, { + runId, + latestSnapshot: snapshot.id, + result, + }); + + if (result.run.runtimeEnvironment.type === "DEVELOPMENT") { + //requeue for 10s in the future, so we can try again + //todo when do we stop doing this, the run.ttl should deal with this. 
+ await this.runQueue.nackMessage( + orgId, + runId, + new Date(Date.now() + 10_000).getTime() + ); + } else { + //not deployed yet, so we'll wait for the deploy + await this.#waitingForDeploy({ + runId, + tx: prisma, + }); + //we ack because when it's deployed it will be requeued + await this.runQueue.acknowledgeMessage(orgId, runId); + } + + return null; + } + } } - const result = await getRunWithBackgroundWorkerTasks(prisma, message.messageId); + //check for a valid deployment if it's not a development environment + if (result.run.runtimeEnvironment.type !== "DEVELOPMENT") { + if (!result.deployment || !result.deployment.imageReference) { + this.logger.warn("RunEngine.dequeueFromMasterQueue(): No deployment found", { + runId, + latestSnapshot: snapshot.id, + result, + }); + //not deployed yet, so we'll wait for the deploy + await this.#waitingForDeploy({ + runId, + tx: prisma, + }); + //we ack because when it's deployed it will be requeued + await this.runQueue.acknowledgeMessage(orgId, runId); + return null; + } + } - //todo create an internal function that is called to prepare a run for execution - // - get the worker and task (including deployment) - // - get the queue - // - deal with waiting for deploy - // - update the run, get attempts + const machinePreset = machinePresetFromConfig({ + machines: this.options.machines.machines, + defaultMachine: this.options.machines.defaultMachine, + config: result.task.machineConfig ?? {}, + }); + + //update the run + const lockedTaskRun = await prisma.taskRun.update({ + where: { + id: runId, + }, + data: { + lockedAt: new Date(), + lockedById: result.task.id, + lockedToVersionId: result.worker.id, + startedAt: result.run.startedAt ?? 
new Date(), + baseCostInCents: this.options.machines.baseCostInCents, + machinePreset: machinePreset.name, + maxDurationInSeconds: getMaxDuration( + result.run.maxDurationInSeconds, + result.task.maxDurationInSeconds + ), + }, + include: { + runtimeEnvironment: true, + attempts: { + take: 1, + orderBy: { number: "desc" }, + }, + tags: true, + }, + }); + + if (!lockedTaskRun) { + this.logger.error("RunEngine.dequeueFromMasterQueue(): Failed to lock task run", { + taskRun: result.run.id, + taskIdentifier: result.run.taskIdentifier, + deployment: result.deployment?.id, + worker: result.worker.id, + task: result.task.id, + runId, + }); + + await this.runQueue.acknowledgeMessage(orgId, runId); + return null; + } + + const queue = await prisma.taskQueue.findUnique({ + where: { + runtimeEnvironmentId_name: { + runtimeEnvironmentId: lockedTaskRun.runtimeEnvironmentId, + name: sanitizeQueueName(lockedTaskRun.queue), + }, + }, + }); + + if (!queue) { + this.logger.debug( + "RunEngine.dequeueFromMasterQueue(): queue not found, so nacking message", + { + queueMessage: message, + taskRunQueue: lockedTaskRun.queue, + runtimeEnvironmentId: lockedTaskRun.runtimeEnvironmentId, + } + ); + + //try again in 1 second + await this.runQueue.nackMessage(orgId, runId, new Date(Date.now() + 1000).getTime()); + return null; + } + + //checkpoints? + + const nextAttemptNumber = lockedTaskRun.attempts[0] + ? lockedTaskRun.attempts[0].number + 1 + : 1; const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: { - id: message.messageId, + id: runId, status: snapshot.runStatus, }, snapshot: { @@ -776,6 +910,8 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }) {} + async #waitingForDeploy({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} + //MARK: RunQueue /** The run can be added to the queue. When it's pulled from the queue it will be executed. 
*/ diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 293218ab17..83241d16ab 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -1347,8 +1347,3 @@ declare module "ioredis" { ): Result; } } - -// Only allow alphanumeric characters, underscores, hyphens, and slashes (and only the first 128 characters) -export function sanitizeQueueName(queueName: string) { - return queueName.replace(/[^a-zA-Z0-9_\-\/]/g, "").substring(0, 128); -} From 45b2e067ee429add2b2031168025b73b66fad161 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 15:04:15 +0100 Subject: [PATCH 026/485] Some improvements, and got the function to compile --- .../run-engine/src/engine/index.ts | 18 +++++++----------- .../run-engine/src/engine/statuses.ts | 6 ++++++ 2 files changed, 13 insertions(+), 11 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/statuses.ts diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 23a700d06d..65db8197d2 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -38,8 +38,7 @@ import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; import { machinePresetFromConfig } from "./machinePresets"; import { ContinueRunMessage, StartRunMessage } from "./messages"; - -const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = ["QUEUED", "BLOCKED_BY_WAITPOINTS"]; +import { isDequeueableExecutionStatus } from "./statuses"; type Options = { redis: RedisOptions; @@ -429,7 +428,7 @@ export class RunEngine { span.setAttribute("runId", runId); //lock the run so nothing else can modify it - const newSnapshot = await this.redlock.using([runId], 5000, async (signal) => { + return this.redlock.using([runId], 5000, async (signal) 
=> { const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (!snapshot) { throw new Error( @@ -437,7 +436,7 @@ export class RunEngine { ); } - if (!dequeuableExecutionStatuses.includes(snapshot.executionStatus)) { + if (!isDequeueableExecutionStatus(snapshot.executionStatus)) { //todo is there a way to recover this, so the run can be retried? await this.#systemFailure({ runId, @@ -589,11 +588,10 @@ export class RunEngine { return null; } - //checkpoints? + const currentAttemptNumber = lockedTaskRun.attempts.at(0)?.number ?? 0; + const nextAttemptNumber = currentAttemptNumber + 1; - const nextAttemptNumber = lockedTaskRun.attempts[0] - ? lockedTaskRun.attempts[0].number + 1 - : 1; + //todo deal with checkpoints const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: { @@ -606,10 +604,8 @@ export class RunEngine { }, }); - return newSnapshot; + return null; }); - - return newSnapshot; }); } diff --git a/internal-packages/run-engine/src/engine/statuses.ts b/internal-packages/run-engine/src/engine/statuses.ts new file mode 100644 index 0000000000..27ebc35c83 --- /dev/null +++ b/internal-packages/run-engine/src/engine/statuses.ts @@ -0,0 +1,6 @@ +import { TaskRunExecutionStatus } from "@trigger.dev/database"; + +export function isDequeueableExecutionStatus(status: TaskRunExecutionStatus): boolean { + const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = ["QUEUED", "BLOCKED_BY_WAITPOINTS"]; + return dequeuableExecutionStatuses.includes(status); +} From 7d4e40ae3f3bfc69b8fd1dbe612c6eb0cf96c5d6 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 15:54:17 +0100 Subject: [PATCH 027/485] Updated test and logic --- .../run-engine/src/engine/db/worker.ts | 1 - .../run-engine/src/engine/index.test.ts | 104 ++++++++++++++++-- .../run-engine/src/engine/index.ts | 43 +++++++- .../run-engine/src/engine/messages.ts | 3 +- 4 files changed, 135 insertions(+), 16 deletions(-) diff --git 
a/internal-packages/run-engine/src/engine/db/worker.ts b/internal-packages/run-engine/src/engine/db/worker.ts index f721f244d7..f3bdb40e59 100644 --- a/internal-packages/run-engine/src/engine/db/worker.ts +++ b/internal-packages/run-engine/src/engine/db/worker.ts @@ -3,7 +3,6 @@ import { BackgroundWorkerTask, Prisma, PrismaClientOrTransaction, - RuntimeEnvironmentType, WorkerDeployment, } from "@trigger.dev/database"; import { CURRENT_DEPLOYMENT_LABEL } from "../consts"; diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 25c4ba4054..4b3db7398f 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -3,12 +3,16 @@ import { containerTest } from "@internal/testcontainers"; import { RunEngine } from "./index.js"; import { PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; import { trace } from "@opentelemetry/api"; +import { AuthenticatedEnvironment } from "../shared/index.js"; +import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { CURRENT_DEPLOYMENT_LABEL } from "./consts.js"; describe("RunEngine", () => { containerTest( "Trigger a simple run", { timeout: 15_000 }, async ({ postgresContainer, prisma, redisContainer }) => { + //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); const engine = new RunEngine({ @@ -34,17 +38,27 @@ describe("RunEngine", () => { centsPerMs: 0.0001, }, }, + baseCostInCents: 0.0001, }, tracer: trace.getTracer("test", "0.0.0"), }); + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + //trigger the run const run = await engine.trigger( { number: 1, friendlyId: "run_1234", environment: authenticatedEnvironment, - taskIdentifier: "test-task", + taskIdentifier, payload: 
"{}", payloadType: "application/json", context: {}, @@ -105,8 +119,11 @@ describe("RunEngine", () => { consumerId: "test_12345", masterQueue: run.masterQueue, }); - expect(dequeued?.runId).toBe(run.id); - expect(dequeued?.executionStatus).toBe("DEQUEUED_FOR_EXECUTION"); + expect(dequeued?.action).toBe("START_RUN"); + + if (dequeued?.action !== "START_RUN") { + throw new Error("Expected action to be START_RUN"); + } const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( authenticatedEnvironment @@ -114,14 +131,14 @@ describe("RunEngine", () => { expect(envConcurrencyAfter).toBe(1); //create an attempt - const attemptResult = await engine.createRunAttempt({ - runId: dequeued!.runId, - snapshotId: dequeued!.id, - }); - expect(attemptResult.run.id).toBe(run.id); - expect(attemptResult.run.status).toBe("EXECUTING"); - expect(attemptResult.attempt.status).toBe("EXECUTING"); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + // const attemptResult = await engine.createRunAttempt({ + // runId: dequeued!.payload.run.id, + // snapshotId: dequeued!.id, + // }); + // expect(attemptResult.run.id).toBe(run.id); + // expect(attemptResult.run.status).toBe("EXECUTING"); + // expect(attemptResult.attempt.status).toBe("EXECUTING"); + // expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); } ); @@ -184,3 +201,68 @@ async function setupAuthenticatedEnvironment(prisma: PrismaClient, type: Runtime }, }); } + +async function setupBackgroundWorker( + prisma: PrismaClient, + environment: AuthenticatedEnvironment, + taskIdentifier: string +) { + const worker = await prisma.backgroundWorker.create({ + data: { + friendlyId: generateFriendlyId("worker"), + contentHash: "hash", + projectId: environment.project.id, + runtimeEnvironmentId: environment.id, + version: "20241015.1", + metadata: {}, + }, + }); + + const task = await prisma.backgroundWorkerTask.create({ + data: { + friendlyId: generateFriendlyId("task"), + slug: 
taskIdentifier, + filePath: `/trigger/myTask.ts`, + exportName: "myTask", + workerId: worker.id, + runtimeEnvironmentId: environment.id, + projectId: environment.project.id, + }, + }); + + if (environment.type !== "DEVELOPMENT") { + const deployment = await prisma.workerDeployment.create({ + data: { + friendlyId: generateFriendlyId("deployment"), + contentHash: worker.contentHash, + version: worker.version, + shortCode: "short_code", + imageReference: `trigger/${environment.project.externalRef}:${worker.version}.${environment.slug}`, + status: "DEPLOYED", + projectId: environment.project.id, + environmentId: environment.id, + workerId: worker.id, + }, + }); + + const promotion = await prisma.workerDeploymentPromotion.create({ + data: { + label: CURRENT_DEPLOYMENT_LABEL, + deploymentId: deployment.id, + environmentId: environment.id, + }, + }); + + return { + worker, + task, + deployment, + promotion, + }; + } + + return { + worker, + task, + }; +} diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 65db8197d2..7248fa3c03 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -591,8 +591,10 @@ export class RunEngine { const currentAttemptNumber = lockedTaskRun.attempts.at(0)?.number ?? 0; const nextAttemptNumber = currentAttemptNumber + 1; - //todo deal with checkpoints + //todo figure out if it's a continuation or a new run + const isNewRun = true; + if (isNewRun) { const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: { id: runId, @@ -604,7 +606,44 @@ export class RunEngine { }, }); - return null; + return { + action: "START_RUN", + payload: { + version: "1", + execution: { + id: newSnapshot.id, + status: "DEQUEUED_FOR_EXECUTION", + }, + image: result.deployment?.imageReference ?? 
undefined, + checkpoint: undefined, + backgroundWorker: { + id: result.worker.id, + version: result.worker.version, + }, + run: { + id: lockedTaskRun.id, + friendlyId: lockedTaskRun.friendlyId, + isTest: lockedTaskRun.isTest, + machine: machinePreset, + attemptNumber: nextAttemptNumber, + masterQueue: lockedTaskRun.masterQueue, + }, + environment: { + id: lockedTaskRun.runtimeEnvironment.id, + type: lockedTaskRun.runtimeEnvironment.type, + }, + organization: { + id: orgId, + }, + project: { + id: lockedTaskRun.projectId, + }, + traceContext: {}, + }, + }; + } else { + throw new NotImplementedError("Continuations are not implemented yet"); + } }); }); } diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index b0bbeacf13..b5298870af 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -1,8 +1,7 @@ import { MachinePreset, EnvironmentType } from "@trigger.dev/core/v3"; import { z } from "zod"; -//turn this into a zod schema discriminated union, like the messages we have in core. 
-//it will need to move into core +//todo it will need to move into core because the Worker will need to use these const StartRunMessage = z.object({ action: z.literal("START_RUN"), From 22cb4f884c06f8e43d2feb1da5ac9d7e1d22bfb3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 16:48:44 +0100 Subject: [PATCH 028/485] Added the ability to shutdown the run engine --- .../run-engine/src/engine/index.test.ts | 195 +++++++++--------- .../run-engine/src/engine/index.ts | 26 ++- 2 files changed, 117 insertions(+), 104 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 4b3db7398f..de88db9134 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -43,102 +43,109 @@ describe("RunEngine", () => { tracer: trace.getTracer("test", "0.0.0"), }); - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - expect(run).toBeDefined(); - expect(run.friendlyId).toBe("run_1234"); - - //check it's actually in the db - const runFromDb = await prisma.taskRun.findUnique({ - where: { - friendlyId: "run_1234", - }, - }); - expect(runFromDb).toBeDefined(); - expect(runFromDb?.id).toBe(run.id); - - const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: run.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - expect(snapshot).toBeDefined(); - 
expect(snapshot?.executionStatus).toBe("QUEUED"); - - //check the waitpoint is created - const runWaitpoint = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpoint.length).toBe(1); - expect(runWaitpoint[0].type).toBe("RUN"); - - //check the queue length - const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); - expect(queueLength).toBe(1); - - //concurrency before - const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyBefore).toBe(0); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - }); - expect(dequeued?.action).toBe("START_RUN"); + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + expect(run).toBeDefined(); + expect(run.friendlyId).toBe("run_1234"); + + //check it's actually in the db + const runFromDb = await prisma.taskRun.findUnique({ + where: { + friendlyId: "run_1234", + }, + }); + expect(runFromDb).toBeDefined(); + expect(runFromDb?.id).toBe(run.id); - if (dequeued?.action !== "START_RUN") { - throw new Error("Expected action to be START_RUN"); + const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: run.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + expect(snapshot).toBeDefined(); + 
expect(snapshot?.executionStatus).toBe("QUEUED"); + + //check the waitpoint is created + const runWaitpoint = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpoint.length).toBe(1); + expect(runWaitpoint[0].type).toBe("RUN"); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfQueue( + authenticatedEnvironment, + run.queue + ); + expect(queueLength).toBe(1); + + //concurrency before + const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyBefore).toBe(0); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + }); + expect(dequeued?.action).toBe("START_RUN"); + + if (dequeued?.action !== "START_RUN") { + throw new Error("Expected action to be START_RUN"); + } + + const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyAfter).toBe(1); + + //create an attempt + // const attemptResult = await engine.createRunAttempt({ + // runId: dequeued!.payload.run.id, + // snapshotId: dequeued!.id, + // }); + // expect(attemptResult.run.id).toBe(run.id); + // expect(attemptResult.run.status).toBe("EXECUTING"); + // expect(attemptResult.attempt.status).toBe("EXECUTING"); + // expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + } finally { + engine.quit(); } - - const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyAfter).toBe(1); - - //create an attempt - // const attemptResult = await engine.createRunAttempt({ - // runId: dequeued!.payload.run.id, - // snapshotId: dequeued!.id, - // }); - // expect(attemptResult.run.id).toBe(run.id); - // expect(attemptResult.run.status).toBe("EXECUTING"); - // expect(attemptResult.attempt.status).toBe("EXECUTING"); - // 
expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); } ); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 7248fa3c03..cdce6bc9b8 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -595,16 +595,16 @@ export class RunEngine { const isNewRun = true; if (isNewRun) { - const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run: { - id: runId, - status: snapshot.runStatus, - }, - snapshot: { - executionStatus: "DEQUEUED_FOR_EXECUTION", - description: "Run was dequeued for execution", - }, - }); + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: { + id: runId, + status: snapshot.runStatus, + }, + snapshot: { + executionStatus: "DEQUEUED_FOR_EXECUTION", + description: "Run was dequeued for execution", + }, + }); return { action: "START_RUN", @@ -925,6 +925,12 @@ export class RunEngine { async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} + async quit() { + //stop the run queue + this.runQueue.quit(); + this.worker.stop(); + } + async #systemFailure({ runId, error, From ad90273f1c5ed82a6b8534d5b49295c9b8a6983a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 17:18:11 +0100 Subject: [PATCH 029/485] Added debugger support for testing the run-engine --- .vscode/launch.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.vscode/launch.json b/.vscode/launch.json index d70f6bdd98..6a1f922d4a 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -99,6 +99,14 @@ "command": "pnpm exec trigger dev", "cwd": "${workspaceFolder}/references/hello-world", "sourceMaps": true + }, + { + "type": "node-terminal", + "request": "launch", + "name": "Debug RunEngine tests", + "command": "pnpm run test --filter @internal/run-engine", + "cwd": "${workspaceFolder}", + "sourceMaps": true } ] } From 7276f4c551a0831f18933ca51879caa9338990e1 Mon Sep 17 
00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 17:18:21 +0100 Subject: [PATCH 030/485] Add the task queue before the tests run --- .../run-engine/src/engine/index.test.ts | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index de88db9134..7b2130ecc9 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -4,7 +4,7 @@ import { RunEngine } from "./index.js"; import { PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; import { trace } from "@opentelemetry/api"; import { AuthenticatedEnvironment } from "../shared/index.js"; -import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { generateFriendlyId, sanitizeQueueName } from "@trigger.dev/core/v3/apps"; import { CURRENT_DEPLOYMENT_LABEL } from "./consts.js"; describe("RunEngine", () => { @@ -237,6 +237,18 @@ async function setupBackgroundWorker( }, }); + const queueName = sanitizeQueueName(`task/${taskIdentifier}`); + const taskQueue = await prisma.taskQueue.create({ + data: { + friendlyId: generateFriendlyId("queue"), + name: queueName, + concurrencyLimit: 10, + runtimeEnvironmentId: worker.runtimeEnvironmentId, + projectId: worker.projectId, + type: "VIRTUAL", + }, + }); + if (environment.type !== "DEVELOPMENT") { const deployment = await prisma.workerDeployment.create({ data: { From ccfb4f48e777ebdd916f097532d372fd9120042b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 17:22:21 +0100 Subject: [PATCH 031/485] Move the traceContext inside the run object --- internal-packages/run-engine/src/engine/index.test.ts | 4 ++++ internal-packages/run-engine/src/engine/index.ts | 2 +- internal-packages/run-engine/src/engine/messages.ts | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts 
b/internal-packages/run-engine/src/engine/index.test.ts index 7b2130ecc9..cebbe98d39 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -129,6 +129,10 @@ describe("RunEngine", () => { throw new Error("Expected action to be START_RUN"); } + expect(dequeued.payload.run.id).toBe(run.id); + expect(dequeued.payload.run.attemptNumber).toBe(1); + expect(dequeued.payload.execution.status).toBe("DEQUEUED_FOR_EXECUTION"); + const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( authenticatedEnvironment ); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index cdce6bc9b8..8636c926f8 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -627,6 +627,7 @@ export class RunEngine { machine: machinePreset, attemptNumber: nextAttemptNumber, masterQueue: lockedTaskRun.masterQueue, + traceContext: lockedTaskRun.traceContext, }, environment: { id: lockedTaskRun.runtimeEnvironment.id, @@ -638,7 +639,6 @@ export class RunEngine { project: { id: lockedTaskRun.projectId, }, - traceContext: {}, }, }; } else { diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index b5298870af..98e654c66f 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -32,6 +32,7 @@ const StartRunMessage = z.object({ machine: MachinePreset, attemptNumber: z.number(), masterQueue: z.string(), + traceContext: z.record(z.unknown()), }), environment: z.object({ id: z.string(), @@ -43,7 +44,6 @@ const StartRunMessage = z.object({ project: z.object({ id: z.string(), }), - traceContext: z.record(z.unknown()), }), }); export type StartRunMessage = z.infer; From 15bde9fcac7a508e2dc26fdb2aa77ef11cf0ec7b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 
15 Oct 2024 19:07:59 +0100 Subject: [PATCH 032/485] Attempts are being created, test working --- .../run-engine/src/engine/index.test.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index cebbe98d39..0fdda72189 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -139,14 +139,14 @@ describe("RunEngine", () => { expect(envConcurrencyAfter).toBe(1); //create an attempt - // const attemptResult = await engine.createRunAttempt({ - // runId: dequeued!.payload.run.id, - // snapshotId: dequeued!.id, - // }); - // expect(attemptResult.run.id).toBe(run.id); - // expect(attemptResult.run.status).toBe("EXECUTING"); - // expect(attemptResult.attempt.status).toBe("EXECUTING"); - // expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + const attemptResult = await engine.createRunAttempt({ + runId: dequeued.payload.run.id, + snapshotId: dequeued.payload.execution.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.attempt.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); } finally { engine.quit(); } From 83eb9aec955ae2227dec870807120f92877fcc6e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 15 Oct 2024 19:11:32 +0100 Subject: [PATCH 033/485] Renamed message to ScheduleRunMessage --- .../run-engine/src/engine/index.ts | 19 +++++++++++++++---- .../run-engine/src/engine/messages.ts | 16 ++++------------ 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 8636c926f8..cd8ae7ca9a 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -37,7 
+37,7 @@ import { MinimalAuthenticatedEnvironment } from "../shared"; import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; import { machinePresetFromConfig } from "./machinePresets"; -import { ContinueRunMessage, StartRunMessage } from "./messages"; +import { ScheduleRunMessage } from "./messages"; import { isDequeueableExecutionStatus } from "./statuses"; type Options = { @@ -413,7 +413,7 @@ export class RunEngine { consumerId: string; masterQueue: string; tx?: PrismaClientOrTransaction; - }): Promise { + }): Promise { const prisma = tx ?? this.prisma; return this.#trace("createRunAttempt", { consumerId, masterQueue }, async (span) => { //gets a fair run from this shared queue @@ -607,7 +607,7 @@ export class RunEngine { }); return { - action: "START_RUN", + action: "SCHEDULE_RUN", payload: { version: "1", execution: { @@ -627,7 +627,7 @@ export class RunEngine { machine: machinePreset, attemptNumber: nextAttemptNumber, masterQueue: lockedTaskRun.masterQueue, - traceContext: lockedTaskRun.traceContext, + traceContext: lockedTaskRun.traceContext as Record, }, environment: { id: lockedTaskRun.runtimeEnvironment.id, @@ -919,6 +919,17 @@ export class RunEngine { }); } + /** This is called to get the */ + async resumeRun({ + runId, + snapshotId, + tx, + }: { + runId: string; + snapshotId: string; + tx?: PrismaClientOrTransaction; + }) {} + async waitForDuration() {} async complete(runId: string, completion: any) {} diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 98e654c66f..31274f97da 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -3,8 +3,8 @@ import { z } from "zod"; //todo it will need to move into core because the Worker will need to use these -const StartRunMessage = z.object({ - action: z.literal("START_RUN"), +const ScheduleRunMessage = z.object({ + 
action: z.literal("SCHEDULE_RUN"), // The payload allows us to a discriminated union with the version payload: z.object({ version: z.literal("1"), @@ -46,14 +46,6 @@ const StartRunMessage = z.object({ }), }), }); -export type StartRunMessage = z.infer; +export type ScheduleRunMessage = z.infer; -export const ContinueRunMessage = z.object({ - action: z.literal("CONTINUE_RUN"), - payload: z.object({ - version: z.literal("1"), - }), -}); -export type ContinueRunMessage = z.infer; - -export const Messages = z.discriminatedUnion("action", [StartRunMessage]); +export const Messages = z.discriminatedUnion("action", [ScheduleRunMessage]); From 9416808f1e89375272d9a3ca69fd869fd0313f5e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 16 Oct 2024 18:08:01 +0100 Subject: [PATCH 034/485] Added a note --- internal-packages/run-engine/src/engine/messages.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 31274f97da..0e110d9e32 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -3,6 +3,7 @@ import { z } from "zod"; //todo it will need to move into core because the Worker will need to use these +/** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ const ScheduleRunMessage = z.object({ action: z.literal("SCHEDULE_RUN"), // The payload allows us to a discriminated union with the version From afef9219c3a28d0122b6dc61e74f707a43867139 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 16 Oct 2024 18:08:09 +0100 Subject: [PATCH 035/485] Added a note about fixing this --- packages/core/src/v3/schemas/common.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 17d5e12ab5..eb57873d3d 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ 
-99,6 +99,7 @@ export const TaskRunErrorCodes = { POD_EVICTED: "POD_EVICTED", POD_UNKNOWN_ERROR: "POD_UNKNOWN_ERROR", } as const; +//todo add the new errors above 👆, or hopefully not when Nick merges his change that de-duplicates this. export const TaskRunInternalError = z.object({ type: z.literal("INTERNAL_ERROR"), From 076dd93fc3fc333cdcff060f66559ba8b7aed664 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 16 Oct 2024 18:58:28 +0100 Subject: [PATCH 036/485] =?UTF-8?q?Prisma=20schema=20=E2=80=93=20added=20n?= =?UTF-8?q?ew=20checkpoints,=20changed=20waitpoints,=20removed=20attempts?= =?UTF-8?q?=20from=20snapshots=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../database/prisma/schema.prisma | 68 ++++++++++++++----- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 2848baa512..45dc8c5a68 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -416,6 +416,7 @@ model RuntimeEnvironment { currentSession RuntimeEnvironmentSession? @relation("currentSession", fields: [currentSessionId], references: [id], onDelete: SetNull, onUpdate: Cascade) currentSessionId String? taskRunNumberCounter TaskRunNumberCounter[] + taskRunCheckpoints TaskRunCheckpoint[] @@unique([projectId, slug, orgMemberId]) @@unique([projectId, shortcode]) @@ -472,6 +473,7 @@ model Project { BackgroundWorkerFile BackgroundWorkerFile[] waitpoints Waitpoint[] taskRunWaitpoints TaskRunWaitpoint[] + taskRunCheckpoints TaskRunCheckpoint[] } enum ProjectVersion { @@ -1685,6 +1687,9 @@ model TaskRun { /// The main queue that this run is part of masterQueue String @default("main") + /// From engine v2+ this will be defined after a run has been dequeued (starting at 1) + attemptNumber Int? 
+ createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -1882,21 +1887,20 @@ model TaskRunExecutionSnapshot { run TaskRun @relation(fields: [runId], references: [id]) runStatus TaskRunStatus - /// Attempt - currentAttemptId String? - currentAttempt TaskRunAttempt? @relation(fields: [currentAttemptId], references: [id]) - currentAttemptStatus TaskRunAttemptStatus? + attemptNumber Int? - /// todo Checkpoint + /// Waitpoints that are blocking the run + blockedByWaitpoints Waitpoint[] @relation("blockedByWaitpoints") - /// These are only ever appended, so we don't need updatedAt - createdAt DateTime @default(now()) + /// Waitpoints that have been completed for this execution + completedWaitpoints Waitpoint[] @relation("completedWaitpoints") - ///todo machine spec? + /// Checkpoint + checkpointId String? + checkpoint TaskRunCheckpoint? @relation(fields: [checkpointId], references: [id]) - ///todo add worker, we'll use it to call back out to the worker - workerId String? - worker Worker? @relation(fields: [workerId], references: [id]) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt /// Used to get the latest state quickly @@index([runId, createdAt(sort: Desc)]) @@ -1907,10 +1911,39 @@ enum TaskRunExecutionStatus { QUEUED DEQUEUED_FOR_EXECUTION EXECUTING - BLOCKED_BY_WAITPOINTS + EXECUTING_WITH_WAITPOINTS + QUEUED_WITH_WAITPOINTS FINISHED } +model TaskRunCheckpoint { + id String @id @default(cuid()) + + friendlyId String @unique + + type TaskRunCheckpointType + location String + imageRef String + reason String? + metadata String? 
+ + project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) + projectId String + + runtimeEnvironment RuntimeEnvironment @relation(fields: [runtimeEnvironmentId], references: [id], onDelete: Cascade, onUpdate: Cascade) + runtimeEnvironmentId String + + executionSnapshot TaskRunExecutionSnapshot[] + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt +} + +enum TaskRunCheckpointType { + DOCKER + KUBERNETES +} + /// A Waitpoint blocks a run from continuing until it's completed /// If there's a waitpoint blocking a run, it shouldn't be in the queue model Waitpoint { @@ -1939,6 +1972,10 @@ model Waitpoint { /// The runs this waitpoint is blocking blockingTaskRuns TaskRunWaitpoint[] + blockingExecutionSnapshots TaskRunExecutionSnapshot[] @relation("blockedByWaitpoints") + /// When a waitpoint is complete + completedExecutionSnapshots TaskRunExecutionSnapshot[] @relation("completedWaitpoints") + /// When completed, an output can be stored here output String? 
outputType String @default("application/json") @@ -1984,8 +2021,7 @@ model TaskRunWaitpoint { } model Worker { - id String @id @default(cuid()) - executionSnapshots TaskRunExecutionSnapshot[] + id String @id @default(cuid()) } model WorkerGroup { @@ -2054,6 +2090,7 @@ model TaskRunNumberCounter { @@unique([taskIdentifier, environmentId]) } +/// This is not used from engine v2+, attempts use the TaskRunExecutionSnapshot and TaskRun model TaskRunAttempt { id String @id @default(cuid()) number Int @default(0) @@ -2096,8 +2133,7 @@ model TaskRunAttempt { batchTaskRunItems BatchTaskRunItem[] CheckpointRestoreEvent CheckpointRestoreEvent[] alerts ProjectAlert[] - childRuns TaskRun[] @relation("TaskParentRunAttempt") - executionSnapshots TaskRunExecutionSnapshot[] + childRuns TaskRun[] @relation("TaskParentRunAttempt") @@unique([taskRunId, number]) @@index([taskRunId]) From 66f5341ffcebac7f5ec17fc79fe889a9144c3751 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 16 Oct 2024 18:58:44 +0100 Subject: [PATCH 037/485] Removed TaskRunAttempt from the run engine --- .../run-engine/src/engine/index.test.ts | 7 +- .../run-engine/src/engine/index.ts | 105 ++++++++---------- .../run-engine/src/engine/statuses.ts | 13 ++- 3 files changed, 64 insertions(+), 61 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 0fdda72189..e727826136 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -123,9 +123,9 @@ describe("RunEngine", () => { consumerId: "test_12345", masterQueue: run.masterQueue, }); - expect(dequeued?.action).toBe("START_RUN"); + expect(dequeued?.action).toBe("SCHEDULE_RUN"); - if (dequeued?.action !== "START_RUN") { + if (dequeued?.action !== "SCHEDULE_RUN") { throw new Error("Expected action to be START_RUN"); } @@ -139,13 +139,12 @@ describe("RunEngine", () => { expect(envConcurrencyAfter).toBe(1); 
//create an attempt - const attemptResult = await engine.createRunAttempt({ + const attemptResult = await engine.startRunAttempt({ runId: dequeued.payload.run.id, snapshotId: dequeued.payload.execution.id, }); expect(attemptResult.run.id).toBe(run.id); expect(attemptResult.run.status).toBe("EXECUTING"); - expect(attemptResult.attempt.status).toBe("EXECUTING"); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); } finally { engine.quit(); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index cd8ae7ca9a..b5b5980bba 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -38,7 +38,7 @@ import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; import { machinePresetFromConfig } from "./machinePresets"; import { ScheduleRunMessage } from "./messages"; -import { isDequeueableExecutionStatus } from "./statuses"; +import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; type Options = { redis: RedisOptions; @@ -648,7 +648,7 @@ export class RunEngine { }); } - async createRunAttempt({ + async startRunAttempt({ runId, snapshotId, tx, @@ -695,12 +695,6 @@ export class RunEngine { }, include: { tags: true, - attempts: { - take: 1, - orderBy: { - number: "desc", - }, - }, lockedBy: { include: { worker: { @@ -754,7 +748,8 @@ export class RunEngine { throw new ServiceValidationError("Queue not found", 404); } - const nextAttemptNumber = taskRun.attempts[0] ? taskRun.attempts[0].number + 1 : 1; + //increment the attempt number (start at 1) + const nextAttemptNumber = (taskRun.attemptNumber ?? 0) + 1; if (nextAttemptNumber > MAX_TASK_RUN_ATTEMPTS) { await this.#crash({ @@ -762,38 +757,22 @@ export class RunEngine { error: { type: "INTERNAL_ERROR", code: "TASK_RUN_CRASHED", - message: taskRun.lockedBy.worker.supportsLazyAttempts - ? "Max attempts reached." 
- : "Max attempts reached. Please upgrade your CLI and SDK.", + message: "Max attempts reached.", }, }); - throw new ServiceValidationError("Max attempts reached", 400); } const result = await $transaction( prisma, async (tx) => { - const attempt = await tx.taskRunAttempt.create({ - data: { - number: nextAttemptNumber, - friendlyId: generateFriendlyId("attempt"), - taskRunId: taskRun.id, - startedAt: new Date(), - backgroundWorkerId: taskRun.lockedBy!.worker.id, - backgroundWorkerTaskId: taskRun.lockedBy!.id, - status: "EXECUTING", - queueId: queue.id, - runtimeEnvironmentId: environment.id, - }, - }); - const run = await tx.taskRun.update({ where: { id: taskRun.id, }, data: { status: "EXECUTING", + attemptNumber: nextAttemptNumber, }, include: { tags: true, @@ -805,7 +784,6 @@ export class RunEngine { const newSnapshot = await this.#createExecutionSnapshot(tx, { run, - attempt, snapshot: { executionStatus: "EXECUTING", description: "Attempt created, starting execution", @@ -817,7 +795,7 @@ export class RunEngine { await this.worker.ack(`expireRun:${taskRun.id}`); } - return { run, attempt, snapshot: newSnapshot }; + return { run, snapshot: newSnapshot }; }, (error) => { this.logger.error("RunEngine.createRunAttempt(): prisma.$transaction error", { @@ -827,6 +805,10 @@ export class RunEngine { message: error.message, name: error.name, }); + throw new ServiceValidationError( + "Failed to update task run and execution snapshot", + 500 + ); } ); @@ -838,7 +820,7 @@ export class RunEngine { throw new ServiceValidationError("Failed to create task run attempt", 500); } - const { run, attempt, snapshot } = result; + const { run, snapshot } = result; const machinePreset = machinePresetFromConfig({ machines: this.options.machines.machines, @@ -857,10 +839,11 @@ export class RunEngine { filePath: run.lockedBy!.filePath, exportName: run.lockedBy!.exportName, }, + //this is for backwards compatibility with the SDK attempt: { - id: attempt.friendlyId, - number: attempt.number, 
- startedAt: attempt.startedAt ?? attempt.createdAt, + id: generateFriendlyId("attempt"), + number: nextAttemptNumber, + startedAt: latestSnapshot.updatedAt, backgroundWorkerId: run.lockedBy!.worker.id, backgroundWorkerTaskId: run.lockedBy!.id, status: "EXECUTING" as const, @@ -882,6 +865,8 @@ export class RunEngine { version: run.lockedBy!.worker.version, metadata, maxDuration: run.maxDurationInSeconds ?? undefined, + //todo add this, it needs to be added to all the SDK functions + // attemptNumber: nextAttemptNumber, }, queue: { id: queue.friendlyId, @@ -912,7 +897,6 @@ export class RunEngine { return { run, - attempt, snapshot, }; }); @@ -1013,7 +997,7 @@ export class RunEngine { } //run is still executing, send a message to the worker - if (snapshot.executionStatus === "EXECUTING" && snapshot.worker) { + if (isExecuting(snapshot.executionStatus)) { const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: run, snapshot: { @@ -1177,6 +1161,8 @@ export class RunEngine { }, }); + //todo update the execution snapshot + // 5. Continue the runs that have no more waitpoints for (const run of taskRunsToResume) { await this.#continueRun(run, run.runtimeEnvironment, tx); @@ -1195,11 +1181,9 @@ export class RunEngine { prisma: PrismaClientOrTransaction, { run, - attempt, snapshot, }: { - run: { id: string; status: TaskRunStatus }; - attempt?: { id: string; status: TaskRunAttemptStatus }; + run: { id: string; status: TaskRunStatus; attemptNumber?: number | null }; snapshot: { executionStatus: TaskRunExecutionStatus; description: string; @@ -1213,18 +1197,20 @@ export class RunEngine { description: snapshot.description, runId: run.id, runStatus: run.status, - currentAttemptId: attempt?.id, - currentAttemptStatus: attempt?.status, + attemptNumber: run.attemptNumber ?? 
undefined, }, }); //create heartbeat (if relevant) switch (snapshot.executionStatus) { case "RUN_CREATED": - case "QUEUED": - case "BLOCKED_BY_WAITPOINTS": case "FINISHED": - case "DEQUEUED_FOR_EXECUTION": { + case "QUEUED": { + //we don't need to heartbeat these statuses + break; + } + case "DEQUEUED_FOR_EXECUTION": + case "QUEUED_WITH_WAITPOINTS": { await this.#startHeartbeating({ runId: run.id, snapshotId: newSnapshot.id, @@ -1232,7 +1218,8 @@ export class RunEngine { }); break; } - case "EXECUTING": { + case "EXECUTING": + case "EXECUTING_WITH_WAITPOINTS": { await this.#startHeartbeating({ runId: run.id, snapshotId: newSnapshot.id, @@ -1240,6 +1227,9 @@ export class RunEngine { }); break; } + default: { + assertNever(snapshot.executionStatus); + } } return newSnapshot; @@ -1247,7 +1237,6 @@ export class RunEngine { async #getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { return prisma.taskRunExecutionSnapshot.findFirst({ - include: { worker: true }, where: { runId }, orderBy: { createdAt: "desc" }, }); @@ -1341,30 +1330,34 @@ export class RunEngine { //todo fail attempt if there is one? 
switch (latestSnapshot.executionStatus) { - case "BLOCKED_BY_WAITPOINTS": { - //we need to check if the waitpoints are still blocking the run - throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); - } - case "DEQUEUED_FOR_EXECUTION": { - //we need to check if the run is still dequeued - throw new NotImplementedError("Not implemented DEQUEUED_FOR_EXECUTION"); + case "RUN_CREATED": { + //we need to check if the run is still created + throw new NotImplementedError("Not implemented RUN_CREATED"); } case "QUEUED": { //we need to check if the run is still QUEUED throw new NotImplementedError("Not implemented QUEUED"); } + case "DEQUEUED_FOR_EXECUTION": { + //we need to check if the run is still dequeued + throw new NotImplementedError("Not implemented DEQUEUED_FOR_EXECUTION"); + } case "EXECUTING": { //we need to check if the run is still executing throw new NotImplementedError("Not implemented EXECUTING"); } + case "EXECUTING_WITH_WAITPOINTS": { + //we need to check if the run is still executing + throw new NotImplementedError("Not implemented EXECUTING_WITH_WAITPOINTS"); + } + case "QUEUED_WITH_WAITPOINTS": { + //we need to check if the waitpoints are still blocking the run + throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); + } case "FINISHED": { //we need to check if the run is still finished throw new NotImplementedError("Not implemented FINISHED"); } - case "RUN_CREATED": { - //we need to check if the run is still created - throw new NotImplementedError("Not implemented RUN_CREATED"); - } default: { assertNever(latestSnapshot.executionStatus); } diff --git a/internal-packages/run-engine/src/engine/statuses.ts b/internal-packages/run-engine/src/engine/statuses.ts index 27ebc35c83..be7e7d0a39 100644 --- a/internal-packages/run-engine/src/engine/statuses.ts +++ b/internal-packages/run-engine/src/engine/statuses.ts @@ -1,6 +1,17 @@ import { TaskRunExecutionStatus } from "@trigger.dev/database"; export function 
isDequeueableExecutionStatus(status: TaskRunExecutionStatus): boolean { - const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = ["QUEUED", "BLOCKED_BY_WAITPOINTS"]; + const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = [ + "QUEUED", + "QUEUED_WITH_WAITPOINTS", + ]; return dequeuableExecutionStatuses.includes(status); } + +export function isExecuting(status: TaskRunExecutionStatus): boolean { + const executingExecutionStatuses: TaskRunExecutionStatus[] = [ + "EXECUTING", + "EXECUTING_WITH_WAITPOINTS", + ]; + return executingExecutionStatuses.includes(status); +} From 2599a97c60a08aedbb6d6bc22b7dd8e6383a2c55 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 17 Oct 2024 11:55:01 +0100 Subject: [PATCH 038/485] =?UTF-8?q?Don=E2=80=99t=20keep=20the=20blocked=20?= =?UTF-8?q?waitpoints=20on=20the=20snapshot,=20it=E2=80=99s=20not=20needed?= =?UTF-8?q?=20to=20duplicate=20from=20the=20run?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal-packages/database/prisma/schema.prisma | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 45dc8c5a68..5f71b7da6f 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1889,9 +1889,6 @@ model TaskRunExecutionSnapshot { attemptNumber Int? 
- /// Waitpoints that are blocking the run - blockedByWaitpoints Waitpoint[] @relation("blockedByWaitpoints") - /// Waitpoints that have been completed for this execution completedWaitpoints Waitpoint[] @relation("completedWaitpoints") @@ -1972,7 +1969,6 @@ model Waitpoint { /// The runs this waitpoint is blocking blockingTaskRuns TaskRunWaitpoint[] - blockingExecutionSnapshots TaskRunExecutionSnapshot[] @relation("blockedByWaitpoints") /// When a waitpoint is complete completedExecutionSnapshots TaskRunExecutionSnapshot[] @relation("completedWaitpoints") From 800b6a04bc9274ceffc949cac4fcc7369aed5799 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 17 Oct 2024 11:55:16 +0100 Subject: [PATCH 039/485] Use the fancy `signal.throwIfAborted()` --- .../run-engine/src/engine/index.ts | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index b5b5980bba..27978d7c0d 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -296,9 +296,7 @@ export class RunEngine { } //Make sure lock extension succeeded - if (signal.aborted) { - throw signal.error; - } + signal.throwIfAborted(); if (queue) { const concurrencyLimit = @@ -376,9 +374,7 @@ export class RunEngine { } //Make sure lock extension succeeded - if (signal.aborted) { - throw signal.error; - } + signal.throwIfAborted(); //enqueue the run if it's not delayed if (!taskRun.delayUntil) { @@ -839,11 +835,11 @@ export class RunEngine { filePath: run.lockedBy!.filePath, exportName: run.lockedBy!.exportName, }, - //this is for backwards compatibility with the SDK attempt: { - id: generateFriendlyId("attempt"), number: nextAttemptNumber, startedAt: latestSnapshot.updatedAt, + //todo deprecate everything below + id: generateFriendlyId("attempt"), backgroundWorkerId: run.lockedBy!.worker.id, backgroundWorkerTaskId: run.lockedBy!.id, 
status: "EXECUTING" as const, @@ -865,8 +861,6 @@ export class RunEngine { version: run.lockedBy!.worker.version, metadata, maxDuration: run.maxDurationInSeconds ?? undefined, - //todo add this, it needs to be added to all the SDK functions - // attemptNumber: nextAttemptNumber, }, queue: { id: queue.friendlyId, @@ -1087,7 +1081,6 @@ export class RunEngine { ) { //todo it would be better if we didn't remove from the queue, because this removes the payload //todo better would be to have a "block" function which remove it from the queue but doesn't remove the payload - //todo release concurrency and make sure the run isn't in the queue // await this.runQueue.blockMessage(orgId, runId); @@ -1100,6 +1093,8 @@ export class RunEngine { projectId: waitpoint.projectId, }, }); + + //todo we need to update the relevant snapshot as well } /** This completes a waitpoint and then continues any runs blocked by the waitpoint, @@ -1162,6 +1157,7 @@ export class RunEngine { }); //todo update the execution snapshot + //todo this needs to be done inside the transaction // 5. 
Continue the runs that have no more waitpoints for (const run of taskRunsToResume) { From 75dcf4e1dd9ad7058a5cf5619782a4236100c4d2 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 17 Oct 2024 12:23:19 +0100 Subject: [PATCH 040/485] Created a wrapper around Redlock that prevents locking the same resources inside the same async context --- .../run-engine/src/engine/index.ts | 215 +++++++++--------- .../run-engine/src/engine/locking.ts | 56 +++++ 2 files changed, 168 insertions(+), 103 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/locking.ts diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 27978d7c0d..a7ecd1819b 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -11,8 +11,8 @@ import { } from "@trigger.dev/core/v3"; import { generateFriendlyId, - parseNaturalLanguageDuration, getMaxDuration, + parseNaturalLanguageDuration, sanitizeQueueName, } from "@trigger.dev/core/v3/apps"; import { @@ -21,7 +21,6 @@ import { PrismaClient, PrismaClientOrTransaction, TaskRun, - TaskRunAttemptStatus, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, @@ -29,13 +28,13 @@ import { import assertNever from "assert-never"; import { Redis, type RedisOptions } from "ioredis"; import { nanoid } from "nanoid"; -import Redlock from "redlock"; import { z } from "zod"; import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; +import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { ScheduleRunMessage } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; @@ -117,7 +116,7 @@ type EngineWorker = 
Worker; export class RunEngine { private redis: Redis; private prisma: PrismaClient; - private redlock: Redlock; + private runLock: RunLocker; runQueue: RunQueue; private worker: EngineWorker; private logger = new Logger("RunEngine", "debug"); @@ -126,13 +125,7 @@ export class RunEngine { constructor(private readonly options: Options) { this.prisma = options.prisma; this.redis = new Redis(options.redis); - this.redlock = new Redlock([this.redis], { - driftFactor: 0.01, - retryCount: 10, - retryDelay: 200, // time in ms - retryJitter: 200, // time in ms - automaticExtensionThreshold: 500, // time in ms - }); + this.runLock = new RunLocker({ redis: this.redis }); this.runQueue = new RunQueue({ name: "rq", @@ -155,7 +148,7 @@ export class RunEngine { logger: new Logger("RunEngineWorker", "debug"), jobs: { waitpointCompleteDateTime: async ({ payload }) => { - await this.#completeWaitpoint(payload.waitpointId); + await this.completeWaitpoint(payload.waitpointId); }, heartbeatSnapshot: async ({ payload }) => { await this.#handleStalledSnapshot(payload); @@ -278,7 +271,7 @@ export class RunEngine { span.setAttribute("runId", taskRun.id); - await this.redlock.using([taskRun.id], 5000, async (signal) => { + await this.runLock.lock([taskRun.id], 5000, async (signal) => { //create associated waitpoint (this completes when the run completes) const associatedWaitpoint = await this.#createRunAssociatedWaitpoint(prisma, { projectId: environment.project.id, @@ -424,7 +417,7 @@ export class RunEngine { span.setAttribute("runId", runId); //lock the run so nothing else can modify it - return this.redlock.using([runId], 5000, async (signal) => { + return this.runLock.lock([runId], 5000, async (signal) => { const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (!snapshot) { throw new Error( @@ -656,7 +649,7 @@ export class RunEngine { const prisma = tx ?? 
this.prisma; return this.#trace("createRunAttempt", { runId, snapshotId }, async (span) => { - return this.redlock.using([runId], 5000, async (signal) => { + return this.runLock.lock([runId], 5000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (!latestSnapshot) { await this.#systemFailure({ @@ -914,6 +907,81 @@ export class RunEngine { async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} + /** This completes a waitpoint and then continues any runs blocked by the waitpoint, + * if they're no longer blocked. This doesn't suffer from race conditions. */ + async completeWaitpoint(id: string) { + const waitpoint = await this.prisma.waitpoint.findUnique({ + where: { id }, + }); + + if (!waitpoint) { + throw new Error(`Waitpoint ${id} not found`); + } + + if (waitpoint.status === "COMPLETED") { + return; + } + + await $transaction( + this.prisma, + async (tx) => { + // 1. Find the TaskRuns associated with this waitpoint + const affectedTaskRuns = await tx.taskRunWaitpoint.findMany({ + where: { waitpointId: id }, + select: { taskRunId: true }, + }); + + if (affectedTaskRuns.length === 0) { + throw new Error(`No TaskRunWaitpoints found for waitpoint ${id}`); + } + + // 2. Delete the TaskRunWaitpoint entries for this specific waitpoint + await tx.taskRunWaitpoint.deleteMany({ + where: { waitpointId: id }, + }); + + // 3. Update the waitpoint status + await tx.waitpoint.update({ + where: { id }, + data: { status: "COMPLETED" }, + }); + + // 4. 
Check which of the affected TaskRuns now have no waitpoints + const taskRunsToResume = await tx.taskRun.findMany({ + where: { + id: { in: affectedTaskRuns.map((run) => run.taskRunId) }, + blockedByWaitpoints: { none: {} }, + status: { in: ["PENDING", "WAITING_TO_RESUME"] }, + }, + include: { + runtimeEnvironment: { + select: { + id: true, + type: true, + maximumConcurrencyLimit: true, + project: { select: { id: true } }, + organization: { select: { id: true } }, + }, + }, + }, + }); + + //todo update the execution snapshot + //todo this needs to be done inside the transaction + + // 5. Continue the runs that have no more waitpoints + for (const run of taskRunsToResume) { + await this.#continueRun(run, run.runtimeEnvironment, tx); + } + }, + (error) => { + this.logger.error(`Error completing waitpoint ${id}, retrying`, { error }); + throw error; + }, + { isolationLevel: Prisma.TransactionIsolationLevel.ReadCommitted } + ); + } + async quit() { //stop the run queue this.runQueue.quit(); @@ -984,7 +1052,7 @@ export class RunEngine { ) { const prisma = tx ?? 
this.prisma; - await this.redlock.using([run.id], 5000, async (signal) => { + await this.runLock.lock([run.id], 5000, async (signal) => { const snapshot = await this.#getLatestExecutionSnapshot(prisma, run.id); if (!snapshot) { throw new Error(`RunEngine.#continueRun(): No snapshot found for run: ${run.id}`); @@ -1084,9 +1152,7 @@ export class RunEngine { //todo release concurrency and make sure the run isn't in the queue // await this.runQueue.blockMessage(orgId, runId); - throw new NotImplementedError("Not implemented #blockRunWithWaitpoint"); - - return tx.taskRunWaitpoint.create({ + const taskWaitpoint = tx.taskRunWaitpoint.create({ data: { taskRunId: runId, waitpointId: waitpoint.id, @@ -1094,82 +1160,9 @@ export class RunEngine { }, }); - //todo we need to update the relevant snapshot as well - } - - /** This completes a waitpoint and then continues any runs blocked by the waitpoint, - * if they're no longer blocked. This doesn't suffer from race conditions. */ - async #completeWaitpoint(id: string) { - const waitpoint = await this.prisma.waitpoint.findUnique({ - where: { id }, - }); - - if (!waitpoint) { - throw new Error(`Waitpoint ${id} not found`); - } - - if (waitpoint.status === "COMPLETED") { - return; - } - - await $transaction( - this.prisma, - async (tx) => { - // 1. Find the TaskRuns associated with this waitpoint - const affectedTaskRuns = await tx.taskRunWaitpoint.findMany({ - where: { waitpointId: id }, - select: { taskRunId: true }, - }); - - if (affectedTaskRuns.length === 0) { - throw new Error(`No TaskRunWaitpoints found for waitpoint ${id}`); - } - - // 2. Delete the TaskRunWaitpoint entries for this specific waitpoint - await tx.taskRunWaitpoint.deleteMany({ - where: { waitpointId: id }, - }); - - // 3. Update the waitpoint status - await tx.waitpoint.update({ - where: { id }, - data: { status: "COMPLETED" }, - }); - - // 4. 
Check which of the affected TaskRuns now have no waitpoints - const taskRunsToResume = await tx.taskRun.findMany({ - where: { - id: { in: affectedTaskRuns.map((run) => run.taskRunId) }, - blockedByWaitpoints: { none: {} }, - status: { in: ["PENDING", "WAITING_TO_RESUME"] }, - }, - include: { - runtimeEnvironment: { - select: { - id: true, - type: true, - maximumConcurrencyLimit: true, - project: { select: { id: true } }, - organization: { select: { id: true } }, - }, - }, - }, - }); - - //todo update the execution snapshot - //todo this needs to be done inside the transaction + //this run is now blocked, so we change the state - // 5. Continue the runs that have no more waitpoints - for (const run of taskRunsToResume) { - await this.#continueRun(run, run.runtimeEnvironment, tx); - } - }, - (error) => { - this.logger.error(`Error completing waitpoint ${id}, retrying`, { error }); - throw error; - }, - { isolationLevel: Prisma.TransactionIsolationLevel.ReadCommitted } - ); + //todo we need to update the relevant snapshot as well } //MARK: - TaskRunExecutionSnapshots @@ -1197,8 +1190,26 @@ export class RunEngine { }, }); - //create heartbeat (if relevant) - switch (snapshot.executionStatus) { + //set heartbeat (if relevant) + await this.#setExecutionSnapshotHeartbeat({ + status: newSnapshot.executionStatus, + runId: run.id, + snapshotId: newSnapshot.id, + }); + + return newSnapshot; + } + + async #setExecutionSnapshotHeartbeat({ + status, + runId, + snapshotId, + }: { + status: TaskRunExecutionStatus; + runId: string; + snapshotId: string; + }) { + switch (status) { case "RUN_CREATED": case "FINISHED": case "QUEUED": { @@ -1208,8 +1219,8 @@ export class RunEngine { case "DEQUEUED_FOR_EXECUTION": case "QUEUED_WITH_WAITPOINTS": { await this.#startHeartbeating({ - runId: run.id, - snapshotId: newSnapshot.id, + runId, + snapshotId, intervalSeconds: 60, }); break; @@ -1217,18 +1228,16 @@ export class RunEngine { case "EXECUTING": case "EXECUTING_WITH_WAITPOINTS": { await 
this.#startHeartbeating({ - runId: run.id, - snapshotId: newSnapshot.id, + runId, + snapshotId, intervalSeconds: 60 * 15, }); break; } default: { - assertNever(snapshot.executionStatus); + assertNever(status); } } - - return newSnapshot; } async #getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { diff --git a/internal-packages/run-engine/src/engine/locking.ts b/internal-packages/run-engine/src/engine/locking.ts new file mode 100644 index 0000000000..11ee1a3e2d --- /dev/null +++ b/internal-packages/run-engine/src/engine/locking.ts @@ -0,0 +1,56 @@ +import Redis from "ioredis"; +import Redlock, { RedlockAbortSignal } from "redlock"; +import { AsyncLocalStorage } from "async_hooks"; + +interface LockContext { + resources: string; + signal: RedlockAbortSignal; +} + +export class RunLocker { + private redlock: Redlock; + private asyncLocalStorage: AsyncLocalStorage; + + constructor(options: { redis: Redis }) { + this.redlock = new Redlock([options.redis], { + driftFactor: 0.01, + retryCount: 10, + retryDelay: 200, // time in ms + retryJitter: 200, // time in ms + automaticExtensionThreshold: 500, // time in ms + }); + this.asyncLocalStorage = new AsyncLocalStorage(); + } + + /** Locks resources using RedLock. It won't lock again if we're already inside a lock with the same resources. 
*/ + async lock( + resources: string[], + duration: number, + routine: (signal: RedlockAbortSignal) => Promise + ): Promise { + const currentContext = this.asyncLocalStorage.getStore(); + const joinedResources = resources.sort().join(","); + + if (currentContext && currentContext.resources === joinedResources) { + // We're already inside a lock with the same resources, just run the routine + return routine(currentContext.signal); + } + + // Different resources or not in a lock, proceed with new lock + return this.redlock.using(resources, duration, async (signal) => { + const newContext: LockContext = { resources: joinedResources, signal }; + + return this.asyncLocalStorage.run(newContext, async () => { + return routine(signal); + }); + }); + } + + isInsideLock(): boolean { + return !!this.asyncLocalStorage.getStore(); + } + + getCurrentResources(): string | undefined { + return this.asyncLocalStorage.getStore()?.resources; + } +} From 254922793fdebcc82b548fd578aaaa1452d2a539 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 17 Oct 2024 12:27:40 +0100 Subject: [PATCH 041/485] Added a double locking test --- .../run-engine/src/engine/locking.test.ts | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 internal-packages/run-engine/src/engine/locking.test.ts diff --git a/internal-packages/run-engine/src/engine/locking.test.ts b/internal-packages/run-engine/src/engine/locking.test.ts new file mode 100644 index 0000000000..b09e870e27 --- /dev/null +++ b/internal-packages/run-engine/src/engine/locking.test.ts @@ -0,0 +1,37 @@ +import { redisTest } from "@internal/testcontainers"; +import { expect } from "vitest"; +import { RunLocker } from "./locking.js"; + +describe("RunLocker", () => { + redisTest("Test acquiring a lock works", { timeout: 15_000 }, async ({ redis }) => { + const runLock = new RunLocker({ redis }); + + expect(runLock.isInsideLock()).toBe(false); + + await runLock.lock(["test-1"], 5000, async (signal) => { + 
expect(signal).toBeDefined(); + expect(runLock.isInsideLock()).toBe(true); + }); + + expect(runLock.isInsideLock()).toBe(false); + }); + + redisTest("Test double locking works", { timeout: 15_000 }, async ({ redis }) => { + const runLock = new RunLocker({ redis }); + + expect(runLock.isInsideLock()).toBe(false); + + await runLock.lock(["test-1"], 5000, async (signal) => { + expect(signal).toBeDefined(); + expect(runLock.isInsideLock()).toBe(true); + + //should be able to "lock it again" + await runLock.lock(["test-1"], 5000, async (signal) => { + expect(signal).toBeDefined(); + expect(runLock.isInsideLock()).toBe(true); + }); + }); + + expect(runLock.isInsideLock()).toBe(false); + }); +}); From 85dbea6cfa7cc18cfe86986090342e79c916c4b5 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 17 Oct 2024 13:15:07 +0100 Subject: [PATCH 042/485] More work on completing waitpoints --- .../run-engine/src/engine/index.ts | 163 +++++++++++++----- 1 file changed, 120 insertions(+), 43 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index a7ecd1819b..fa916a408b 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -148,7 +148,7 @@ export class RunEngine { logger: new Logger("RunEngineWorker", "debug"), jobs: { waitpointCompleteDateTime: async ({ payload }) => { - await this.completeWaitpoint(payload.waitpointId); + await this.completeWaitpoint({ id: payload.waitpointId }); }, heartbeatSnapshot: async ({ payload }) => { await this.#handleStalledSnapshot(payload); @@ -907,9 +907,18 @@ export class RunEngine { async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} - /** This completes a waitpoint and then continues any runs blocked by the waitpoint, + /** This completes a waitpoint and updates all entries so the run isn't blocked, * if they're no longer blocked. This doesn't suffer from race conditions. 
*/ - async completeWaitpoint(id: string) { + async completeWaitpoint({ + id, + output, + }: { + id: string; + output?: { + value: string; + type?: string; + }; + }) { const waitpoint = await this.prisma.waitpoint.findUnique({ where: { id }, }); @@ -943,15 +952,38 @@ export class RunEngine { // 3. Update the waitpoint status await tx.waitpoint.update({ where: { id }, - data: { status: "COMPLETED" }, + data: { + status: "COMPLETED", + completedAt: new Date(), + output: output?.value, + outputType: output?.type, + }, }); - // 4. Check which of the affected TaskRuns now have no waitpoints + // 4. Add the completed snapshots to the snapshots + for (const run of affectedTaskRuns) { + await this.runLock.lock([run.taskRunId], 5_000, async (signal) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, run.taskRunId); + if (!latestSnapshot) { + throw new Error(`No execution snapshot found for TaskRun ${run.taskRunId}`); + } + + await tx.taskRunExecutionSnapshot.update({ + where: { id: latestSnapshot.id }, + data: { + completedWaitpoints: { + connect: { id }, + }, + }, + }); + }); + } + + // 5. Check which of the affected TaskRuns now have no waitpoints const taskRunsToResume = await tx.taskRun.findMany({ where: { id: { in: affectedTaskRuns.map((run) => run.taskRunId) }, blockedByWaitpoints: { none: {} }, - status: { in: ["PENDING", "WAITING_TO_RESUME"] }, }, include: { runtimeEnvironment: { @@ -966,9 +998,6 @@ export class RunEngine { }, }); - //todo update the execution snapshot - //todo this needs to be done inside the transaction - // 5. 
Continue the runs that have no more waitpoints for (const run of taskRunsToResume) { await this.#continueRun(run, run.runtimeEnvironment, tx); @@ -1058,6 +1087,11 @@ export class RunEngine { throw new Error(`RunEngine.#continueRun(): No snapshot found for run: ${run.id}`); } + const completedWaitpoints = await this.#getExecutionSnapshotCompletedWaitpoints( + prisma, + snapshot.id + ); + //run is still executing, send a message to the worker if (isExecuting(snapshot.executionStatus)) { const newSnapshot = await this.#createExecutionSnapshot(prisma, { @@ -1147,25 +1181,56 @@ export class RunEngine { tx: PrismaClientOrTransaction, { orgId, runId, waitpoint }: { orgId: string; runId: string; waitpoint: Waitpoint } ) { - //todo it would be better if we didn't remove from the queue, because this removes the payload - //todo better would be to have a "block" function which remove it from the queue but doesn't remove the payload - //todo release concurrency and make sure the run isn't in the queue - // await this.runQueue.blockMessage(orgId, runId); + await this.runLock.lock([runId], 5000, async (signal) => { + //todo it would be better if we didn't remove from the queue, because this removes the payload + //todo better would be to have a "block" function which remove it from the queue but doesn't remove the payload + //todo release concurrency and make sure the run isn't in the queue + // await this.runQueue.blockMessage(orgId, runId); + + const taskWaitpoint = tx.taskRunWaitpoint.create({ + data: { + taskRunId: runId, + waitpointId: waitpoint.id, + projectId: waitpoint.projectId, + }, + }); - const taskWaitpoint = tx.taskRunWaitpoint.create({ - data: { - taskRunId: runId, - waitpointId: waitpoint.id, - projectId: waitpoint.projectId, - }, - }); + const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, runId); + + if (latestSnapshot) { + //if the run is QUEUE or EXECUTING, we create a new snapshot + let newStatus: TaskRunExecutionStatus | undefined = undefined; 
+ switch (latestSnapshot.executionStatus) { + case "QUEUED": { + newStatus = "QUEUED_WITH_WAITPOINTS"; + } + case "EXECUTING": { + newStatus = "EXECUTING_WITH_WAITPOINTS"; + } + } + + if (newStatus) { + await this.#createExecutionSnapshot(tx, { + run: { + id: latestSnapshot.runId, + status: latestSnapshot.runStatus, + attemptNumber: latestSnapshot.attemptNumber, + }, + snapshot: { + executionStatus: newStatus, + description: "Run was blocked by a waitpoint.", + }, + }); + } + } - //this run is now blocked, so we change the state + //this run is now blocked, so we change the state - //todo we need to update the relevant snapshot as well + //todo we need to update the relevant snapshot as well + }); } - //MARK: - TaskRunExecutionSnapshots + //#region TaskRunExecutionSnapshots async #createExecutionSnapshot( prisma: PrismaClientOrTransaction, { @@ -1247,7 +1312,34 @@ export class RunEngine { }); } - //MARK: - Heartbeat + async #getExecutionSnapshotCompletedWaitpoints( + prisma: PrismaClientOrTransaction, + snapshotId: string + ) { + const waitpoints = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { id: snapshotId }, + include: { + completedWaitpoints: true, + }, + }); + + //deduplicate waitpoints + const waitpointIds = new Set(); + return ( + waitpoints?.completedWaitpoints.filter((waitpoint) => { + if (waitpointIds.has(waitpoint.id)) { + return false; + } else { + waitpointIds.add(waitpoint.id); + return true; + } + }) ?? [] + ); + } + + //#endregion + + //#region Heartbeat async #startHeartbeating({ runId, snapshotId, @@ -1369,6 +1461,8 @@ export class RunEngine { } } + //#endregion + async #getAuthenticatedEnvironmentFromRun(runId: string, tx?: PrismaClientOrTransaction) { const prisma = tx ?? 
this.prisma; const taskRun = await prisma.taskRun.findUnique({ @@ -1433,27 +1527,10 @@ export class ServiceValidationError extends Error { } } +//todo temporary during development class NotImplementedError extends Error { constructor(message: string) { + console.error("NOT IMPLEMENTED YET", { message }); super(message); } } - -/* -Starting execution flow: - -1. Run id is pulled from a queue -2. Prepare the run for an attempt (returns data to send to the worker) - a. The run is marked as "waiting to start"? - b. Create a TaskRunState with the run id, and the state "waiting to start". - c. Start a heartbeat with the TaskRunState id, in case it never starts. -3. The run is sent to the worker -4. When the worker has received the run, it ask the platform for an attempt -5. The attempt is created - a. The attempt is created - b. The TaskRunState is updated to "EXECUTING" - c. Start a heartbeat with the TaskRunState id. - c. The TaskRun is updated to "EXECUTING" -6. A response is sent back to the worker with the attempt data -7. The code executes... 
-*/ From ae22e85dc54296e4cf4b51ba3dd77bae477c3b16 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 17 Oct 2024 16:47:12 +0100 Subject: [PATCH 043/485] Basic test started and passing for `triggerAndWait` and completing the waitpoint --- .../run-engine/src/engine/index.test.ts | 427 ++++++++++++------ .../run-engine/src/engine/index.ts | 43 +- 2 files changed, 310 insertions(+), 160 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index e727826136..0d984d1bf6 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -1,156 +1,309 @@ -import { expect } from "vitest"; import { containerTest } from "@internal/testcontainers"; -import { RunEngine } from "./index.js"; -import { PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; import { trace } from "@opentelemetry/api"; -import { AuthenticatedEnvironment } from "../shared/index.js"; import { generateFriendlyId, sanitizeQueueName } from "@trigger.dev/core/v3/apps"; +import { PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; +import { expect } from "vitest"; +import { AuthenticatedEnvironment } from "../shared/index.js"; import { CURRENT_DEPLOYMENT_LABEL } from "./consts.js"; +import { RunEngine } from "./index.js"; + +function assertNonNullable(value: T): asserts value is NonNullable { + expect(value).toBeDefined(); + expect(value).not.toBeNull(); +} describe("RunEngine", () => { - containerTest( - "Trigger a simple run", - { timeout: 15_000 }, - async ({ postgresContainer, prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ + containerTest("Trigger a simple run", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await 
setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, + prisma + ); + expect(run).toBeDefined(); + expect(run.friendlyId).toBe("run_1234"); + + //check it's actually in the db + const runFromDb = await prisma.taskRun.findUnique({ + where: { + friendlyId: "run_1234", }, + }); + expect(runFromDb).toBeDefined(); + expect(runFromDb?.id).toBe(run.id); + + const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: run.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + assertNonNullable(snapshot); + expect(snapshot?.executionStatus).toBe("QUEUED"); + + //check the waitpoint is created + const runWaitpoint = await prisma.waitpoint.findMany({ + where: { + 
completedByTaskRunId: run.id, + }, + }); + expect(runWaitpoint.length).toBe(1); + expect(runWaitpoint[0].type).toBe("RUN"); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); + expect(queueLength).toBe(1); + + //concurrency before + const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyBefore).toBe(0); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + }); + expect(dequeued?.action).toBe("SCHEDULE_RUN"); + + if (dequeued?.action !== "SCHEDULE_RUN") { + throw new Error("Expected action to be START_RUN"); + } + + expect(dequeued.payload.run.id).toBe(run.id); + expect(dequeued.payload.run.attemptNumber).toBe(1); + expect(dequeued.payload.execution.status).toBe("DEQUEUED_FOR_EXECUTION"); + + const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyAfter).toBe(1); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued.payload.run.id, + snapshotId: dequeued.payload.execution.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + } finally { + engine.quit(); + } + }); + + containerTest("Complete a waitpoint", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + 
defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, }, - tracer: trace.getTracer("test", "0.0.0"), + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + const childRun = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + }, + prisma + ); + + const childSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: childRun.id, + }, + orderBy: { + createdAt: "desc", + }, }); + assertNonNullable(childSnapshot); + expect(childSnapshot.executionStatus).toBe("QUEUED"); - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: 
authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - expect(run).toBeDefined(); - expect(run.friendlyId).toBe("run_1234"); - - //check it's actually in the db - const runFromDb = await prisma.taskRun.findUnique({ - where: { - friendlyId: "run_1234", - }, - }); - expect(runFromDb).toBeDefined(); - expect(runFromDb?.id).toBe(run.id); + const parentSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: parentRun.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + assertNonNullable(parentSnapshot); + expect(parentSnapshot.executionStatus).toBe("QUEUED_WITH_WAITPOINTS"); - const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: run.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - expect(snapshot).toBeDefined(); - expect(snapshot?.executionStatus).toBe("QUEUED"); - - //check the waitpoint is created - const runWaitpoint = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpoint.length).toBe(1); - expect(runWaitpoint[0].type).toBe("RUN"); - - //check the queue length - const queueLength = await engine.runQueue.lengthOfQueue( - authenticatedEnvironment, - run.queue - ); - expect(queueLength).toBe(1); - - //concurrency before - const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyBefore).toBe(0); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - }); - expect(dequeued?.action).toBe("SCHEDULE_RUN"); - - if (dequeued?.action !== "SCHEDULE_RUN") { - throw new Error("Expected action to be START_RUN"); - } - - expect(dequeued.payload.run.id).toBe(run.id); - 
expect(dequeued.payload.run.attemptNumber).toBe(1); - expect(dequeued.payload.execution.status).toBe("DEQUEUED_FOR_EXECUTION"); - - const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyAfter).toBe(1); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued.payload.run.id, - snapshotId: dequeued.payload.execution.id, - }); - expect(attemptResult.run.id).toBe(run.id); - expect(attemptResult.run.status).toBe("EXECUTING"); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - } finally { - engine.quit(); - } + //check the waitpoint blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + assertNonNullable(runWaitpoint); + expect(runWaitpoint.waitpoint.type).toBe("RUN"); + expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + + await engine.completeWaitpoint({ + id: runWaitpoint.waitpointId, + output: { value: "{}", type: "application/json" }, + }); + + const waitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: runWaitpoint.waitpointId, + }, + }); + expect(waitpointAfter?.completedAt).not.toBeNull(); + expect(waitpointAfter?.status).toBe("COMPLETED"); + + const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointAfter).toBeNull(); + + //parent snapshot + const parentSnapshotAfter = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: parentRun.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + assertNonNullable(parentSnapshotAfter); + expect(parentSnapshotAfter.executionStatus).toBe("QUEUED"); + } finally { + engine.quit(); } - ); + }); //todo triggerAndWait diff --git a/internal-packages/run-engine/src/engine/index.ts 
b/internal-packages/run-engine/src/engine/index.ts index fa916a408b..c7df502d01 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -78,7 +78,6 @@ type TriggerParams = { ttl?: string; tags: string[]; parentTaskRunId?: string; - parentTaskRunAttemptId?: string; rootTaskRunId?: string; batchId?: string; resumeParentOnCompletion?: boolean; @@ -191,7 +190,6 @@ export class RunEngine { ttl, tags, parentTaskRunId, - parentTaskRunAttemptId, rootTaskRunId, batchId, resumeParentOnCompletion, @@ -249,7 +247,6 @@ export class RunEngine { connect: tags.map((id) => ({ id })), }, parentTaskRunId, - parentTaskRunAttemptId, rootTaskRunId, batchId, resumeParentOnCompletion, @@ -1117,21 +1114,23 @@ export class RunEngine { }, }); - await this.runQueue.enqueueMessage({ - env, - masterQueue: run.masterQueue, - message: { - runId: run.id, - taskIdentifier: run.taskIdentifier, - orgId: env.organization.id, - projectId: env.project.id, - environmentId: env.id, - environmentType: env.type, - queue: run.queue, - concurrencyKey: run.concurrencyKey ?? undefined, - timestamp: Date.now(), - }, - }); + //todo instead this should be a call to unblock the run + //we don't want to free up all the concurrency, so this isn't good + // await this.runQueue.enqueueMessage({ + // env, + // masterQueue: run.masterQueue, + // message: { + // runId: run.id, + // taskIdentifier: run.taskIdentifier, + // orgId: env.organization.id, + // projectId: env.project.id, + // environmentId: env.id, + // environmentType: env.type, + // queue: run.queue, + // concurrencyKey: run.concurrencyKey ?? 
undefined, + // timestamp: Date.now(), + // }, + // }); }); } @@ -1187,7 +1186,7 @@ export class RunEngine { //todo release concurrency and make sure the run isn't in the queue // await this.runQueue.blockMessage(orgId, runId); - const taskWaitpoint = tx.taskRunWaitpoint.create({ + const taskWaitpoint = await tx.taskRunWaitpoint.create({ data: { taskRunId: runId, waitpointId: waitpoint.id, @@ -1203,9 +1202,11 @@ export class RunEngine { switch (latestSnapshot.executionStatus) { case "QUEUED": { newStatus = "QUEUED_WITH_WAITPOINTS"; + break; } case "EXECUTING": { newStatus = "EXECUTING_WITH_WAITPOINTS"; + break; } } @@ -1223,10 +1224,6 @@ export class RunEngine { }); } } - - //this run is now blocked, so we change the state - - //todo we need to update the relevant snapshot as well }); } From 614ca8f9dd8f3d2235d1ecabfc1ef9f84d25ad5b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 18 Oct 2024 10:24:43 +0100 Subject: [PATCH 044/485] Started on the completeRunAttempt method --- .../run-engine/src/engine/index.test.ts | 2 + .../run-engine/src/engine/index.ts | 46 ++++++++++++++++++- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 0d984d1bf6..4df1d8d03d 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -266,6 +266,8 @@ describe("RunEngine", () => { expect(runWaitpoint.waitpoint.type).toBe("RUN"); expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + //todo update this test so the child run is completed instead, of completing the waitpoint explicitly + await engine.completeWaitpoint({ id: runWaitpoint.waitpointId, output: { value: "{}", type: "application/json" }, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index c7df502d01..d542e9eb46 100644 --- 
a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -6,7 +6,9 @@ import { MachinePresetName, parsePacket, QueueOptions, + sanitizeError, TaskRunExecution, + TaskRunExecutionResult, TaskRunInternalError, } from "@trigger.dev/core/v3"; import { @@ -887,6 +889,48 @@ export class RunEngine { }); } + /** How a run is completed */ + async completeRunAttempt({ + runId, + snapshotId, + completion, + tx, + }: { + runId: string; + snapshotId: string; + completion: TaskRunExecutionResult; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? this.prisma; + + //todo + //1. lock the run + //2. get the latest snapshot + //3. deal with completion errors + //4. update the run status, create final snapshot + //5. complete waitpoints + + return this.#trace("createRunAttempt", { runId, snapshotId }, async (span) => { + return this.runLock.lock([runId], 5_000, async (signal) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (!latestSnapshot) { + throw new Error(`No execution snapshot found for TaskRun ${runId}`); + } + + if (latestSnapshot.id !== snapshotId) { + throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); + } + + span.setAttribute("completionStatus", completion.ok); + + if (completion.ok) { + } else { + const error = sanitizeError(completion.error); + } + }); + }); + } + /** This is called to get the */ async resumeRun({ runId, @@ -900,8 +944,6 @@ export class RunEngine { async waitForDuration() {} - async complete(runId: string, completion: any) {} - async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} /** This completes a waitpoint and updates all entries so the run isn't blocked, From ae6ffb2ca8da9999ce0fd2328b3bd48f58e99b64 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 18 Oct 2024 17:26:16 +0100 Subject: [PATCH 045/485] The happy path for complete attempt is working in the engine --- 
.../run-engine/src/engine/index.test.ts | 14 +++--- .../run-engine/src/engine/index.ts | 43 ++++++++++++++++--- 2 files changed, 46 insertions(+), 11 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 4df1d8d03d..3605d3b558 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -266,11 +266,15 @@ describe("RunEngine", () => { expect(runWaitpoint.waitpoint.type).toBe("RUN"); expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); - //todo update this test so the child run is completed instead, of completing the waitpoint explicitly - - await engine.completeWaitpoint({ - id: runWaitpoint.waitpointId, - output: { value: "{}", type: "application/json" }, + await engine.completeRunAttempt({ + runId: childRun.id, + snapshotId: childSnapshot.id, + completion: { + id: childRun.id, + ok: true, + output: '{"foo":"bar"}', + outputType: "application/json", + }, }); const waitpointAfter = await prisma.waitpoint.findFirst({ diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index d542e9eb46..87186da1fa 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -894,15 +894,11 @@ export class RunEngine { runId, snapshotId, completion, - tx, }: { runId: string; snapshotId: string; completion: TaskRunExecutionResult; - tx?: PrismaClientOrTransaction; }) { - const prisma = tx ?? this.prisma; - //todo //1. lock the run //2. get the latest snapshot @@ -910,9 +906,9 @@ export class RunEngine { //4. update the run status, create final snapshot //5. 
complete waitpoints - return this.#trace("createRunAttempt", { runId, snapshotId }, async (span) => { + return this.#trace("completeRunAttempt", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); if (!latestSnapshot) { throw new Error(`No execution snapshot found for TaskRun ${runId}`); } @@ -924,8 +920,43 @@ export class RunEngine { span.setAttribute("completionStatus", completion.ok); if (completion.ok) { + const run = await this.prisma.taskRun.update({ + where: { id: runId }, + data: { + status: "COMPLETED_SUCCESSFULLY", + completedAt: new Date(), + output: completion.output, + outputType: completion.outputType, + }, + select: { + associatedWaitpoint: { + select: { + id: true, + }, + }, + project: { + select: { + organizationId: true, + }, + }, + }, + }); + await this.runQueue.acknowledgeMessage(run.project.organizationId, runId); + + if (!run.associatedWaitpoint) { + throw new ServiceValidationError("No associated waitpoint found", 400); + } + + await this.completeWaitpoint({ + id: run.associatedWaitpoint.id, + output: completion.output + ? 
{ value: completion.output, type: completion.outputType } + : undefined, + }); } else { const error = sanitizeError(completion.error); + //todo look at CompleteAttemptService + throw new NotImplementedError("TaskRun completion error handling not implemented yet"); } }); }); From 2a881fc022636882595d953e793088cf4bcb29d4 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 18 Oct 2024 18:47:55 +0100 Subject: [PATCH 046/485] Create the final execution snapshot --- internal-packages/run-engine/src/engine/index.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 87186da1fa..3113c2722b 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -927,6 +927,14 @@ export class RunEngine { completedAt: new Date(), output: completion.output, outputType: completion.outputType, + executionSnapshots: { + create: { + executionStatus: "FINISHED", + description: "Task completed successfully", + runStatus: "COMPLETED_SUCCESSFULLY", + attemptNumber: latestSnapshot.attemptNumber, + }, + }, }, select: { associatedWaitpoint: { From a4781e127d2c75910751a56ff101d9ecb2e54d93 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Sun, 20 Oct 2024 11:31:31 +0100 Subject: [PATCH 047/485] Started work on a MessageQueue --- internal-packages/message-queue/README.md | 7 + internal-packages/message-queue/package.json | 24 + internal-packages/message-queue/src/index.ts | 2 + .../message-queue/src/queue.test.ts | 329 ++++++++++++++ internal-packages/message-queue/src/queue.ts | 418 ++++++++++++++++++ internal-packages/message-queue/tsconfig.json | 25 ++ .../message-queue/vitest.config.ts | 8 + pnpm-lock.yaml | 41 +- 8 files changed, 853 insertions(+), 1 deletion(-) create mode 100644 internal-packages/message-queue/README.md create mode 100644 internal-packages/message-queue/package.json create mode 100644 
internal-packages/message-queue/src/index.ts create mode 100644 internal-packages/message-queue/src/queue.test.ts create mode 100644 internal-packages/message-queue/src/queue.ts create mode 100644 internal-packages/message-queue/tsconfig.json create mode 100644 internal-packages/message-queue/vitest.config.ts diff --git a/internal-packages/message-queue/README.md b/internal-packages/message-queue/README.md new file mode 100644 index 0000000000..e34e410135 --- /dev/null +++ b/internal-packages/message-queue/README.md @@ -0,0 +1,7 @@ +# Blocking message queue + +A First-In-First-Out message queue that uses Redis. + +You can add messages to the queue with a `key` and `value`. The `key` is used to group messages together. + +When consuming messages you pass in an array of `keys` to pull messages from. Messages are returned in the order they were added to the queue. If there are no messages for the keys, it will block other consumers for those keys until the timeout is hit. This makes it easy to use as a message queue. 
diff --git a/internal-packages/message-queue/package.json b/internal-packages/message-queue/package.json new file mode 100644 index 0000000000..558d7298b4 --- /dev/null +++ b/internal-packages/message-queue/package.json @@ -0,0 +1,24 @@ +{ + "name": "@internal/message-queue", + "private": true, + "version": "0.0.1", + "main": "./src/index.ts", + "types": "./src/index.ts", + "type": "module", + "dependencies": { + "@opentelemetry/api": "^1.9.0", + "@trigger.dev/core": "workspace:*", + "ioredis": "^5.3.2", + "nanoid": "^5.0.7", + "typescript": "^5.5.4", + "zod": "3.22.3" + }, + "devDependencies": { + "@internal/testcontainers": "workspace:*", + "vitest": "^1.4.0" + }, + "scripts": { + "typecheck": "tsc --noEmit", + "test": "vitest" + } +} diff --git a/internal-packages/message-queue/src/index.ts b/internal-packages/message-queue/src/index.ts new file mode 100644 index 0000000000..a5893efc83 --- /dev/null +++ b/internal-packages/message-queue/src/index.ts @@ -0,0 +1,2 @@ +export * from "./queue"; +export * from "./worker"; diff --git a/internal-packages/message-queue/src/queue.test.ts b/internal-packages/message-queue/src/queue.test.ts new file mode 100644 index 0000000000..33fd104d36 --- /dev/null +++ b/internal-packages/message-queue/src/queue.test.ts @@ -0,0 +1,329 @@ +import { redisTest } from "@internal/testcontainers"; +import { describe } from "node:test"; +import { expect } from "vitest"; +import { z } from "zod"; +import { MessageQueue } from "./queue.js"; +import { Logger } from "@trigger.dev/core/logger"; + +describe("MessageQueue", () => { + redisTest("publish/consume", { timeout: 20_000 }, async ({ redisContainer }) => { + const queue = new MessageQueue({ + name: "test-1", + schema: { + test: z.object({ + value: z.number(), + }), + }, + redisOptions: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + }, + logger: new Logger("test", "log"), + }); + + try { + await queue.publish({ + key: "a", + 
valueType: "test", + value: { value: 1 }, + visibilityTimeoutMs: 2000, + }); + await queue.publish({ + key: "a", + valueType: "test", + value: { value: 2 }, + visibilityTimeoutMs: 2000, + }); + await queue.publish({ + key: "b", + valueType: "test", + value: { value: 3 }, + visibilityTimeoutMs: 2000, + }); + await queue.publish({ + key: "b", + valueType: "test", + value: { value: 4 }, + visibilityTimeoutMs: 2000, + }); + //todo + // expect(await queue.size()).toBe(4); + + const items = await queue.consume({ keys: ["a", "b"], count: 10 }); + expect(items.length).toBe(2); + expect(items).toEqual({ + id: "1", + job: "test", + item: { value: 1 }, + visibilityTimeoutMs: 2000, + attempt: 0, + }); + expect(await queue.size()).toBe(1); + expect(await queue.size({ includeFuture: true })).toBe(2); + + // await queue.ack(first.id); + // expect(await queue.size({ includeFuture: true })).toBe(1); + + // const [second] = await queue.dequeue(1); + // expect(second).toEqual({ + // id: "2", + // job: "test", + // item: { value: 2 }, + // visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + + // await queue.ack(second.id); + // expect(await queue.size({ includeFuture: true })).toBe(0); + } finally { + await queue.close(); + } + }); + + // redisTest("no items", { timeout: 20_000 }, async ({ redisContainer }) => { + // const queue = new SimpleQueue({ + // name: "test-1", + // schema: { + // test: z.object({ + // value: z.number(), + // }), + // }, + // redisOptions: { + // host: redisContainer.getHost(), + // port: redisContainer.getPort(), + // password: redisContainer.getPassword(), + // }, + // logger: new Logger("test", "log"), + // }); + + // try { + // const missOne = await queue.dequeue(1); + // expect(missOne).toEqual([]); + + // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 2000 }); + // const [hitOne] = await queue.dequeue(1); + // expect(hitOne).toEqual({ + // id: "1", + // job: "test", + // item: { value: 1 }, + // 
visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + + // const missTwo = await queue.dequeue(1); + // expect(missTwo).toEqual([]); + // } finally { + // await queue.close(); + // } + // }); + + // redisTest("future item", { timeout: 20_000 }, async ({ redisContainer }) => { + // const queue = new SimpleQueue({ + // name: "test-1", + // schema: { + // test: z.object({ + // value: z.number(), + // }), + // }, + // redisOptions: { + // host: redisContainer.getHost(), + // port: redisContainer.getPort(), + // password: redisContainer.getPassword(), + // }, + // logger: new Logger("test", "log"), + // }); + + // try { + // await queue.enqueue({ + // id: "1", + // job: "test", + // item: { value: 1 }, + // availableAt: new Date(Date.now() + 50), + // visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + + // const miss = await queue.dequeue(1); + // expect(miss).toEqual([]); + + // await new Promise((resolve) => setTimeout(resolve, 50)); + + // const [first] = await queue.dequeue(); + // expect(first).toEqual({ + // id: "1", + // job: "test", + // item: { value: 1 }, + // visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + // } finally { + // await queue.close(); + // } + // }); + + // redisTest("invisibility timeout", { timeout: 20_000 }, async ({ redisContainer }) => { + // const queue = new SimpleQueue({ + // name: "test-1", + // schema: { + // test: z.object({ + // value: z.number(), + // }), + // }, + // redisOptions: { + // host: redisContainer.getHost(), + // port: redisContainer.getPort(), + // password: redisContainer.getPassword(), + // }, + // logger: new Logger("test", "log"), + // }); + + // try { + // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 1_000 }); + + // const [first] = await queue.dequeue(); + // expect(first).toEqual({ + // id: "1", + // job: "test", + // item: { value: 1 }, + // visibilityTimeoutMs: 1_000, + // attempt: 0, + // }); + + // const missImmediate = await queue.dequeue(1); + // 
expect(missImmediate).toEqual([]); + + // await new Promise((resolve) => setTimeout(resolve, 1_000)); + + // const [second] = await queue.dequeue(); + // expect(second).toEqual({ + // id: "1", + // job: "test", + // item: { value: 1 }, + // visibilityTimeoutMs: 1_000, + // attempt: 0, + // }); + // } finally { + // await queue.close(); + // } + // }); + + // redisTest("dequeue multiple items", { timeout: 20_000 }, async ({ redisContainer }) => { + // const queue = new SimpleQueue({ + // name: "test-1", + // schema: { + // test: z.object({ + // value: z.number(), + // }), + // }, + // redisOptions: { + // host: redisContainer.getHost(), + // port: redisContainer.getPort(), + // password: redisContainer.getPassword(), + // }, + // logger: new Logger("test", "log"), + // }); + + // try { + // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 2000 }); + // await queue.enqueue({ id: "2", job: "test", item: { value: 2 }, visibilityTimeoutMs: 2000 }); + // await queue.enqueue({ id: "3", job: "test", item: { value: 3 }, visibilityTimeoutMs: 2000 }); + + // expect(await queue.size()).toBe(3); + + // const dequeued = await queue.dequeue(2); + // expect(dequeued).toHaveLength(2); + // expect(dequeued[0]).toEqual({ + // id: "1", + // job: "test", + // item: { value: 1 }, + // visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + // expect(dequeued[1]).toEqual({ + // id: "2", + // job: "test", + // item: { value: 2 }, + // visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + + // expect(await queue.size()).toBe(1); + // expect(await queue.size({ includeFuture: true })).toBe(3); + + // await queue.ack(dequeued[0].id); + // await queue.ack(dequeued[1].id); + + // expect(await queue.size({ includeFuture: true })).toBe(1); + + // const [last] = await queue.dequeue(1); + // expect(last).toEqual({ + // id: "3", + // job: "test", + // item: { value: 3 }, + // visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + + // await queue.ack(last.id); + 
// expect(await queue.size({ includeFuture: true })).toBe(0); + // } finally { + // await queue.close(); + // } + // }); + + // redisTest("Dead Letter Queue", { timeout: 20_000 }, async ({ redisContainer }) => { + // const queue = new SimpleQueue({ + // name: "test-dlq", + // schema: { + // test: z.object({ + // value: z.number(), + // }), + // }, + // redisOptions: { + // host: redisContainer.getHost(), + // port: redisContainer.getPort(), + // password: redisContainer.getPassword(), + // }, + // logger: new Logger("test", "log"), + // }); + + // try { + // // Enqueue an item + // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 2000 }); + // expect(await queue.size()).toBe(1); + // expect(await queue.sizeOfDeadLetterQueue()).toBe(0); + + // // Move item to DLQ + // await queue.moveToDeadLetterQueue("1", "Test error message"); + // expect(await queue.size()).toBe(0); + // expect(await queue.sizeOfDeadLetterQueue()).toBe(1); + + // // Attempt to dequeue from the main queue should return empty + // const dequeued = await queue.dequeue(1); + // expect(dequeued).toEqual([]); + + // // Redrive item from DLQ + // await queue.redriveFromDeadLetterQueue("1"); + // expect(await queue.size()).toBe(1); + // expect(await queue.sizeOfDeadLetterQueue()).toBe(0); + + // // Dequeue the redriven item + // const [redrivenItem] = await queue.dequeue(1); + // expect(redrivenItem).toEqual({ + // id: "1", + // job: "test", + // item: { value: 1 }, + // visibilityTimeoutMs: 2000, + // attempt: 0, + // }); + + // // Acknowledge the item + // await queue.ack(redrivenItem.id); + // expect(await queue.size()).toBe(0); + // expect(await queue.sizeOfDeadLetterQueue()).toBe(0); + // } finally { + // await queue.close(); + // } + // }); +}); diff --git a/internal-packages/message-queue/src/queue.ts b/internal-packages/message-queue/src/queue.ts new file mode 100644 index 0000000000..8dd0fa65e7 --- /dev/null +++ b/internal-packages/message-queue/src/queue.ts 
@@ -0,0 +1,418 @@ +import { Logger } from "@trigger.dev/core/logger"; +import Redis, { type Callback, type RedisOptions, type Result } from "ioredis"; +import { z } from "zod"; + +export interface MessageCatalogSchema { + [key: string]: z.ZodFirstPartySchemaTypes | z.ZodDiscriminatedUnion; +} + +export type MessageCatalogKey = keyof TMessageCatalog; +export type MessageCatalogValue< + TMessageCatalog extends MessageCatalogSchema, + TKey extends MessageCatalogKey, +> = z.infer; + +export class MessageQueue { + name: string; + private redis: Redis; + private schema: TMessageCatalog; + private logger: Logger; + + constructor({ + name, + schema, + redisOptions, + logger, + }: { + name: string; + schema: TMessageCatalog; + redisOptions: RedisOptions; + logger?: Logger; + }) { + this.name = name; + this.redis = new Redis({ + ...redisOptions, + keyPrefix: `{fifoqueue:${name}:}`, + retryStrategy(times) { + const delay = Math.min(times * 50, 1000); + return delay; + }, + maxRetriesPerRequest: 3, + }); + this.#registerCommands(); + this.schema = schema; + + this.logger = logger ?? 
new Logger("SimpleQueue", "debug"); + + this.redis.on("error", (error) => { + this.logger.error(`Redis Error for fifo queue ${this.name}:`, { queue: this.name, error }); + }); + + this.redis.on("connect", () => { + this.logger.log(`Redis connected for fifo queue ${this.name}`); + }); + + this.redis.on("reconnecting", () => { + this.logger.warn(`Redis reconnecting for fifo queue ${this.name}`); + }); + + this.redis.on("close", () => { + this.logger.warn(`Redis connection closed for fifo queue ${this.name}`); + }); + } + + async publish({ + key, + valueType, + value, + attempt = 0, + visibilityTimeoutMs = 10_000, + }: { + key: string; + valueType: MessageCatalogKey; + value: MessageCatalogValue>; + attempt?: number; + visibilityTimeoutMs?: number; + }): Promise { + try { + const serializedValue = JSON.stringify({ value, valueType, attempt, visibilityTimeoutMs }); + + const result = await this.redis.rpush(key, serializedValue); + if (result <= 0) { + throw new Error("publish operation failed"); + } + } catch (e) { + this.logger.error(`MessageQueue ${this.name}.enqueue(): error enqueuing`, { + queue: this.name, + error: e, + key, + valueType, + value, + attempt, + }); + throw e; + } + } + + /** + Consume messages with the passed in keys. + This will hold a connection open up until the timeout (in seconds) if there are no messages yet. + If the message isn't confirmed to have been read in the visibility timeout, it will be reattempted. 
+ */ + async consume({ + keys, + timeout = 10, + count = 10, + }: { + keys: string[]; + timeout?: number; + count?: number; + }): Promise< + Array<{ + key: string; + valueType: MessageCatalogKey; + value: MessageCatalogValue>; + visibilityTimeoutMs: number; + attempt: number; + }> + > { + try { + const results = await this.redis.dequeueItems(keys.length, ...keys, timeout, count); + const parsed = JSON.parse(results); + + const dequeuedItems = []; + + for (const [key, serializedItem] of parsed) { + const parsedItem = JSON.parse(serializedItem); + if (typeof parsedItem.valueType !== "string") { + this.logger.error(`Invalid item in queue`, { queue: this.name, key, item: parsedItem }); + continue; + } + + const schema = this.schema[parsedItem.valueType]; + + if (!schema) { + this.logger.error(`Invalid item in queue, schema not found`, { + queue: this.name, + key, + item: parsedItem, + }); + continue; + } + + const validatedItem = schema.safeParse(parsedItem.item); + + if (!validatedItem.success) { + this.logger.error("Invalid item in queue", { + queue: this.name, + id: key, + item: parsedItem, + errors: validatedItem.error, + attempt: parsedItem.attempt, + }); + continue; + } + + const visibilityTimeoutMs = parsedItem.visibilityTimeoutMs as number; + // const invisibleUntil = now + visibilityTimeoutMs; + + // await this.redis.zadd(`queue`, invisibleUntil, id); + + dequeuedItems.push({ + key, + valueType: parsedItem.valueType, + value: validatedItem.data, + visibilityTimeoutMs, + attempt: parsedItem.attempt ?? 
0, + }); + } + + return dequeuedItems; + } catch (e) { + this.logger.error(`SimpleQueue ${this.name}.dequeue(): error dequeuing`, { + queue: this.name, + error: e, + count, + }); + throw e; + } + } + + async ack(id: string): Promise { + try { + await this.redis.ackItem(`queue`, `items`, id); + } catch (e) { + this.logger.error(`SimpleQueue ${this.name}.ack(): error acknowledging item`, { + queue: this.name, + error: e, + id, + }); + throw e; + } + } + + async size({ includeFuture = false }: { includeFuture?: boolean } = {}): Promise { + try { + if (includeFuture) { + // If includeFuture is true, return the total count of all items + return await this.redis.zcard(`queue`); + } else { + // If includeFuture is false, return the count of items available now + const now = Date.now(); + return await this.redis.zcount(`queue`, "-inf", now); + } + } catch (e) { + this.logger.error(`SimpleQueue ${this.name}.size(): error getting queue size`, { + queue: this.name, + error: e, + includeFuture, + }); + throw e; + } + } + + async moveToDeadLetterQueue(id: string, errorMessage: string): Promise { + try { + const result = await this.redis.moveToDeadLetterQueue( + `queue`, + `items`, + `dlq`, + `dlq:items`, + id, + errorMessage + ); + + if (result !== 1) { + throw new Error("Move to Dead Letter Queue operation failed"); + } + } catch (e) { + this.logger.error( + `SimpleQueue ${this.name}.moveToDeadLetterQueue(): error moving item to DLQ`, + { + queue: this.name, + error: e, + id, + errorMessage, + } + ); + throw e; + } + } + + async sizeOfDeadLetterQueue(): Promise { + try { + return await this.redis.zcard(`dlq`); + } catch (e) { + this.logger.error(`SimpleQueue ${this.name}.dlqSize(): error getting DLQ size`, { + queue: this.name, + error: e, + }); + throw e; + } + } + + async redriveFromDeadLetterQueue(id: string): Promise { + try { + const result = await this.redis.redriveFromDeadLetterQueue( + `queue`, + `items`, + `dlq`, + `dlq:items`, + id + ); + + if (result !== 1) { + 
throw new Error("Redrive from Dead Letter Queue operation failed"); + } + } catch (e) { + this.logger.error( + `SimpleQueue ${this.name}.redriveFromDeadLetterQueue(): error redriving item from DLQ`, + { + queue: this.name, + error: e, + id, + } + ); + throw e; + } + } + + async close(): Promise { + await this.redis.quit(); + } + + #registerCommands() { + this.redis.defineCommand("dequeueItems", { + numberOfKeys: 0, + lua: ` + local numKeys = tonumber(ARGV[1]) + local keys = {} + for i = 2, numKeys + 1 do + table.insert(keys, ARGV[i]) + end + local timeout = tonumber(ARGV[numKeys + 2]) + local count = tonumber(ARGV[numKeys + 3]) + + local result = redis.call('BLMPOP', timeout, numKeys, unpack(keys), 'LEFT', 'COUNT', count) + + if not result then + return '[]' + end + + local key = result[1] + local items = result[2] + local dequeued = {} + + for i = 1, #items do + table.insert(dequeued, {key, items[i]}) + end + + return cjson.encode(dequeued) + `, + }); + + this.redis.defineCommand("ackItem", { + numberOfKeys: 2, + lua: ` + local queue = KEYS[1] + local items = KEYS[2] + local id = ARGV[1] + + redis.call('ZREM', queue, id) + redis.call('HDEL', items, id) + + return 1 + `, + }); + + this.redis.defineCommand("moveToDeadLetterQueue", { + numberOfKeys: 4, + lua: ` + local queue = KEYS[1] + local items = KEYS[2] + local dlq = KEYS[3] + local dlqItems = KEYS[4] + local id = ARGV[1] + local errorMessage = ARGV[2] + + local item = redis.call('HGET', items, id) + if not item then + return 0 + end + + local parsedItem = cjson.decode(item) + parsedItem.errorMessage = errorMessage + + redis.call('ZREM', queue, id) + redis.call('HDEL', items, id) + + redis.call('ZADD', dlq, redis.call('TIME')[1], id) + redis.call('HSET', dlqItems, id, cjson.encode(parsedItem)) + + return 1 + `, + }); + + this.redis.defineCommand("redriveFromDeadLetterQueue", { + numberOfKeys: 4, + lua: ` + local queue = KEYS[1] + local items = KEYS[2] + local dlq = KEYS[3] + local dlqItems = KEYS[4] + local id = 
ARGV[1] + + local item = redis.call('HGET', dlqItems, id) + if not item then + return 0 + end + + local parsedItem = cjson.decode(item) + parsedItem.errorMessage = nil + + redis.call('ZREM', dlq, id) + redis.call('HDEL', dlqItems, id) + + redis.call('ZADD', queue, redis.call('TIME')[1], id) + redis.call('HSET', items, id, cjson.encode(parsedItem)) + + return 1 + `, + }); + } +} + +declare module "ioredis" { + interface RedisCommander { + dequeueItems( + numKeys: number, + ...args: [...keys: string[], timeout: number, count: number] + ): Result; + + ackItem( + queue: string, + items: string, + id: string, + callback?: Callback + ): Result; + + redriveFromDeadLetterQueue( + queue: string, + items: string, + dlq: string, + dlqItems: string, + id: string, + callback?: Callback + ): Result; + + moveToDeadLetterQueue( + queue: string, + items: string, + dlq: string, + dlqItems: string, + id: string, + errorMessage: string, + callback?: Callback + ): Result; + } +} diff --git a/internal-packages/message-queue/tsconfig.json b/internal-packages/message-queue/tsconfig.json new file mode 100644 index 0000000000..766df37eae --- /dev/null +++ b/internal-packages/message-queue/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable"], + "module": "CommonJS", + "moduleResolution": "Node", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "types": ["vitest/globals"], + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": true, + "skipLibCheck": true, + "noEmit": true, + "strict": true, + "paths": { + "@internal/testcontainers": ["../../internal-packages/testcontainers/src/index"], + "@internal/testcontainers/*": ["../../internal-packages/testcontainers/src/*"], + "@trigger.dev/core": ["../../packages/core/src/index"], + "@trigger.dev/core/*": ["../../packages/core/src/*"] + } + }, + "exclude": ["node_modules"] +} diff --git 
a/internal-packages/message-queue/vitest.config.ts b/internal-packages/message-queue/vitest.config.ts new file mode 100644 index 0000000000..4afd926425 --- /dev/null +++ b/internal-packages/message-queue/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + test: { + include: ["**/*.test.ts"], + globals: true, + }, +}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bdb0d98e96..d87492793c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -867,6 +867,34 @@ importers: specifier: ^4.9.4 version: 4.9.5 + internal-packages/message-queue: + dependencies: + '@opentelemetry/api': + specifier: ^1.9.0 + version: 1.9.0 + '@trigger.dev/core': + specifier: workspace:* + version: link:../../packages/core + ioredis: + specifier: ^5.3.2 + version: 5.3.2 + nanoid: + specifier: ^5.0.7 + version: 5.0.7 + typescript: + specifier: ^5.5.4 + version: 5.5.4 + zod: + specifier: 3.22.3 + version: 3.22.3 + devDependencies: + '@internal/testcontainers': + specifier: workspace:* + version: link:../testcontainers + vitest: + specifier: ^1.4.0 + version: 1.6.0(@types/node@20.14.14) + internal-packages/otlp-importer: dependencies: long: @@ -951,7 +979,7 @@ importers: version: 3.3.7 redlock: specifier: 5.0.0-beta.2 - version: 5.0.0-beta.2(patch_hash=rwyegdki7iserrd7fgjwxkhnlu) + version: 5.0.0-beta.2 typescript: specifier: ^5.5.4 version: 5.5.4 @@ -23137,6 +23165,10 @@ packages: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} dev: true + /node-abort-controller@3.1.1: + resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==} + dev: false + /node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} @@ -25844,6 +25876,13 @@ packages: redis-errors: 1.2.0 dev: false + 
/redlock@5.0.0-beta.2: + resolution: {integrity: sha512-2RDWXg5jgRptDrB1w9O/JgSZC0j7y4SlaXnor93H/UJm/QyDiFgBKNtrh0TI6oCXqYSaSoXxFh6Sd3VtYfhRXw==} + engines: {node: '>=12'} + dependencies: + node-abort-controller: 3.1.1 + dev: false + /reduce-css-calc@2.1.8: resolution: {integrity: sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg==} dependencies: From 5851fd318325ae511a27c07a3482255af39bf70b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 21 Oct 2024 13:25:52 +0100 Subject: [PATCH 048/485] =?UTF-8?q?We=20don=E2=80=99t=20need=20a=20message?= =?UTF-8?q?=20queue,=20we=E2=80=99re=20going=20to=20just=20read=20from=20t?= =?UTF-8?q?he=20snapshots?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal-packages/message-queue/README.md | 7 - internal-packages/message-queue/package.json | 24 - internal-packages/message-queue/src/index.ts | 2 - .../message-queue/src/queue.test.ts | 329 -------------- internal-packages/message-queue/src/queue.ts | 418 ------------------ internal-packages/message-queue/tsconfig.json | 25 -- .../message-queue/vitest.config.ts | 8 - 7 files changed, 813 deletions(-) delete mode 100644 internal-packages/message-queue/README.md delete mode 100644 internal-packages/message-queue/package.json delete mode 100644 internal-packages/message-queue/src/index.ts delete mode 100644 internal-packages/message-queue/src/queue.test.ts delete mode 100644 internal-packages/message-queue/src/queue.ts delete mode 100644 internal-packages/message-queue/tsconfig.json delete mode 100644 internal-packages/message-queue/vitest.config.ts diff --git a/internal-packages/message-queue/README.md b/internal-packages/message-queue/README.md deleted file mode 100644 index e34e410135..0000000000 --- a/internal-packages/message-queue/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Blocking message queue - -A First-In-First-Out message queue that uses Redis. 
- -You can add messages to the queue with a `key` and `value`. The `key` is used to group messages together. - -When consuming messages you pass in an array of `keys` to pull messages from. Messages are returned in the order they were added to the queue. If there are no messages for the keys, it will block other consumers for those keys until the timeout is hit. This makes it easy to use as a message queue. diff --git a/internal-packages/message-queue/package.json b/internal-packages/message-queue/package.json deleted file mode 100644 index 558d7298b4..0000000000 --- a/internal-packages/message-queue/package.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "name": "@internal/message-queue", - "private": true, - "version": "0.0.1", - "main": "./src/index.ts", - "types": "./src/index.ts", - "type": "module", - "dependencies": { - "@opentelemetry/api": "^1.9.0", - "@trigger.dev/core": "workspace:*", - "ioredis": "^5.3.2", - "nanoid": "^5.0.7", - "typescript": "^5.5.4", - "zod": "3.22.3" - }, - "devDependencies": { - "@internal/testcontainers": "workspace:*", - "vitest": "^1.4.0" - }, - "scripts": { - "typecheck": "tsc --noEmit", - "test": "vitest" - } -} diff --git a/internal-packages/message-queue/src/index.ts b/internal-packages/message-queue/src/index.ts deleted file mode 100644 index a5893efc83..0000000000 --- a/internal-packages/message-queue/src/index.ts +++ /dev/null @@ -1,2 +0,0 @@ -export * from "./queue"; -export * from "./worker"; diff --git a/internal-packages/message-queue/src/queue.test.ts b/internal-packages/message-queue/src/queue.test.ts deleted file mode 100644 index 33fd104d36..0000000000 --- a/internal-packages/message-queue/src/queue.test.ts +++ /dev/null @@ -1,329 +0,0 @@ -import { redisTest } from "@internal/testcontainers"; -import { describe } from "node:test"; -import { expect } from "vitest"; -import { z } from "zod"; -import { MessageQueue } from "./queue.js"; -import { Logger } from "@trigger.dev/core/logger"; - -describe("MessageQueue", () => { - 
redisTest("publish/consume", { timeout: 20_000 }, async ({ redisContainer }) => { - const queue = new MessageQueue({ - name: "test-1", - schema: { - test: z.object({ - value: z.number(), - }), - }, - redisOptions: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - }, - logger: new Logger("test", "log"), - }); - - try { - await queue.publish({ - key: "a", - valueType: "test", - value: { value: 1 }, - visibilityTimeoutMs: 2000, - }); - await queue.publish({ - key: "a", - valueType: "test", - value: { value: 2 }, - visibilityTimeoutMs: 2000, - }); - await queue.publish({ - key: "b", - valueType: "test", - value: { value: 3 }, - visibilityTimeoutMs: 2000, - }); - await queue.publish({ - key: "b", - valueType: "test", - value: { value: 4 }, - visibilityTimeoutMs: 2000, - }); - //todo - // expect(await queue.size()).toBe(4); - - const items = await queue.consume({ keys: ["a", "b"], count: 10 }); - expect(items.length).toBe(2); - expect(items).toEqual({ - id: "1", - job: "test", - item: { value: 1 }, - visibilityTimeoutMs: 2000, - attempt: 0, - }); - expect(await queue.size()).toBe(1); - expect(await queue.size({ includeFuture: true })).toBe(2); - - // await queue.ack(first.id); - // expect(await queue.size({ includeFuture: true })).toBe(1); - - // const [second] = await queue.dequeue(1); - // expect(second).toEqual({ - // id: "2", - // job: "test", - // item: { value: 2 }, - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - - // await queue.ack(second.id); - // expect(await queue.size({ includeFuture: true })).toBe(0); - } finally { - await queue.close(); - } - }); - - // redisTest("no items", { timeout: 20_000 }, async ({ redisContainer }) => { - // const queue = new SimpleQueue({ - // name: "test-1", - // schema: { - // test: z.object({ - // value: z.number(), - // }), - // }, - // redisOptions: { - // host: redisContainer.getHost(), - // port: redisContainer.getPort(), - // password: 
redisContainer.getPassword(), - // }, - // logger: new Logger("test", "log"), - // }); - - // try { - // const missOne = await queue.dequeue(1); - // expect(missOne).toEqual([]); - - // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 2000 }); - // const [hitOne] = await queue.dequeue(1); - // expect(hitOne).toEqual({ - // id: "1", - // job: "test", - // item: { value: 1 }, - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - - // const missTwo = await queue.dequeue(1); - // expect(missTwo).toEqual([]); - // } finally { - // await queue.close(); - // } - // }); - - // redisTest("future item", { timeout: 20_000 }, async ({ redisContainer }) => { - // const queue = new SimpleQueue({ - // name: "test-1", - // schema: { - // test: z.object({ - // value: z.number(), - // }), - // }, - // redisOptions: { - // host: redisContainer.getHost(), - // port: redisContainer.getPort(), - // password: redisContainer.getPassword(), - // }, - // logger: new Logger("test", "log"), - // }); - - // try { - // await queue.enqueue({ - // id: "1", - // job: "test", - // item: { value: 1 }, - // availableAt: new Date(Date.now() + 50), - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - - // const miss = await queue.dequeue(1); - // expect(miss).toEqual([]); - - // await new Promise((resolve) => setTimeout(resolve, 50)); - - // const [first] = await queue.dequeue(); - // expect(first).toEqual({ - // id: "1", - // job: "test", - // item: { value: 1 }, - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - // } finally { - // await queue.close(); - // } - // }); - - // redisTest("invisibility timeout", { timeout: 20_000 }, async ({ redisContainer }) => { - // const queue = new SimpleQueue({ - // name: "test-1", - // schema: { - // test: z.object({ - // value: z.number(), - // }), - // }, - // redisOptions: { - // host: redisContainer.getHost(), - // port: redisContainer.getPort(), - // password: redisContainer.getPassword(), - // }, - // 
logger: new Logger("test", "log"), - // }); - - // try { - // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 1_000 }); - - // const [first] = await queue.dequeue(); - // expect(first).toEqual({ - // id: "1", - // job: "test", - // item: { value: 1 }, - // visibilityTimeoutMs: 1_000, - // attempt: 0, - // }); - - // const missImmediate = await queue.dequeue(1); - // expect(missImmediate).toEqual([]); - - // await new Promise((resolve) => setTimeout(resolve, 1_000)); - - // const [second] = await queue.dequeue(); - // expect(second).toEqual({ - // id: "1", - // job: "test", - // item: { value: 1 }, - // visibilityTimeoutMs: 1_000, - // attempt: 0, - // }); - // } finally { - // await queue.close(); - // } - // }); - - // redisTest("dequeue multiple items", { timeout: 20_000 }, async ({ redisContainer }) => { - // const queue = new SimpleQueue({ - // name: "test-1", - // schema: { - // test: z.object({ - // value: z.number(), - // }), - // }, - // redisOptions: { - // host: redisContainer.getHost(), - // port: redisContainer.getPort(), - // password: redisContainer.getPassword(), - // }, - // logger: new Logger("test", "log"), - // }); - - // try { - // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 2000 }); - // await queue.enqueue({ id: "2", job: "test", item: { value: 2 }, visibilityTimeoutMs: 2000 }); - // await queue.enqueue({ id: "3", job: "test", item: { value: 3 }, visibilityTimeoutMs: 2000 }); - - // expect(await queue.size()).toBe(3); - - // const dequeued = await queue.dequeue(2); - // expect(dequeued).toHaveLength(2); - // expect(dequeued[0]).toEqual({ - // id: "1", - // job: "test", - // item: { value: 1 }, - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - // expect(dequeued[1]).toEqual({ - // id: "2", - // job: "test", - // item: { value: 2 }, - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - - // expect(await queue.size()).toBe(1); - // expect(await 
queue.size({ includeFuture: true })).toBe(3); - - // await queue.ack(dequeued[0].id); - // await queue.ack(dequeued[1].id); - - // expect(await queue.size({ includeFuture: true })).toBe(1); - - // const [last] = await queue.dequeue(1); - // expect(last).toEqual({ - // id: "3", - // job: "test", - // item: { value: 3 }, - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - - // await queue.ack(last.id); - // expect(await queue.size({ includeFuture: true })).toBe(0); - // } finally { - // await queue.close(); - // } - // }); - - // redisTest("Dead Letter Queue", { timeout: 20_000 }, async ({ redisContainer }) => { - // const queue = new SimpleQueue({ - // name: "test-dlq", - // schema: { - // test: z.object({ - // value: z.number(), - // }), - // }, - // redisOptions: { - // host: redisContainer.getHost(), - // port: redisContainer.getPort(), - // password: redisContainer.getPassword(), - // }, - // logger: new Logger("test", "log"), - // }); - - // try { - // // Enqueue an item - // await queue.enqueue({ id: "1", job: "test", item: { value: 1 }, visibilityTimeoutMs: 2000 }); - // expect(await queue.size()).toBe(1); - // expect(await queue.sizeOfDeadLetterQueue()).toBe(0); - - // // Move item to DLQ - // await queue.moveToDeadLetterQueue("1", "Test error message"); - // expect(await queue.size()).toBe(0); - // expect(await queue.sizeOfDeadLetterQueue()).toBe(1); - - // // Attempt to dequeue from the main queue should return empty - // const dequeued = await queue.dequeue(1); - // expect(dequeued).toEqual([]); - - // // Redrive item from DLQ - // await queue.redriveFromDeadLetterQueue("1"); - // expect(await queue.size()).toBe(1); - // expect(await queue.sizeOfDeadLetterQueue()).toBe(0); - - // // Dequeue the redriven item - // const [redrivenItem] = await queue.dequeue(1); - // expect(redrivenItem).toEqual({ - // id: "1", - // job: "test", - // item: { value: 1 }, - // visibilityTimeoutMs: 2000, - // attempt: 0, - // }); - - // // Acknowledge the item - // 
await queue.ack(redrivenItem.id); - // expect(await queue.size()).toBe(0); - // expect(await queue.sizeOfDeadLetterQueue()).toBe(0); - // } finally { - // await queue.close(); - // } - // }); -}); diff --git a/internal-packages/message-queue/src/queue.ts b/internal-packages/message-queue/src/queue.ts deleted file mode 100644 index 8dd0fa65e7..0000000000 --- a/internal-packages/message-queue/src/queue.ts +++ /dev/null @@ -1,418 +0,0 @@ -import { Logger } from "@trigger.dev/core/logger"; -import Redis, { type Callback, type RedisOptions, type Result } from "ioredis"; -import { z } from "zod"; - -export interface MessageCatalogSchema { - [key: string]: z.ZodFirstPartySchemaTypes | z.ZodDiscriminatedUnion; -} - -export type MessageCatalogKey = keyof TMessageCatalog; -export type MessageCatalogValue< - TMessageCatalog extends MessageCatalogSchema, - TKey extends MessageCatalogKey, -> = z.infer; - -export class MessageQueue { - name: string; - private redis: Redis; - private schema: TMessageCatalog; - private logger: Logger; - - constructor({ - name, - schema, - redisOptions, - logger, - }: { - name: string; - schema: TMessageCatalog; - redisOptions: RedisOptions; - logger?: Logger; - }) { - this.name = name; - this.redis = new Redis({ - ...redisOptions, - keyPrefix: `{fifoqueue:${name}:}`, - retryStrategy(times) { - const delay = Math.min(times * 50, 1000); - return delay; - }, - maxRetriesPerRequest: 3, - }); - this.#registerCommands(); - this.schema = schema; - - this.logger = logger ?? 
new Logger("SimpleQueue", "debug"); - - this.redis.on("error", (error) => { - this.logger.error(`Redis Error for fifo queue ${this.name}:`, { queue: this.name, error }); - }); - - this.redis.on("connect", () => { - this.logger.log(`Redis connected for fifo queue ${this.name}`); - }); - - this.redis.on("reconnecting", () => { - this.logger.warn(`Redis reconnecting for fifo queue ${this.name}`); - }); - - this.redis.on("close", () => { - this.logger.warn(`Redis connection closed for fifo queue ${this.name}`); - }); - } - - async publish({ - key, - valueType, - value, - attempt = 0, - visibilityTimeoutMs = 10_000, - }: { - key: string; - valueType: MessageCatalogKey; - value: MessageCatalogValue>; - attempt?: number; - visibilityTimeoutMs?: number; - }): Promise { - try { - const serializedValue = JSON.stringify({ value, valueType, attempt, visibilityTimeoutMs }); - - const result = await this.redis.rpush(key, serializedValue); - if (result <= 0) { - throw new Error("publish operation failed"); - } - } catch (e) { - this.logger.error(`MessageQueue ${this.name}.enqueue(): error enqueuing`, { - queue: this.name, - error: e, - key, - valueType, - value, - attempt, - }); - throw e; - } - } - - /** - Consume messages with the passed in keys. - This will hold a connection open up until the timeout (in seconds) if there are no messages yet. - If the message isn't confirmed to have been read in the visibility timeout, it will be reattempted. 
- */ - async consume({ - keys, - timeout = 10, - count = 10, - }: { - keys: string[]; - timeout?: number; - count?: number; - }): Promise< - Array<{ - key: string; - valueType: MessageCatalogKey; - value: MessageCatalogValue>; - visibilityTimeoutMs: number; - attempt: number; - }> - > { - try { - const results = await this.redis.dequeueItems(keys.length, ...keys, timeout, count); - const parsed = JSON.parse(results); - - const dequeuedItems = []; - - for (const [key, serializedItem] of parsed) { - const parsedItem = JSON.parse(serializedItem); - if (typeof parsedItem.valueType !== "string") { - this.logger.error(`Invalid item in queue`, { queue: this.name, key, item: parsedItem }); - continue; - } - - const schema = this.schema[parsedItem.valueType]; - - if (!schema) { - this.logger.error(`Invalid item in queue, schema not found`, { - queue: this.name, - key, - item: parsedItem, - }); - continue; - } - - const validatedItem = schema.safeParse(parsedItem.item); - - if (!validatedItem.success) { - this.logger.error("Invalid item in queue", { - queue: this.name, - id: key, - item: parsedItem, - errors: validatedItem.error, - attempt: parsedItem.attempt, - }); - continue; - } - - const visibilityTimeoutMs = parsedItem.visibilityTimeoutMs as number; - // const invisibleUntil = now + visibilityTimeoutMs; - - // await this.redis.zadd(`queue`, invisibleUntil, id); - - dequeuedItems.push({ - key, - valueType: parsedItem.valueType, - value: validatedItem.data, - visibilityTimeoutMs, - attempt: parsedItem.attempt ?? 
0, - }); - } - - return dequeuedItems; - } catch (e) { - this.logger.error(`SimpleQueue ${this.name}.dequeue(): error dequeuing`, { - queue: this.name, - error: e, - count, - }); - throw e; - } - } - - async ack(id: string): Promise { - try { - await this.redis.ackItem(`queue`, `items`, id); - } catch (e) { - this.logger.error(`SimpleQueue ${this.name}.ack(): error acknowledging item`, { - queue: this.name, - error: e, - id, - }); - throw e; - } - } - - async size({ includeFuture = false }: { includeFuture?: boolean } = {}): Promise { - try { - if (includeFuture) { - // If includeFuture is true, return the total count of all items - return await this.redis.zcard(`queue`); - } else { - // If includeFuture is false, return the count of items available now - const now = Date.now(); - return await this.redis.zcount(`queue`, "-inf", now); - } - } catch (e) { - this.logger.error(`SimpleQueue ${this.name}.size(): error getting queue size`, { - queue: this.name, - error: e, - includeFuture, - }); - throw e; - } - } - - async moveToDeadLetterQueue(id: string, errorMessage: string): Promise { - try { - const result = await this.redis.moveToDeadLetterQueue( - `queue`, - `items`, - `dlq`, - `dlq:items`, - id, - errorMessage - ); - - if (result !== 1) { - throw new Error("Move to Dead Letter Queue operation failed"); - } - } catch (e) { - this.logger.error( - `SimpleQueue ${this.name}.moveToDeadLetterQueue(): error moving item to DLQ`, - { - queue: this.name, - error: e, - id, - errorMessage, - } - ); - throw e; - } - } - - async sizeOfDeadLetterQueue(): Promise { - try { - return await this.redis.zcard(`dlq`); - } catch (e) { - this.logger.error(`SimpleQueue ${this.name}.dlqSize(): error getting DLQ size`, { - queue: this.name, - error: e, - }); - throw e; - } - } - - async redriveFromDeadLetterQueue(id: string): Promise { - try { - const result = await this.redis.redriveFromDeadLetterQueue( - `queue`, - `items`, - `dlq`, - `dlq:items`, - id - ); - - if (result !== 1) { - 
throw new Error("Redrive from Dead Letter Queue operation failed"); - } - } catch (e) { - this.logger.error( - `SimpleQueue ${this.name}.redriveFromDeadLetterQueue(): error redriving item from DLQ`, - { - queue: this.name, - error: e, - id, - } - ); - throw e; - } - } - - async close(): Promise { - await this.redis.quit(); - } - - #registerCommands() { - this.redis.defineCommand("dequeueItems", { - numberOfKeys: 0, - lua: ` - local numKeys = tonumber(ARGV[1]) - local keys = {} - for i = 2, numKeys + 1 do - table.insert(keys, ARGV[i]) - end - local timeout = tonumber(ARGV[numKeys + 2]) - local count = tonumber(ARGV[numKeys + 3]) - - local result = redis.call('BLMPOP', timeout, numKeys, unpack(keys), 'LEFT', 'COUNT', count) - - if not result then - return '[]' - end - - local key = result[1] - local items = result[2] - local dequeued = {} - - for i = 1, #items do - table.insert(dequeued, {key, items[i]}) - end - - return cjson.encode(dequeued) - `, - }); - - this.redis.defineCommand("ackItem", { - numberOfKeys: 2, - lua: ` - local queue = KEYS[1] - local items = KEYS[2] - local id = ARGV[1] - - redis.call('ZREM', queue, id) - redis.call('HDEL', items, id) - - return 1 - `, - }); - - this.redis.defineCommand("moveToDeadLetterQueue", { - numberOfKeys: 4, - lua: ` - local queue = KEYS[1] - local items = KEYS[2] - local dlq = KEYS[3] - local dlqItems = KEYS[4] - local id = ARGV[1] - local errorMessage = ARGV[2] - - local item = redis.call('HGET', items, id) - if not item then - return 0 - end - - local parsedItem = cjson.decode(item) - parsedItem.errorMessage = errorMessage - - redis.call('ZREM', queue, id) - redis.call('HDEL', items, id) - - redis.call('ZADD', dlq, redis.call('TIME')[1], id) - redis.call('HSET', dlqItems, id, cjson.encode(parsedItem)) - - return 1 - `, - }); - - this.redis.defineCommand("redriveFromDeadLetterQueue", { - numberOfKeys: 4, - lua: ` - local queue = KEYS[1] - local items = KEYS[2] - local dlq = KEYS[3] - local dlqItems = KEYS[4] - local id = 
ARGV[1] - - local item = redis.call('HGET', dlqItems, id) - if not item then - return 0 - end - - local parsedItem = cjson.decode(item) - parsedItem.errorMessage = nil - - redis.call('ZREM', dlq, id) - redis.call('HDEL', dlqItems, id) - - redis.call('ZADD', queue, redis.call('TIME')[1], id) - redis.call('HSET', items, id, cjson.encode(parsedItem)) - - return 1 - `, - }); - } -} - -declare module "ioredis" { - interface RedisCommander { - dequeueItems( - numKeys: number, - ...args: [...keys: string[], timeout: number, count: number] - ): Result; - - ackItem( - queue: string, - items: string, - id: string, - callback?: Callback - ): Result; - - redriveFromDeadLetterQueue( - queue: string, - items: string, - dlq: string, - dlqItems: string, - id: string, - callback?: Callback - ): Result; - - moveToDeadLetterQueue( - queue: string, - items: string, - dlq: string, - dlqItems: string, - id: string, - errorMessage: string, - callback?: Callback - ): Result; - } -} diff --git a/internal-packages/message-queue/tsconfig.json b/internal-packages/message-queue/tsconfig.json deleted file mode 100644 index 766df37eae..0000000000 --- a/internal-packages/message-queue/tsconfig.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "compilerOptions": { - "target": "ES2019", - "lib": ["ES2019", "DOM", "DOM.Iterable"], - "module": "CommonJS", - "moduleResolution": "Node", - "moduleDetection": "force", - "verbatimModuleSyntax": false, - "types": ["vitest/globals"], - "esModuleInterop": true, - "forceConsistentCasingInFileNames": true, - "isolatedModules": true, - "preserveWatchOutput": true, - "skipLibCheck": true, - "noEmit": true, - "strict": true, - "paths": { - "@internal/testcontainers": ["../../internal-packages/testcontainers/src/index"], - "@internal/testcontainers/*": ["../../internal-packages/testcontainers/src/*"], - "@trigger.dev/core": ["../../packages/core/src/index"], - "@trigger.dev/core/*": ["../../packages/core/src/*"] - } - }, - "exclude": ["node_modules"] -} diff --git 
a/internal-packages/message-queue/vitest.config.ts b/internal-packages/message-queue/vitest.config.ts deleted file mode 100644 index 4afd926425..0000000000 --- a/internal-packages/message-queue/vitest.config.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { defineConfig } from "vitest/config"; - -export default defineConfig({ - test: { - include: ["**/*.test.ts"], - globals: true, - }, -}); From de9355173ae6dfd0d22de6038e25a0dfd132cf34 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 21 Oct 2024 14:36:07 +0100 Subject: [PATCH 049/485] Addedf the diagram to the readme --- internal-packages/run-engine/README.md | 7 +++++++ .../run-engine/execution-states.png | Bin 0 -> 521887 bytes 2 files changed, 7 insertions(+) create mode 100644 internal-packages/run-engine/execution-states.png diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index a8d535b297..230803f835 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -6,6 +6,13 @@ It is responsible for: - Creating and updating runs as they progress. - Operating the run queue, including handling concurrency. +- Mutating the state of runs. + +## Run execution + +The execution of a run is stored in the `TaskRunExecutionSnapshot` table in Postgres. 
+ +![The execution states](./execution-states.png) ## Components diff --git a/internal-packages/run-engine/execution-states.png b/internal-packages/run-engine/execution-states.png new file mode 100644 index 0000000000000000000000000000000000000000..cc156dd7de74c51c61275d3bbfac8b43820eb2b2 GIT binary patch literal 521887 zcmeFabyQVb7e1^g1}b_lr68apsenp1>P0#Pr9(wHgmgEkSd`F(U9D#ovG?!4b+O>0 zlKk!R4SVhJ?t=j>Hr*oAeojpB1$st%W1dK`B|g_6e?lJoTF~g3^ppHWgLFMMru1NS zHa(*Q@q^?nu}=?q97?|&`bdgsHqq}8laU7{+eZD;Ds5ze)zk$${mHI^FJpGZ;zdhE zrNSco%^^HdddfN*(%6GXcN6SAaO{+*`>ua^VM4BYO*NtJ5&8e{Eb_+ZzZ`3wIrG1~ z8J=c(>Hb#k#VOXS|9aX#KUP$=O5opr@y|!2B?vz9i`~yY{4ba5=fg1D+tgG4cBTJ# z)Z@&-17oBI8!v4Q^)ChY(586L|M=*$v!_y(&PsgwAEgC?CjK8E{ZG*UUc>(r^uJoO z|914hH)a3r=zq0l|C#Q8?|A<6(f@AE{`1lQJ`nvc82eXi_FpjeuL6soH~$xmA^ek5f1QL=z>@N8?6AV?!i#By9S9tsnW?joezbhuN~e{jH6bBOLA zQ^4kj@Lt(~cKU;OX^TQ6!N}w0$Fz%0vx4DDzuaOI(h~RMFY>=%4e?=skNM+VJn{4#byFqTcSy z5j9DrWlu{ijRs=(N-;;DQ)kYZ5Od$(c%8w=}r>(h~L>F|Qz7un8dlJc7r*u>qZCWL%Z3Vagx?=!j2gd52x)uHN zn*GH_D_q(w!l`=6hzhsMo8P*N*tMC@Xhq-)3jZ6K4uK@!F+I47ujJ9(d)FIonKZYv z;`EMR`F&K8Kr3bv|6#oEg0J~YSP~%f{^*fF?~ZM;ah?BO-_51ztn!b0q2>x~h4VGv zkKw;3@d)x^R5rHKTg&RAR38fTzItkDLNQA$-`~>VS5XC)wtp0+3mOGQFr>h1IH5gf@73Qe97@EUMLeS1>P_@ z_IWxkL{rEGkE*9ZxgY0I@oxV)rjZ1MEsvf}B;$MZVFSeNrn_(8qNrpoxUJ*dxGVl` z_+yFEAAl)ec@_Qwf66z{Kp~6X)B4C(JmK*ULJsE6M&9Fpi7ftjPT?v{nd`2WC1~AqTz{{Ti z0=~2)jqO>kUQ)YxD}V1lc`4E82jGAB5A@{XJH$jW@FnW0g#tKV5}*hpCvdIt5KlKh zcA-pfvG;C}H;%2tcWzrQ?Wes;z7CF-GG$Z_e_SRd821vZw=ND+y5S?5`H{bZfcfma z|6(#NB@O-4G4}g8msvoq1w%#AzPcT56+I;mh>!HPQ;v2aob|L(jE)l?Z_G&-NAE6d zF=;G)%<^?~;X}lu50Tk9=|c1@?k2cM<4+eJ^x&|y_ie=wKjraPz%Pbi zKO^b+m+Zy0tigt)J8;NVk4B~gJnCKYDR((jH9Cxe76~bq~ zv@%;~T&d(Dp5`PQPCB|7VEmm?C!f(p8S8MGymH3?TPuo4G=~mCAb}iPCHzmYMq-o4 zLGO3tzBXAiy6Ypq$&d7-lbpoU0mg3`3GD{aAvygu9|cX4O8-d;(eD79VkvT!!^M&y zVvlD?y&%z&@BG#Wh!PUrP*P2xUq7-(=m@q%_?v(0=y1Bw%16Nw&BD3t*v3ly(0j{q zB16?2gQ2|n`RKQXLcZ;<(V*V>3+r#A1|KTM%S9dsOrNPwDQMGR;9HJ(aKikQ2lWme 
z*&bi?|5S$_+B};I%P^LAsftvz9ZQ;e&tZp7t)(BsW}yWuG78B4X?zV>o(>iRVf@)2 zAUzKvx$w$kTeH>aVS}*ba?97WB{32@@?Wa;XM1&D5SE@TgAga9ofo2BFt)mkKEaN$ zvz0-iax4dNJM|co4E*Inf}$Tw&;|MObipfvaju8P#kUt>hqfRAvviBC$_{OL3)-@u z;{76S!Z+FBwgP%NkH|+6_KK*;`D{D8NYW0&f)aD9Km zj-A|cD3?_j`x%Wqa&N?NR4A+LQN|hg19Nwxyz=2`}z- zY@Ict{^}$CBq0#PYlpv{xT%ZUAZwx`)V*U(^v;2TxcqYG_9P;KEl^v{v^_Og6N4Dg1ghP#+hS30hrjHWcob0LzDvG6 z==1KsH%rkcU@{p8aFanU0)7MSJ6p?*3k(u1U{xvIb|){d5(wVZ)KvLfY(SI@QS#f@ zwRY412bf@WeQ#PYY|Yoh2tFVw_gP_m5oFH(X{8kre^B|d z^-sh9F{}p*IZUm$w~!KlJ!&LjIl_~FoX5Sqg$2GT?UhYNE)nPW$0oD)-*ef5LNfNq zUq8-zWv8@$x6>ZUNY+#Hu8?%c!16Hgx>4_MFffyf$4wP2F7BRur{)%u<61?otfi|h zY?&QKDPjq7I#1*x_$w$%r#)3&SIBxxmU_I#GRoLY%Qk#1OQedzjobBteAu)KKRJ%2 zQKV#)s-Mlcsj_h=wb(LYxvh-6GFH;Rw~hyMFQ|l7%3X8?0WjQr!rwaYrQB!#?5k%& z>6+XsYV{vh-=hPmW_5QI@!?Ni3Rwe1)39HLk9m~3tW_Rl3_nI?s*1ebF*U#U*-SNK z)TcLMUcDdo(|Tv2`2_~+!rD%wGz{Zj^$y4Q`JTSI=)Ht-6Va~?{$(iAN!xk($75Fh zdj7Fgk{IzQ+AyGxDsqi`G+ITrb;SZ-&S%MZjb5@EtUb*0nL(UA2xZ^B->{e)y91GcC-$-=h?hp3Y|6=_j(u>nk{$ zm_JpzMkI{>^pxa{-BgcsW!dz{Lz!dg7-oCdxWiqy9vtaiXw#{Gs4y$EXTK}YH6Kc{ zvGgKS4lbSJE^DrIW_3#9;btyl8e(+q1~HQ%<}%VzIj!pFY`ZKTt6J6N+-Xf!o0)Ht z=UMShe^YK$A& z5{oXY_eg}COeu|1XB_OAJku22nS_|F7eHn1_B?=T2%8qqR5D;5bJI4%=ab%qMNAAQ zr+fYjEFx;O|7i%94Q9r2JHEl&*3pIF(vyoyP4S}}k9SD*KTMJLfCo7+KF)NS7MZ{s z#w-S2sZ?|oSY-`jdnqWYw4EkyPKScpJT-GG(m^jzb{AD8B}D$JaW7M>HcCXqb>)i= z?5QQv>`C|CdwRb=cq}&Bc*%9$#A0Ki4OJ(7`prla_N84K={3pQF(DjL#drHMPCB;XbdOu*B1nGvi;#5O+#NH$m8C^^-}S54*kJmHj<#*bR%% zr|FQ5`oy%mkc>aA-2xSEq3=z_!^UCS5ba?x{Q7aFewaz%h+|75qoG?OR7u?WZm zb#!Q}8c7z9zU`g>RGqZa;7NhBc#a)dHBPVfSixkp4p{Kw>+}sj7-XTjQPxQO*0;B` zm1?Mnoc@o@-!54XH`vdAt-93B!ysQOlAs}bnjh?hsisX!i`rF2*+K!wh0&b;EYi~D zA4eZo>^UmXviRWHPdlHv2TX*{766tgaQ4uz_-X}Qpt5HHui{4ICtmnONe%ALBoRt{Q z!!%rZq+c0s-}}^}_M$ku&HA~F;$HXk401pQH-p*+l5c$P_jjWdr?$D# zI-wY?my(w2`;>(7x2c{I4VyXA_k6?AwEA@s94K44_x)f!eW%>qM%<)Vt{xQvmt|Y0 ze$M6r-*@*tLC&q3s*Fd%dCau$Wywax^$ayiv-=1wt*`IDFc#Hp5}R&XuFOYj)hKl? 
zjkHb6PU~J?cP=4o5!kVB)=jcjm0;FsIl6sjyJrG5OquOBDi2|7+O#a1uZz^I^8V!b z1TMzj`nsRi`Pfv+l6LXhWKrg=5BF8ye(zQ-UdCkF&-6+x*)O)E`d}}eAMP=1;2R33 z-cN0?LT*pPsgG;yl5sdZY;Whr95e*Vip@TUfl3~H0h^2!dGcp@?h_)2dibF2SzlkTS{ zzwqTn3OZOGK7Juu(uee22>*h|aF0vOH%PU8)Yh_rfPLn*B*7f!*oo8gqs;9@Y zNCm%{m-~cvB_y#i`UtqiPG2Ru%(>J*w%lZnc3JAC3cL4mgC{m&`g5R0E?ahUmjls; zYnVxH^1`HzaMo6A*2tX^h5e@VB}J0CXX zojxBPUkFMB5(YU?!W4V<52ibPyZ!hYQ5mOeylfb`IIW9J@f=!!wNT0Gfs$QFvtE1y zYIA)hbD_1f|1h>OsFYM-B3tQR&dn+CDO`PCI_q~*!JB+7aavM!S!l7UEL$B-krtNz z5X#eI&!O!&QnB2U;Yw`=w)^E2KRExPa4WgPC*GmZl+5AQZCY;6kWD<(WmT_tMapxv ze{JJt;^k6{Bx89?W@~KjxE|YX0#+pIE(kfXSBhSNwnuVpf$HtN!fGS>2K+-8q5UNH zbw1vaDByB#btJ4OIDd6VNFBWZVo%HC@D~+7NZR-!{ztnu81`?dDB4f3>)-YFfwG<| zv>6Mu@oc8oo2$zz$`hQc#aLb;WYb^g{W2nT#ePw(oWR63GywJe`RS{@j3Ru@O|KImmI&0cZ6{|9;+Kq@qnOpSstf+67o_#>QEv8{ z;wX1OAu*S0r7txVDv~exxo*EV-&C$BnEY|Ja^j$mIZu8VIl4W&5m4r>18ZeDk3 z5ZB`C$28vQT1EOUf&9776UOaPTqn&T1g(JihGq@@cuY&<&(NLKKNl(FR9UjLiE-0b zPhu-upUt2C=rv0mBCs?N9$mFO)7Q7$CqMjlbm&lJGcCi)#+VNFS#veL&3(h=^|>gl zcZQowR=5>=UYF%X6ngx=k?T-|U8SGvJch8vd9XI*qWSme@JxA;jT@<%m6U2x^^ro1 zs|#Z)oJ|xu>*A@|jZ(7dK?LIasprdi$KRIs?icySh|)|pOSU>K?^xfs);cZO?WIT& zb0rEq2vbu;;tzkL_LR0Z-5(!4J=y#@Otb2^y{yoD0**`QiM=^<=smp^6gss62bLxq%pAh#{SEcUU5hIpxE~DlUi|G&hhhz-9CYX+VbDpc~ zgG8YAgj1s=V+CtompWp+RB@MNsqN+#U zHZ>ko)Iggc76%ev!;qk_duz0!Rew-N*wvIYqVdDiEnOGQL2wievF;uAM-R!k;yo?S z6Vx5(f=|fYSd_J^4;FTDJbYqShcD$Y)nYmCSZda6dQaXI50dZIu1;oF(fB}oFEW;e zf!!u3>onR)HNcL2v+FO#+ z+LcGRznzMgb>dBEl2`9*)iCoRk*_5V>auR~$t))pxSK1HzOlOCr=smZKNZ+lg+0Bx zIMLZ}5lZBHm{wR2EjZ_0JXbp7V*);+=kw}hhjEn{c5@?3cx^nZ{t}mbxBIi1lMeka zX0oCp!tIhs_ep<9SJ82?sa{$soURSNNVnK!C2_gRVI+=1w}MQimQujDH5D;Hhe9z4 z@=^H{xm|3+A}iJMy@O$9%mzMjM`|d{r3WDz(HQOMtMu`SuP+U>zVn<3y`ZZZX zo5oM1@1$<0ILjwHO!a6~eDpk&RVuvHWz`EoaIL@)r0U)!iyZ<(Mb00B9}Q)sqy3q5V=?cNKX;oa?9Duxrch^>Wp# zMwQ+pnTs99C>L%NI(&ICZ|ylPx7`*OfJ^0N?NUbZtO#EpjqCD{Z*JH!>))izYi!BiYr>0>=qauEv#4_GdrfUQZvK``6ILLC9|IO{Po!{be_YrLR+D6 
z{c7@aV!pEFii*rOZKr|oF=L%E^@fteXGEj3!70ZrXeH4j+Qr=6V1<*B8J6-jP_6IbYuShVtGXrzx#jtqkq3%A7WBDxJ;QpgCXc zkO=mKhDnYMV!(#hoT()T(GgSv6Z2^d@@Az&X8|@O;Sn-zjZ??NL}47Mw}*uTxVlc z#VS7Dy@nxdzINn0hh9yTZj8^nBEC3cnd+PZh$32_BDpKaz4hf8i}e{sP9CATjTk@S zmj@ZSToF4fqkvi&2r{#cm|rvQ^Kts^-M#fL2-bDISl{h2_w1VT6)^Qn%j@AcF37IG zMrC8RD7X)(E{JPx4C{O$%16{}hZ*6yW$xv-G#gHF2f*qVn<1_aIM=@8B6I_}O-;3fjIhCqU}k568DQ-|UkO^oS<)G0Iz4FDJ= zl4cIh-IxP|%C0DylEm0@=7cbIeU9&{E#M-@IR^7ggpJj|!{ncCEK-GLwOW2*rN}~+ zk$zz-x`(z~DIATx{D&zG;}Z6aa-9Db+n!}wvY>2HMQzvbUDW&R+@1O1L!l8-`}z;v zCgaTf8V#wKxgeMeJ|(4KCeLS-!k*T|j;kXHcYL^7KZh=X;TDS$ZdXp@OW^}3MF#?? z<#^?@3P%V&3bZJQ*=_HHZ3sL1B^XPVBz3eDzUwDKK+^BMe2%h*5NM~*3A3i{`++$j zoRZ2U`}Sc$9xa~Sft!-Aa(Cek6$*QcCV`V+kNUyAJ z7#Gf$WPH7_=?i;Vc!5gkq!@gP)B0*#Sxtz5W&h{=#SRRYQ`=2tHTp=m%>w-@E}rlH z^1@HGOG+b`ybU+9<;zhJ!(*%-o6!6xlJ^<8)w1ys0rTazLb|lp~acbX!NWh|14KQ!OBNQ-NWQ+}(vAD^_806qI0zR)=Jl+<<(yhJ3d=YSeT!Wp1OLMph4 zig4ZYZcndg*N-he%vGBn=HF_M|6PEVjG)8n=f<@0b3!H#a6FgJJ?T5fB>LU1Y)xx^ zY-xhv+9u{X55}1zyjIEl-!T-0T}L@@jD_jB za9j3|T)gw?NoqZU$uU1s!k)D2%Cvx(Z6JJQG{ubyK_EEEm^$>sgfE))6q9%7q&=t? 
z!4^Y+vtboD(vhPPYc74d9+HR?Q^j*~=myN@;^;Eku2kvsuhNypo2z3l!Xb^;m@Ym@ z!thC1V*yPWb%jY@aOSsorLXfN9KGNys;Mf~IvB)HTG)qM)-V%B#c=I`9h{#J#?vZZ z+xn|#CV-0l&ZV8E7%$jtx$SBDj!+&4tJamI#L3i|f1+a=Ib3dRH#2c}|=$^S#i zNHh&N%gs`8-Tt@@1Q*mmb!hrP_*Te6`9SeWlrBG zWiVKdHDw9A*T^yDs8wkF6kuB>Mu`g$=NrjMhrr%$_Kb+nsOH#{3Y9&_TIuQkzt7-m^N4x_8NJM^MGzkxiNrLXS_`u5^6o;GqZxEI_J5c<(d1 z=LAQYmm3;~3Wg&6{N?zQDt7xOM6MdgnzH*uLEVksb=E)Xu1ok8AwWQ=ZES4#`jk|e zJReaC9~Hd7g&MC7;m}3GqQ3Of)zRfiJFB7^Z}1s{5Sw1K9p_T(6M@a2n9}(+?l%*f zsQXe)@j~F%M`ipAV`I_Re4>!p*OOvXFc%Czy$+YK+pN5eLA8Ht{$OZk$gT#k80yo_ z@%LFl@b)sH*76_5IBD#`Eq?-o*Ezm^H^<=X{+Y$jIV7RlI}%BJ}%D^x!|KNsqI zDa}@0U_N@rzp#OGDfU9GCxtdr>NGJm7+mvWE4%=Rc2u!02Fkql-#zyCL1ynqBgE|s zIc`C8wZz&V1_QLrGBU01_M6bdbSD9q|;03UxB~ z3jzLkXQ4%&b10s-FtgEwn1i8v1wUyiFYEqsSmJ8 z9iwk&qfA1>?yE^3Iva;!uYNl-r-RK=Ss!38I34c11eLFluLV{^;TdwmEBy=ttE8g= z_jv)x9dh$`DMx*Kdpi>xQserdnt)D%|8%kVqDh-qsNP; zTth^!D18X6BvH&~^rUlL>%2Gvo}2q=H~|r)zgd|8ZRHjoTPcik9+B30R<*Zxi9iXC0w{#KFJ|1P&?jk^+u=6HBHz|gMr5U{cZ~=cjW3l4gRBCLT~I%*Q0P9^SLJ6m zD~ALw5026V&TYKCy}v4eS&1mQWy*D_Xt-^vlTX**cr-Ymr?XQ2QkQWCwq`wYygdt* z3YCG2V4V?6ChMb^q2?vCb<$rerL2(JE$>Qy6q)tW2M3Np`!ck?{A|x{ConlUHC)(d za%MZ#z-^W2mk1TBz!5En`x8)Rh!Dn5^c_AHk0*$?yMq)dGKbr(nh=l$2|~hcTGa?` z+LLtpp<=i6@;e$zQUO_uuT(Y}zy~(7U`ctGdzNhdz%6ncZ$9~j?hb&GR;}ZmOH?mD z3i(7?4_Pl=iEK7O4L58YI?=(V%kX=Jv1K$zN&MBL2tTM4JI{~Ix&4S(^;8emS8S{U!4D;>W!x@z4R*+*65-IVeIm4Uvax^tNBQHb5LD!g zxeX-M;^{KKnza_coB5s<2%eK~5q!I^!`xsMSk31nsR0C%I?Ep^W?TCqH#t2E$bge^!CcOe>w zV3X<<;W(}?auXSXO}_TD8V@BqF0y&_Yn~YIH;g@?!Lc0b}&4l`Aay3_$IIO&jq)tp4y5VO9~Q-J^ll2JJv_xHJCE_>RBq zyYh>5C;AU@rvcJ_(PdR<*0#;iw4@SaH_^e0ppo>nK#iCSA?7#|b!99EDf4=ET^$34OwIx^bX%Q#GAV7+0b}*?2&} zEdixvC9J>re3yqW6q`ntDS3CpX%`2ZAKO7Dr}}Fz^WDKOR@zSrtn@1&*CRS%`uE2+ z@FRSztKUp^WZ@kMi>f5F$U^VX`lw1!)e;!PROwh#MxB?5nA9V4qL=&|PrLX)4f6yCY2AR*3rxZm7kphynp5bzBI3LG8;(LV; zt@k(6>VCwhyYx-h-1{WorB;9u^_Nm<-wp(pSS<<-2FLV$B9T9D@8i4&ac09! 
zi~_2Q4Ofp|b7ZFy?!4E;2y7vh25nhF{ZRqw0gbbsNU9eY5^+mwn`={kQS1E)4wzT1kzWJ}_DUhuZ%R}Vq33SLg;mG%n)w$pv_=(y&fxkrrix+aRYq!93;h7HUF~`t7)=XD20@w6 zo$-7U>ImRSPdM9!7;&BFA$i#eK-YnMmRk0apJLU^=7QMpNJw`P@VqZYXW~}`aiklF z2TZQSeyN*qX1e@n)u*R!-eO+lTn_yRWd~uS=(%8lV^RfacXxmIHwKYp85wA8`Fx2mo=RX9d zm=uaUim@F|P^FUrKTwkzrbKe;Icr1UcR6**HfpmSytpk0kQ(?RO<&30ir^a|BV;pN zBbo@kNpB8;ju^y0fnA2<-!d%*Lt;WpbVo0|YtzP(txc7=-78%gL|pP->S5YGNSRGkUr%-i+<>?HL5igXZn4)z5g|;}2m|u#Bn!P>9ecFkM9)_S4^GB3hwN zs=t&>I}KHdO31+}fK5~_fE-WqA+x|r$G2!I^2T)G!XE!}aGR9fg*IQBSN+`9bx%5t zTS#jmRoBVFv2=Q^5XEJBo99xyLt6!rg!!aR^9ux9=MfOHNpkFc%Ge9#M+0sH|9iwj z*1yF&)k9jZKPVf#<@Y=PoL6AQmffdSeuLB6IauqLTbYrb?Of zl@fz3@N?!ZB#=}I9t;j`-QlG@7{EOjpWMqLf6DV7Lgqp+zb@A`YOtvDdT5!Z9CY-90jnH$!Au8yZS zvnrnA@Z*4N^AqyI%in*5z!60>HxdP;m8vjz{bd1`*?%oc*hROeSft9#&B?470!^`Q zUQZVU{YRKukoxggFp3md4i+HvMUrMmKyq|Dva5Ol`*&EMFmpR+ROcHUz`vGSFi3gP zqSr;%mV;&SUQ$yNaK+xB>S?~_7`LffRJ`hisNEO%40I%KLxf$fdASOEbz{Itu}H(Z zzZ)IMhLS2}=Y{M(XGF{;qT(8l470=}`Z+x4^2lh~!fc4<6$)x7mv-ij8Ul9J4Q$Xg)pvI#Q__tu zA`c~MgH0MuOACXShKi0BDjAs%c~$^6R&nUeMo8)59RqJ|k8EUL#aTw~;6i&Kh`Chn zr4R^NNrCpoh-4}z3;up5I2eFE@HnkBoe+T#qxlhbVph7TnEU(LgR$S`C?7mJ%xZXA z%kJmH_y+Iy@AcRRS*P~S!-fnfVA{5~a}NTx!ekBvGbBE2A7w-mvp&G2nn%afpCfCU zjO=9ou~Fr{2eKnYSEm>ogTBj+g{c2+M)W6kIx>}PFIdZ>3!M*lc@UE$CFVOBC5zsG zyB1aZ13MA>bbbLshc>L*QvJw>`4WUk8|k|0X8{}_z0LOD7pww2DQV1IVi<4poB~DH zNNbGBV3Zd%|0L--J(|Dy;J;-ZcOW32R)NVichUWa5L_N1{{Tx(-5JXJGr;xwB!XS3 zuT>!$IQ~;VqSHVg9a61@K%Tw-{`+aCyXk6DZPXtk)a^OC{)VL}Is(fa7v^yP=jPbj zZ0e^PcvI&-_O4ho$+Me$kfOn>5k^0Ef9q?9#0P0~r}CTleU7r|3kFuk3BH%L=Bu;JEor)ulu$3a?*l`lT z@1u%3AT)S8mvGJ+*c@ts6|Zs6B_oG>JbO06vaO)F-+o?*ih*+7 zuwFSCiTg&lc!ekzUyh~X*61EJNMA^8As-k1Ccz&{!3C77MMa4RKME671LX?PQ#<)= zYxL-6VCRw(gVT4?Q}CvDBO5+KEU5kmP9fnn3Vbm7E2VRcIMu}q&v&j2Zm9p3Xgh=X z|LzirYI^|dgJs{*O(US?u?hpZqcB_X z^hX52REzj}`v52DFff=j9R|n{C`h%~-opE&gCZ0{zfAaM17=_Olri#n6j1d07CYU3 z0UEL4634fnu?oKOH>iVprqLTxZ|#?#yTkyzWYwWlsDV%3@ zekAPR@e`~h-=O2+XLk!Qn~L-f*(XbS0JVc(Q6b)R9ax?<#aScui_p1}d95}=fDvIv 
zt+F2FH?a3|-Pbu*yZDzVfD`(9`!?_m{w@z&h+GXR^ux9SjRSBz<6F5S6quHSH!|NR zUQhT0ZB+`>k7!+&Z_kexSfm`#wCm29@iB9baZMvV+-_R5duDYkBPe?_#!v1k6jtsE zSpnm>0=QaQjr zxKtXFW5FkpN=^mft^Q&V`U@Z=?E491gyih5!gE&b<}%^D7w59twfkO(GgL4D5k99I zGR9vJ+`x(YgD}qA{c+);jC3I0l=IrrY$SRbIpcC4&Uky|B6n%%qyV^a?rjy#)jmKB zy`VAY?tNZXY9Oew_ttWVql8MKkO6b%IOFL~WcO4tb_nIHC1-DPcg z`Bt>_E!sGvguK&qEv}VUUp8%K_6z$!7mp{8qLz0ni9JvvId&I5>Vw9eXEDA;W$D~V zPYFYFn>kW`N2rE5Jz{Cw-I0GpqXyzg+VClt3^#4KFhV+x15>89PxG8WSXMMOi;%3y zMY~})FPe6lsT1~RxNR6iX~8`;2k3jRL85j&3m70!LDG?O#?x8d3Tka@!yP%stuHxH zXm98WNQgC=wHa1}d<$5SWiMNk7OIn)q(_$S!e z!u-oP_1I8O8q8{IL$HvO?Z_d=GZ~_oQkOoD{gd@8J+SM@RJ3etiV}~UwHG( zs*&@`7lBHo3bvZVRye%h0&0STsV)F>R6%Vwugi&C{d)*%uwpbNBXVJ)ipJ?YG4H9R z^Rm>vN8N%?M6GoVy^BZs*=&0pg17~o;?A#E%e!7u*0O0?hODlA`~^ZSgz_UgBo9d9 zRmBc-q0X-5M|ixU;q$lpNUExRRQHY{BpxUUGcw2oAA_dH>sd~1jSsu+Fsw+KJ^UQ4 z?YAEn4I5)$GB?&MTylkC#WUB%042Gn&}efSwR!sSNlJglq0&8K>|Gz9j!<{&1w{b( zg1Q3)_PX?U!0xVmrzhPvl7Rg-u1WHz74+x;`UW7Yz8i|Lt0k1!zBZWPPUMSpE zIjeAuAVR>-x7wffDKs&rBr3VA&r~5yvjL?2^di#E0$qoF>~1U4=(#NYjf;(|N2;Ly z@RQSd-6_Yq5oq?uqLx1~zN5hou)Cc=`s5^~+0?TyC^E(UIL%+zsNl3HloCDZCvF)Iu9%k8&cAhX?ycZS)JLce_j~r zP75LCqr2u^+f_Eh3zRItE#>N0irug#KRY#7ilu|tEE*;0`ZO@B%5uZWrnBH`b)n0_ zD@ez+;AVKr?H5gQ0$jc#o6F%T^J958*Egoz%od+9AGZNyyh^2vpSMqYi6g+Z6@lJ^ zc`fdQyKNGNgi=u2b+K^a-#Mluc=YsIz&TF zG!uw0DAyz`)b^hQX*)8LppWxe-fc~4WS?9bp26rCn3?t;s*BZG7MaZN61{9?Ot!WG zwT{Z#kh1y^VW_jJzUxo>W|ZOzDCyWIs5+cLKqr~7Mb)2ulm}f95G>RDhZWx#@R^3e zBD=mgc#>a&2}XtQ{X)6)2c)QUjN%p2y^MNf9;)7(Aweaq#v zn+l5d3_7aQ(GMO~L5yUDj&x7H$StG4_h1Plc@=3?x+X!SG?htnGoh1D0bV$8YUW{7S zyYl_a%jN*)S2n!H)QkuQ@ISd%G!bOkdh%kgQ^e(? 
zM0AGhL=|u zk4o+}3SnX|_fq6}&$DsUxUDvIsRCEyov_@Gvb4NOoyP7aj6f2d64XZCMLI3ClvM)s z9I7?O5w#*y8wM>24iK|*%K;lUyk1FS5|p-jRj@nDZb~++`^(-x`P%4J1gb2nG{rx- z$m1RGCYZf0@#1(BqGU+MYhj_~BkFmuf2{fXIQ!e9G=fZ~1p|RE&BCt;befg9#Olp_wF8bXzAAv zo8(2(hDBvc-~L7<94B{%d0vqReBK*yGEsLo^@w&0txsP?LZ^Nqd7{_%vaj1V(CKW1 zmB{>74E5wpkbIkbUE6I#=+c-;(b*_;l_R;QtTu_Xp zQdKWrUXi$*@QBW{Qqesn_{9~i5v^~IV;^_NG`Ek2TcLeOTUj_uSL)1=t^xnKiJ3me zPml`*&pDJXWw4gts!$F7bC+lU(jmRl{818TG~q&_1=&bi+CjM3foTtd%`x4lL;jP5 zzz$lAF~gLqQC8Si^CEQ)Q(D#&6q@F@yX^aXSRFWN05F9r&AjPxAGeFx_%K(RtB(#> z^Is);y*?~1!m_H(ylc*reZHpCBi8f>FnEP+;hSGhb>v_D{Qbj&KIqCtt1}WlR}7{n zJQXkRcD|`Pu|Wbg(NiKGUw)mES=74nL_&~rdg1;?btEVl)Zr#J=d_ii_JFe&6#bpz zP>K@F-OSyLHa3!4_d!Qm$)he}9zYdd9${khMLeFZlSN7V@FNv8ny`R?ra}cGvjJ^H zFMJSNI>pcYyt&NHklJjNoTWK)XdpG@_f}mp02_>{PS=(QdHO`hK zWa-!W0=43TqCv`x1odk<{*l=5`YP`eBtbfx<>B>F4*J@uz-Lp7IyAv)B} z0gg+)D;oM8Xy90mo7fB`mDdoIv7vlO8&=xuO}&A&#n|YiLUERbvh34S3LWR`NbQ$i zm@MS2TshlidiI844?huRg%d*1Mts z`HpRCS7k(EBaeX1C}|d|zD8|MTx8V(S`El#wdrj>o&e)7<{3-LHvgHBZ}pQB7s>(jaiBn44SRJijzgO0b}Sn`^I7w$5|XSd9CfD61|LC1 zgL%4M#>WXE+@D|$lxb}g_MYHU*GB{kIj{EaEGi5N^@9&B8|RWzr2Af6;leIgiC7XG zG3=oa(hNN)%t^!i(4T+@B6q6uQbT;bPw0?LsYWo6e=ru!cccnx_%s`9X8sFmr5t6j zEs(0>eQ%^0UK6U677HyR$x;4SPBPbiwMRlUr0>KY8h6IZUiJ9RN5bfzI83{Q7HAU| z;HMxN^AYH0sta`m{!C@`Wu5BP^Ccu}Q`iWE&?N0bsLm;(gL+eWC~mre=v?aawm9ft zV1-8HtIXD~KB{z#4XKg$mu{?@kR=_G>wEE$d4-=f&b@`kY5YaYkz|FS z)FV7yOvdeyKYW7C_&lo(TF-v9>$Xw(U=?pAxX?^{2RttuFJsA3B*>n1b$jo=dj?8b zb*nvOD(e>%gJLUn5uzLL`}JJ}q3Z&o!{5+d^I2IFxjIHUh3dHfRKiwVf}CAdlNRBR ze-2jk8>Fu#S(8{ASD^6#$&;yaiZdw05!8#6F%3}+n*opj#jZhtL7?p)w5VZD=>LrP zl8bJe>oN!EJg@85g;FzLXrzo$K>6ulSKBOLjZydcD!iN31|jVtZ_p70v`9;2yp?Qu2@xS%;~R)HjzNjNUcIbg-ORZ!?4q5i=@UlG4>d!A2DzS5-<605-kQ$8S+ z1!h7uE_Esbv>iF>0_76evj%vSzKb%`bhBnc>VBGhk)})*jKtIk0EQ=k6HE>#*ckWl zA?EiuhrUb}N{$3hJUX3eemmD}RZU)G&PROJ$q3q|)h0vLx8$@1xF$LyV$FC>wUtL8 zzA^d?BGxxD#FAULD||r2swpVWsGvu%QrL`SU=!C@v>x>j7mgDt6-*_Ar; znc{g;m=&ah6%fvbEoqN*^A85=a$t%eL+gbE$g#(Pc|NaA8-rf$`9=KXFGJz}R4iR^ 
z7{wWzqsdgTXji-`Us18hV4?sNhGvz?FB*#V_`47h(oHx?L5Wbznh-zeym21_9{L;A zCvc^T?ep;9s2K&j5O7Lo8RXGN_HmXvjDC8-Z^?ObrO+Zf{u_e+LLaFb`X$nJB-shv zvoqIxPqssL21X{_c^2#X6$>ZnC^;`?Mb$a7>3n{plA%3QgaL~BCW8MVq;m!dIG931 zj7SQoeB2K8&<2U|0aicHn3F8w(NP9r$jL3<@7_;d+*y89gtWwIXkRj%+Hj5}R1MCm zxYX?gzxyE!7=-G;XB;>Ae^K_{@mRNi*!Z10cSV~hkxFEQ>^;)0Y_dlP$)?Qgbf+aL z*&`$K%E;cdkiBJ7_Re16d7oA;_5D51@2|Xa`CRAc9PjhJj^p5=*)Lz}$2RP-)-gwJ z3k@UP5K{Z@bM-p<9#{v%Zc7AoUc6Ta7Tk|(Y7V!`ms@x=$ISXJrn#)gUxxaoc{x)t zz(oaK*p!ej4_pNZnSGA~qYAVl^`UL#=bcR)nZOszQ{B%$mo-qQR|lZm0y0G7)WHWS z!I4bh*+|ZZr$)Y4?gnSq@vPMs172C>(X4)=o&)i02j3hUa(dNx9fHk1bz{!js!HUX zru4(Ldg|--sm>}nj*z9XKtFvQ0;gEq-swneyd-7W_3l*!3 zbc43I4ntK@u`1GY>=ICh)-onXKWA_+4!!hWH?%05&6=+{fp-cIrSwHI(3~`>k8jpf z`xLplXXn#I+N@RUC6^%*itp*Kvb2&FWvqE+V53|l9KzPLsqBf1iMjBc@cQ6 z8q-ypVF}!bdjJe$q*E|rE`Z*$X{xHW`SD1=MDh}RHsJLv@o?s)LfHV%!SnnRPwo#B zW%rwZv-R5qOEyJh+E9I@g7)>b?j{{9_YHhba9pL&( zS5Nt7;`4|9_8?LO&nbu45B{vMj6 zm44hWBPuUC1D94nV8GlG4JV>#D27H7Vu1b77`j8iM*$RT&%^ur0=*K;$~RNti-WN_ zHGqJ#NwFt2{}P6ve7E}9*urG4`awlWZvwJi0{u0R(*1ax-{Ectyduc=N3Zs%qUpJyBsKZ+V17;%B)Y zKLL1*mI%$51aw?!^TEcsHeqAZR^L2tI&?Y`05e_k-;@Tygb!4-T#qIDNQ9saU^ko& zgA%sk5S7dySt4Y*Bp#cIj0MhM`n;=e6S55<%D?wD^G^3re~{Cn0fI{E@=ZaM0cp{8 zuYCdWX^PSF^W7fIY|B$2{lbeX5NdQ@9XhPQ!4GU6sK|E|?obc#kKe9Lp=cf@TY{!W z3RZ2-=5@LkqYz8yF1)K=rRNwpQFFp}=TCY#%o<2_Gl$ahD z?P2fWv{~(?O?b5BRjeOcm#3%)<4ZCZ>-KX@i?}%_4f{TC9c^#Qi+8`AV$%Oo9dudR zpY;rtG>N8)?;Tatw-ky}^5FJf1=0#AHCJ8juMB1`XnXB+Mxc{1i({nrC4;Y17~k^8 z6$I#->s&2(etOV%RBj-2f_mC*DIV9G@vj$+Qo*u_zChv!WjjLiJSU_U3qhfsN-Hs* zc!Vg8U<#CQdicg5ekLgE%+ zaND<+EoWd=QiKf*=c(rqv|a3f3AxB(_}c3uvNKcZkcRf-WXd>M4HTcKnCzeEK!K}7 zB^7l1)1yOG3)VxtYgK|t6%u|_cZ=iZ_^B}@GX=%!t3$kA_vI?w5fTSL#$Oi=P{aZt zV(#H2LZ`v6W^dyJrSh%nsx0vyjV6~>`oEC0OO*ACtmIz~PSU?-dYExJ51Ji|Zwg7H z($g|e{@sZwu3i51T(lLUAKn#d#aKpJHlkZ}tpG(4Rp2IDP3!70bY*dgz9ADD0dQkc z`V@HdC@i4l1dc89cJsX!Vt}yY_T2ykyTmR<1)TGnJ%GM>Cfgbih`^E?hLw(qg!1lY z2XpDiznqj?K&Uyi@8q7VwNs1*KV>VKV_8Io4%aGnZ7EVSdiW2Y%#szuH|%4s@k@(8=GxH3dnCW`HWJ;<5)MOKZ6iX6tiy 
zK+P76L9jMZ&2V`{jZp$C>c3RaYB<{_yU9JCkgr^vdwqa-@N1&xvlCO@66fClyTTa+ z>GI>8$d}lAA$X0YZF;;@0GU}}X=^*kvCxsD-5D?R%`@t?wAM=qfCLDB1VEX-BNosw zVr_txC_F`>r;%aWt_|e}wcdVa`i>vGuG#1LHeBjHJ`21%k*8JhGTEFsfPMzoYp4kI zaz_KQCddl0pOjarV~p@d5GsYqW-Le<=;Fl9vmw!ku5ovvC5Zr58bDtCh+~tB8APOm zWY$gmQ#O}}LSGHdudZ~ix>=61RFRT}EY?Q@T0S7q!UuMDOvIG`mT$BXW6DQE6k^rb z5WjtM$XLTIpsRFr&sIBD4AN+%rHdIpOs(T0&XC0kP4SI}=izd6QfcR!w8}ccw-|>o zYicG{MfhZP17Qd+?49a_Hylwd)qZznL3pNJi-f_8J7NE+pAGkopsqVKQO=Te>2-Y= z%yApDtdbYx`MQv=Zh4Gmu=)t9Qvz~)MX5l(OunwD8}l~dIT&0_OmDFVIk}oi1EZel zkRr2gsh4u*?Q3yqjRF2r60u7qS5BJ9|2PHfEYCY#eOYpB3HWCI?aXi1ik%i@ri-#D zhoJrh(@_soQVkA+-dalkA039dn_ffAXR z*oiXD6=sB@$Q;Wy>OR>ye}cbP+!z zu@Z~Y5jA>N06Y$W3FZFBxaGNj({LtMGk^%ve-b8 zZd7ka8C>A=kU9MQInUT*@LAuiH2XYwN@DsGlD~!4tp^?xhR2>ncumN>SD9D(4tWmA zAwJ?8&&YCq&WL`9Ae0fymvo2t>Nu(zGhHq7eZ*LV}%=VQOlw7je#jtcTzr{u80pAUxLOZC8$!R3-j4QIT>npb$Hf2Cs_4w^8)FN ze;VML@pS;~A{^gZA2C=LT@2vPo084Q6(BCq=YD1bu1K-y`QV_z+E>}bJ;R^t?sfuO zAvG#cEqW-a=z5o@pc}pN$Qs=M6iUUiJ0TNK21U_2uBw4Jxub7Nw$C^?P==^#l%C44 zolV1K5@4J(iFTX^HjS&`404M#CEFiEn1~`U*xSYli8*Pfe?4r_q%JAU!T zjg(dtyfE2K4+Pn|5i>qdbkLP)?57_z_WH<3K=qRy)!yPv1r5FtUJT%~S5zeSBJ5jB z4fUIHegi5-Vd%5aWEk{+_)1Rc6_6pO@srlp3qfZ6HbMtej(d}$assOUdxn_&6vU%m z?m5pvxc@F6gtB+OKj{P?jHqYC3OXk~1H{W`#+F-ch;s^xw)At&2t?lmJhYS6>7s7u zhY7X)d2PjRC8G2=u#-l|Im!|H)2GjCFYCqXT(gr#xFVtu#${gA`f1>-p!n{t%HR>T`okV_-_hDf|8IaD0lnLg3!CL2QwXgQkPy z&x*-IUQRuGCnfrfYJ1QKohTU8!rfAvjio79ir^`^L*H1p7W`L7XfMMH?&Jtf2k8%i z^`nTU+yi159k`O6Wzd(WM1(nG;$hO7$S6{X3#CM;b^5o`X8~HJb(0Pz0QyDsfS2+%$ zB;@enw5tFOIroWvO7V`T_G2fAIof`_DzDOR7fBOm>okZaqiCmbHAJZ;oyOY{FUo;# zir8RO_R8Wy0g#q-1C@pTQe52nR?K-z|bq^pOt4f^R#n$>|0PjfLEfXccv7?z34*mHe#zPlhWL;Q*wSLDTyz6H6Lqb!P5 zjjWymM8pPc(|1Hp{vDD{beSf2d?UG2UzyK5BWyv~oSLg`fX8AclS1&|au$OSaDQ0f zKqrDZ{`LfJ^)u|Pab{HKrr|^t!G!Acl?(hkG$LC%4~$Hw0FSE+{M89PdMC#$Ao2hL zP_ky@{z$@)5Wh;H5+Ujblr6nB+<)*S`vvHg$rL$=uwu*oG^Cy%wPTIyfF!RQ9Eh^P zuKMr;Y7=3oW&tA&94w!L`Y|nHK#2QGlFn42R+dJj{^FOq`ENZzI3`dC81sLjA=eFz 
zZI1zme-U++4TiX#h*hhA9M>9EZlPO<2N<;uiT8=*NB%G1pJv5y{e-9{$QmaJNPOjXN{hJz36vXu&DBR0!a< zGNd|J6F}k0E6Wi`_`;CZxHiDPaZxq)liZML%D0Cni{K_8K(07Ahr|B&REB)K?#njt zMoIXS!u~7I{$3kd77q2I1_u+Ht!ni~A1sQ!XpWY$Vr5X2`C-hxQ&YbJGM7H^umcDt z9RVFD3_Q;Z{*|l;iZAUsP_>&Eb;_lty@%WO7+Co`<*)g2x)f<-GN8u?(I4Ebumyih zr#ke5iQ~V+*ai-8>nQ)cOW6G1?E{ei52lt)zJ_ee^D9N&kwI^}jR8e-_D5i68(Ayx zS~pUl0^HyiZIm#e#~sTt(?*a}e|p(RQE;4Wz`^M_0^F{ah>r;c*M-1fauOP~Oe}}$ zY5J*A!J*qr?n+wkk@%egffq3Q`^8)nJ2!O0_0lUdsLZ+ zBnf6*c>*~f#08nUNgjca!T`4WHSnJ)7d;vDJ34C*$1kD2w&q$&-mMJBrr-M#zWLa( zI)$ybhaY4r;hTG}lj&_OV9Y=8&4w}IJn+l&)qt)9QpC&JHQ~aDZC#i9I>KM+i@Vgb z0r3QJ!ya5sSx}?bI~L7m$yIFZCvBOvCz8Q}m^ZNBXzY$uaEcOW(#U zZIpdR=`Vu6fD%+<9o7)YATtd8f@NY(jel=lPwPJjT0t6GUQfM!3p0TXmoCD4SX16P zy}AA_vnQ0b&?2}&Y6eeruuY;xX*XR>>GhTi>RV8kP%=2J zExVeDb}YfKd&LD+>YfTRyZ-twm~J1SUC~=O5Wlez(W#Oz8u7{7hb;l>)mwnxe+qK&6;@X^)*LhChy4uY9U`<6V_A7>H!ng72!Id|;Q%_j+} zfUobRz(tCs4%L9vq4LtKShikk61A3jaC81rNZNeD=K2HvgXh;KLFafB^pNWqwo4Xi z!3>iyfuVAwXGdmx*KoV{{F+SLCgk160ceFOHIePcW!!@Nd!+0QRM`f?yuirRN;tX%7)Fi z{!15KMgIujUhltyPGun;UHSc2w=W$UsG$`6VV~I8$E`nv9|n%Y+djXOLAUu8f}^Nm zNh-d5ynSg@RI*})l#HAO^WH-zx7`YNI2nJJ+i?s&d2Moz?S5dbx~t|l%+R{W7R`{jyGH#0h6uY@>%0de37zbmU_^NGY9*cBGs5|g5~ z{e{r}E(4z+#lh+I_VV3dE@xrHPaiP<5@i#_hyPLC+rP#j09TPPyehr+)Zxv+uMHWT zBMMjpHd+0z+cn_+ud?gCG}YJjH#rEjCnF|$ZFfa)zjZg)3RK^Au9Z1%_S^v3pzogH zD~sfVW{ZQ7G3!TaZDjhCu#SrNQ#We&)n{G$gm~nrBo8+kyo`Ia?SpG}Qw-Hcc1onv z%7orgKCk|5r0!7s`#?_Stv7a=MDBc5Hs?CDD25GYSoLk@(#{!P07H>Wymf@p57nkP z#2V%njT*5)75Gl)*S>aunF(-qAT-_Az7ja@qk)BPJzA3Uo+R`W)1Nmd0M zqeG?eM%H-&&^qtld!axu?A`MhKPOvNH!Op`zaZX71S;~}$KOkoQQjzrHA*<=e@2Z{ zR_9$E7jX$LqF6pONt5}eOiIDKRd;JPTdsx1uJ$jcNDHKrNo&(>oCd)JK?`;3S^^=S z4$_?mBmkxe6B{u0tI7cT9PhAxn|d_mRiYt-C#u7|Q{TVi#;3ecP(9bxZg!;AT;Q`~ z);{34hj%l%AVW>4nGp}$gZTaA#+23IRJdBp-Z<}G@~Vh9FY3vTSNqGw94%ilhLd1x zMd1g`^{$WNrU{cLv)5P}QXk$6l{cbJH$L#K+$SroQLbaT(er{yu$YDPu3*-G4xAvd zS`8X6S|J}2w?9xKfF#EiSN?-nqD9OEUk5D+@&-#PcEt;q)x>7YP560($|}t z!|lpKYtwd9VCh&EFs9;tP`fr?fz%u}r9UyR3U 
z9I!s6|5DH*4C>CRpNGz1{Q|6FFzn-&eJ$)e-U%H)T1j}{TH3zcwcoYBtY6&h1z10w zhhzA`ehj^hJ-I#c1jj%Xc?PbuITYKs>H3?x5TV`lR|N&u_f=DYu`u~)cx~tC+|lUv zHYQ_zdVM3&s-w}-4gB2xyBG!+!X{Cj>K9?SjO-hP0#@p6CY#f|@mZIrD2UATdaVXHthB~5UDsIL$9WX24 z>is?|p_VYB+6vz7A>qc0hElcYx+*;K2fW(tu@o;maD~ zd>I@NHp1GW>*I?Nf-j`KfT6=Wl7a)t-IUw6Gx~4~ig4mxqP~+oTE7qcBIvyB(X#Cjf605!2*m-n|4-*{MytzF}5i zL+Qg4!(%13yJVa5uLf(JreKz_-RT#k$B%d~m2oOB+}@+Z_C%CK!0C5#WsHw=uujQe zE=R#56x+?R?nqZ=`(3t*@U%YE1xI` zwwuE700}eeIAyd%z~NWt-4iwb`jv3o7zWCMb~E1ltqcH*OASNpZr@?V%Fc(DRZcG0 zt})a7+J^EN$nr)iR%Y$6oM?!th}Nfu z`kSZZs(!KB_K*}lQi{SB-r?`^STDam0`BW*ABuPVh5ftJ6q~J-xg?TP?q2L6 zhyFyzrr?Uywpet1>cPBUhLgbkpgn5GpQyM%zOFd*%w>vYf3tz)x(i|Zc6>q-9@^sd z^*i=|uUQOQC73#Mqy8SO-^rZB$sVb{65NiRp8LI9_19nhmgQVu8i-@{e~)8#t0IC$ z1upvSZ95zLAk+?C6BF%YioF3E7^~%Ppj4F(c+M|ZShpVm_$m~kH~uL?hY`kzxZpEp zaSgZWg*fR8cYF%lVSMQK4J6BnH&|oSky0zc{xpw&X7Q)Jdjp*VhI^wso`jDZ0C^os zIp1JpF>hOW?EG6qWXgKJXX)3xZM}yfBKo|7Ki-1%JGeKqN8}}G)Un-CT4Z@Y;`L=> zeRzG!TTtgrUc~4Q7o$Z=&^I_eCGA%pZEeE;cpF_&~VIwQ-@2-A{6PqKoNWywk zze5L(*2{(@Y;z@_jjm{=>ILkLuy$lqQHA^D2UVZV1H2v>c)f%f{H^5Tf z!2`DnL;Xhl*MGra1^2-1Yk4|9{tpv^^})5rdcC>#UW!Yx&>w4Xp%8{z(MHNvZjYaT z-&h;}6H2!2_U=Y8HQGxe&Lmii4I2aoSge`>h5x3jp`mGPrymB^~b*FcYMp{ zY=!piMy6nln`3#pHh|^M&P@%>j>S2D^0GG{uoXq#6TAk4*&eXIU)39z3LQB$zUtyk zsGvqKY$7bQ_tL)m=O~|qhMi0EifTMf|FkJU=0T&UvREuVWn+k{tS9Z$m!W3@9z=Y3 zc#belam3JQwLfdQ%8zHR!=LAse~G$A6&II9+frO&zx}5>WAQDNmTPX7Nn z`@e?ymrLK?y(6qw#Wk_cP|OuDm4>-idRTRi|Mk3gFCg23zfYeN>jUy}aFPl&;{V%e zPX9Rh;Jmwl^UlVD^$ai6XKmXNsE%zbus-u^gHoS>&Fz_mcM{vo9fgzyjf?oZKL&o- zy9L&t& zQvwU+?T}OXhx>Hf3T?dXjU#(wl2i6p(_^16N<dd3ZPu-$cc4$W80ihV`qwJsjMAcB<+QQrw6yXnvu};|cV1rAh*i%OHS4L4Rn;0Ejm*JaYFz%OH24OM>g0ngG@oWUSfJhAP1w}%uxH# zFk!%L5IcJ&`ie?7FK=l~;IZCo+o<;0YDIkdr%T3fQ~a=UlHRA`WbMls_*I`rN37Zp zEPVftbPzc6A8` zTRXaj@hk{Qy@$r=l#~lpl6GgIL0Eu)^z=oWF?|0jR)JW5)MnV&0gWxz{dFj+>2E4J7 z&?8*wUTKQ}3pH0)bA^Z5NmD_B^fQ<$FBTh_7s)NklMp3L zcz+EOXyzKN(IOGXmWpB0!_)HKLmNDKF}c9GaUmMxuQ@g$z{0XpLwj-!91#_0TJCSu z^iJ+O@EsX=vjqg4Qn3NEJD$?Uw6gSMI2mf-u(Dr 
z-yMJk6uwPdNl|E&eg&GA%M#up4@Yx4+s_hRP?nkI1`!}~BseOyN=PgA?QHyA7sA(K zXE2K<9l5qOZ zS7=l*NoxSS77f>Mr)8ZaG_}4?ukNSw-q$kBFW#FwyWLlEF(;7Gjczk7cfvMth$CO` zxAR|(je=VKvl@&lRi*m_$IYjdQ%Ml}rk3 zgY&@ReV-&9`jgLrD90P^=#mHGlL`Y|1EK$Q!+bH=og$-<3!_$`#dK=i{pJdU2i!gib0*c-jj^JqtxZPm)24}`?7Ej2(wLs(2|AyG*RO7?CiSZ*=Yw-xU10Khpy zl~oP~An9&7n)tl26xm#CEg&}`!5ezlV$`tfVyaLij!qM^0_#%o>&24=2hxi9*lMJ8M&7fYrD8pU@32R3 zVaD(j;STkl&omCdh>dD!97UwbfJb1|^Iuus^BFKuEp`jm!3BI{De8xAZnVU#J2>(& zf??H_|Iz6p(i2D>^WYICPu_gOV zA{a-a8OJz`Y4*Pm+GWxJd>Y`r=;^DJKA0S(Fp?EAI*={civI~7JWsF*5mn(fR26vh}ZEV4$z~{ z?x%E3lvVHndX?{pxu$~#h=5KY?n`fDnJ@35SvdEpcwG@eL-&4BFJut zQEm8cMSvaM@U;uq@T*N3s} z%X;!8k85eC*Doe?1L&bV5U_%PT~sRwRAVXEgf#*0A2CEV(+cQKV%UbwJ^51#nIc-g zGhaJHRgRekT#vc*$U(~m2E~Xji-DRq_CV2iW{oT~1fYKurUlFC5?wJ`)O~P2KSnfP z6)Om|(HH2?`{nGyx9xz9Zs}q<7{0Jfboo@CaXcu zjErW#S0BRNXe05cXr`~V=1&EWf7bDkEI(lfq->uXlsro9_!O#7aF`lj$Y?W{r1!RU zMr>0wa#4g0Hd;Ju%5iqnX^4SAZ;Ud{l0%wrMt&rtb7cZZTTDHrt|} zfmf%f93*QsKR8AdoX@r%4Tv@Odc6ePEeuo7oWX~)(%JNfRVQ1#-c$~1f zYB}6U?{>yPT=n#zr+>H?+)X03OGhYgx!dt+ECIr;jh zWLg<Uf1GzWwE@!fr#WNUECjm^<09|XgHbsOMqRYS z6tt(W_wmlCO^zUrt1Q{OBM%6kS2=XNQnHhhl^FoePdC68M8_;pNw27_F&{eCrFg#f ze@L$o62N6~rLmI^_$$hGcLf+$WK~7=O=71SQsV`66s)g!kerz6uP|*r;J33jjMPp} zC}(dA=+*<)r$Kbv& z5R~NujUH-+PLH(@O2)4OSow2J||?a8dNV zkK`-2#Ze$%p%kccrd{W#Sh1isTx;@ehQC@it5skRS1BamnPeKd8o~mI~tDz zoYdP({nv`LEDSPgGxa{hPpD z(hf-Sihq3j#G+p7JEM8`+d|M7+gtyD>;%6n`q)P-@){45DE`WFQ%rd`T7Wbw689iXSr0?_Na+H01##Kp{ zXK6$eU}9um4d)doxhB878PYfj#vOR*yU+L#ssdHUA?*^%f>VIo{}<#d@PW1W5QT!U zYLk!IGJVEKz&@jhr&kM8i!jB;fF->%9Pg~J_=6^R4p{xGSUr zbEP9}59w_|AbD-BIB1v$4qXyNCU7m`Tw}S#8<m%S^HNYjKIMzhJ2V*VZqq^&!1$H<*%_Gn4%x&Zhu|bV*Ka0{a+x#l8>yaQV&U z;!3U@;wG9p{4|s8*S&lw7;Ib(48=}^?WPzi9nygWBKwWCfn)CpxQVP;SwO3pPCbHT z2Cf3SJTOQ=^AQCgtfjbgl~amWkm`l}&|P3?WYz(IkpIF*r%yhZD#TtYTouA7Bjfx7 zt|THv;rU@f_V{T9&eE@Tvp+R}IeRt6ei-8NCy2-8?J$6wgZEWLTrAagqgT8gEFT+T zGtSQh=1?N!2t}ov{FC#V;&T=Yy-LWj)Q$$WPy#7VemxIh1APwgYc>{M*Hcl_A&SKE zqTl{YP|tV@&PSr@BH^=Ar4RiLN~NCpQ8sosA6nm?Ns 
z9u>%Cq3E!>)LAtch;t6fv8&yQihUwSO{Xv~TBT>7A#Xsfa11uW>v&i;#&K4f>DiuF za`^n+gFpkCUnPiVU^SGg)tHMgRB(%Enl}O=pJov_sck*ioWM;jhNz-`q^lbKA%!r; zzVfHuYc5_8BleoI!*qGm+Oioh;5=WbZ0@;aiIrq@Nj|C{S6Vr6jY4N7w^`TfRN(er zB!GM&E#$Z*OOov?4H?qV-MbI+&xg*>BsvGDygEq6T?BVgbI(D&HSbjIx~>mlmx!jA zw3a`JzH>qBH%QfA_4S)gLCyuxY^}=Gk<;&jjrTbEwH>YO3w;Cwvl(j>aS3?XFRIxj z0OWtkeWKlUAr9Z%f#b;gF-bZ=i#fS%IylFj!03bk!Pe`5DGgN$*ZU6-k`d9wr4*4E zfy%2y>Au6Paku65t7r&QKw+PLO8dWh_!fQ^J1XxHs%C_g_6%#z%>8HDZRK`XJDA@z+W+0vO3|MdJ`)A}$=oETlJ8SjMxmuB= zLr`@-y(xSGBUdz$}(O$$5n*e=$@B9}Y~sbf23 z??&hp&$zXe1#zWVV4g%Uev7_QHa+O<0Se@1D4QEMbWvEcO(fbua|JOab^8|Z(t``0 zKM!a3sVb;JWKx2mxh9WTA1)&L6d;p&^yc!TgDN_B3daoM!bLvOo~GfPfT*umo|p@6 zOqQ_MEdV1AEaaQ}nH9qqEd?^dGz3JL_$WD^ixn^X44Y){MuLDXS7NjnRcyHE@+{nH z|Az4M9Lw_s{nIb|9!9*ca@lhV0;uu}cCURbCBD5SBr>G6^z7pwj?*jzU-#svhX>r> zJm;o0%5qTf`e*+DIS2w6d@zZus{8zefW_v;>kN1H7HYFj!bXuW_gO>x(V0&Y3SkZ= zL-n!fdIm{d*%yY$RsP9`sO9<(%x${aTynrH#BisAt)R-5uObVX>E*m?h?fD6Zf~*3 zzW*RZ#ZHK*rGL8BAUYqtIB}17*hl$qdj7gTC9UV%>oixnnJgL^)Y(f~qs4tC5#IZY z-8jH+pF#{37PAc8bRYch$$s*A4b;Zw0yd*F7nHHP`C622RCk5dgz1biE(83c)BYXy zjqHFFgAA~21KJm<(3*E5;@zd?jT^RSHQDY{uiX2a;8&O0Ex|HhL~pv1o)@Xj7UDw8Ez&~gwt3TC?D(@uD5k%DDN`zdpEWzSR0ko@$=BZzRb zIM`+-spQNdzG)fY<6j-Spn2?#!>&j$uxB6@?QJtAKcNr&Bjxhe7!z0CL!4djfoS1s zV4IGS6?Liy+$nr=3$&U%gAuR3H|eCjy%ysuT)`|>~^FcX@8C0W;M zeUf?t%mS1~?m!-J7BQj+Srsg`(`$dYkE@a>9W?({Sufbie39X4saX6BsXz8|t5Q`f zdk1f1iq+VYu3-i1f}x0Dj$V)j%lASbsrGvxpV2`ADZP+}UWjEDX2;1URSSS?Y zVmeXDVZI^JNxgpF#}fE0qOW39@K|tc4H=0y@Dyhx9c`mRh z(om>#spDV;#?Om(0QpCf9oGP1;k;@Od2TbEpYs!vvy4%f!(NCkC(&H=!Xbi{(e3MX z&yMO3{8QZJBL{#*_mTswzRGyy?M8e?-Vg#GZXjFHsgG%+Wi1R! 
zq;y0n@g}FGQS*oFM0U1AvFCLlI4C*6Vo*zfs5g6=zQjj)1%a;3QaP`Bj&;TEU?f~F z!f>D!v>p06hhpBOXfRBlxDE@7&pZNJ%o9tU9;HhwP?9-Cadjn8RvPo-6>Tq};h-%q zFoCCZ@I#RD{odaPznrMOEwI5q!9HFO@;Og{2ql{Gt3Ebes#nswzg;@7ZGsg%=~9Eg z{m}PT8TS5wGB-XXNB&N#xpGq1Bw4H1w@B)u<6Igx=C=) z-m2*Id?&?p%n))LK_H-!nBNnEa{(xO{DM+OA?Y_U-~B3Cqe{stM%jx{xfv3U+C|U& zxh~`3QW^4SI80EY0vYjjj9-_S+>(U12v9XWi^}fJ)r_GP3wr{3Ai=Q`Q$S3{ZXdt+ z_oQnriIm?*jX^Gq3F4+Bb^iGx&h)@1ZXk5L3mQX=dXUu}jYKK_V{I1s_oOjRp0pXM zO5$N&gi;1gP|A4~bnn_w9jJr>AMx|t)swB1912H}Cx zxQeK1QM?esIw-F2BC5=|1eCcab(%j?xZmtrh{LWhV7g-kcFXtS{jfoLfztDY`>b?> zWqOBQU>rZHOG>={nGtjc{1bCjp|nZ_o!u5#Cy!%!A&X={qcwA!`KE}}d~~{W07pd# zgtXki-C}Cl4kwN-y#-_BHtTbr0HOsaNDep9lZ5;OmctI*s!uG(6ebnq3^*siPLiqT zm^OT$seNy$H#mluMu-vl_H=O+NQ<8kevawvs|-E~iINh#QS#NhGn22Xr0pKmLO3ooM$VuAUdi1^78`)+xWxtqkHynySpXNiT5o z%bhzP_nl|e6Ss1dyW6WLz{8AuxS$C{vmA7%Xpk$BgQyY6dl$H>R59n$;396Uc$MfW zn_m$vBiW*9a*|bFrq9;22@#t^;ZZT_el6S*Oo)E)J`toi9HPGaVAqEm=(btex5ULI z*p-M1)NvQogyng`4J(+oFXFuzUP8SYoMLHG+n#H}*Ea8rx8o6_(x1VUC?N}5N=)J> z;OsP#sgoEUEv^}~S{G8p*5VB231D^S^=4?~MJZjCDk1Tfogb?$in%Upyaa^P1i%kp z0Mx-(KKn6cdD{zI2e(3h zw#ngd*~eN{d~?sh$3(eA61d?lKiKX$_)`>Wze$C|o5_*;-4F|f_%Z8>H7;Exvy$I` zlAU!pZ6hh=Khu(HYr*33Y>{4JcMFgpO(oc%LQ>ajy8)YAc zCM$3xlc4C>?HW~#QBlLTSLE0|l61nq#4f#t8HE zA>hu}gF@SRmPYu*JeG#!fHb6<8juz`j!KJORM?2txH!; zgRwepB9ZVkhC@el`=&yspag&a@oE8ZImuurC{j@;Ly@ppR@{-oN8Q>#s7nx2Jqb=g zV=&FK#iF(yo)573e^423H!oWiu6o_u2MUr2Pj~lh2%EnL7AmNhl2hF?a5G!Hc~Bg3oVj%=H_$)I=sGi*~O9E)s-bw`Y1$72y!mb*44m> zAGBbcW)ry4DnNXleYC*Okj3CWV$esq$>Bb+218k^;D_|M3$me%n&Qrk!FEG56Obh0 zq&p`k@Hhjs7mDIkvbxEEEE8CTF8S+ObiH@+f7c95oqh}b5Tk}Rr#eIKhUL;*2(9R> z*>V$ws>G7qoFHHW0lAZet?B|;7Cr_Cx)Cs4o@7`!F-n9=5%Sfd+qnZ4_O@BpxhFx} zLwe~$M4%5W(sNc8&>W%w%^}SSma^V{kajZko|%;NY!n|RGK`%RHjEXLG~t2k1Nb=e zF$qlCpKbjar~8d075)ieHIR)9!Iiv5bS`ns z6EW?|iBc(~HXB6qC-r!W_Zbf&HoPMgR~qwHAWwV+V_f%cG+??7{SR=A5F$0LsP9twx2vUiX5#U`MedVRKJRKxlK${pI2;@fNY)j&3(aW` zJcGO_stWP?f*3(Vr_IdL_B!c0j(2S-<1M#ti#Aja0{%1zJ0 zs=E?YX%Su4(+-{d=QIv4@|!dTzQ9H5T($2Z;mH8dBG_>IdfSySucu+z3n5v9jH2&g 
z;0o%Xvt5dQe=WnbmEEpL$Xl|uS(Yx!Tzqbf?>pkVh&?bdviaFwGIAm)tPR-wtK`3)$bN8YtGO z{OOnszHINa1(1z77KTh#+H4&d=N_~L>C(g}a0zm)bsItEC&l#IQr#0Q zCmSOjDx(igG?~XWS<)8;e=+Em>`ng8g9g zl)ZbUef_noh3~X3TP#N4(Zq_0o9KoV5^M58Dw>Ec?wwH0l5ta#mT>6w3M4(BvVAR2 z7-2DKNge2P7OWXY3NnqF0y#s{HUAA;k%FBD+S_3gvVi*+Xi{rx8|6cz{A|hXVyKYl zY+afbTf{>$JOy!oUR!fW(&ekP*ZqVnvQY=->cXrarZgf{X1Pj^NzcHI-=<@sSVAEX z>e}7YLk&FHmd)o78GX>$pu|bzAKr*_n;eSb^75zYI$L7DmxCD736vRWY;DiBa%0jN z(>KgPqBup+Z}ECzX&gCED4w}e98oQg*B1Fh&4g~+Ml#HhlP0J!v2#ub4sOMRAxP&h z`vDa#&^kAP5G4ox!(QO%%ay`%-$14gGa^4T!-IuTZb7i1iYMz-72AxjeE54v8V~>n zu(n&l#SjVEv?p()`1bT~_5vhi(;`CR3*pl(ra3cZcB7z}@q|9TDY=5?Dl{_wbC9&D zhkHa8f#qXD;!dw$=6H4{1sW) z34Y>nZcF`VZt(rwq@=AW(& zHlw;DbAtku3)6BfqECz16-n{U_`bx*c#DZQL_revUG9f``D0YLDj>pc(5?=2mh9JKxX}?8ONqlOIt$V2Q;kEV{b4A^|WT}?o?BTvj z&2opuU(MIZJDP!Ba``HCo-HzEbXHqXzf)de{BQkE&)!3glr;9N1m58bIU~&ejPIPm zE1UrLST{fJ^-4sS;Nlc?;OSvLK?jcB#JICF3Cy5*BXP&@n`fsSO^^g?*&s9tKS%d1 zaSrG#vI5`u{QU-_4#;qE0&n=#LygV8#^G82k9+_7hfJ|PACc!}4us=dEY1$puyiXF zK+Bcv9WYCUaM?V8GI?^>&k|mR4o=`q{SFnx3r%M3Y>khBlCdURMi|f8ID7C`#lZt` zpy+CsBfDfRR51yjA2k>5|LfW(8s-btTqe|_8tx<3;E&9CCIq}shdSk>QAZo<>u6sr z;Z`vAhq9pDWgxU&FG8y3D6RLOx2vLL(msAumk(NmBvoFe>WM)akruJR1<7^^oQGP=BIH&tLlYf7%6K7PqGbUlW zwNt~>-zKS$i0*Fq^o6Qm9!94LH_gw`2GKrHQfI*fCGUY2Hjd--U>Ax(ROX^=(_0zw zDs5H9(Y#onL?IbF>&2PY_}DP6@#w!aG1Kw|`-WKqYyR5qP|2sEo~{Q0cbM{T87s7M zVx8{2sR#q5YkFm8(Q$W+L>FoSB3uO5bR%GCHLh?Vh`uO-hZDj0aTQX1PoSGdsjqBB zQlUfzQUyAX$nt$-J5a7FAgv#m+fN~hda990aHWeO9?;xR&Y#;w8taLq7j)d{(;pvT zR~CRSEwh`A!Q3+?^R1mLL`d+pW;puv7jWeWpoP*Ga(S9{-%D9K7b@5EU!KROx6sNN zL?rp#zYZx5-RqE36CO`-Dn6#DdluUadGV~*)a7ZDQrARdZ!JIR?yRw9h zSlOvvqkw#PyqwDnOs~Z0_@;EIP=5w>>!8@rp7-e_`IHJP)l8NlglI+*9N{1s;V;Hk zVu7k`P@sV}Ub_!6H*};1Q>t@p5=ET%tVwE1+J#HvT>Ebi`4K3}8B(=PLD&R+ujK9e z$78Br&$SFc0w<~YSZQraQ&RV0!z?E!R>S$;3lE?J4BthMdV4)-Zx4_r+@;1!f1#_C zlIk|pgM~x*9h0qRP_qhqt0i4haC*D7A~1l9G(0HHiln6w05LO`GvQDJJ5LZLs_;bi z2dx5k7BuplRj4!mtV^3)Zw6fRV$h}k=BUT81l%QG z=w4v#;(w@DX7|;aGI7dK) 
z?=!+vXahz01gu`+QSl3FGA>YyU{wTROHxm;+!UyL&M?&c%JzeHSf2tS7oe!Y`tq2g zkous_(l6c#f+!X5+6XL$+4+o{Xh1z8!|Usr5%j+FheaC9U)!5Xp>|5!tLUuJ&ufg{ zgS;;TO3D$%neKqN&5H@%D6YfCna~5Ph(T=7ogH)WmkT?gG7zupe0g!ECbsUMJD`45 z42;IDx7p+%j~Z&ZTecwx>VEpnFXZ%#q2~9@X?fAR(q#m@+5t)`zg)eMs(x(TFp&EJ z|B9I*_6)&FpeqQ-Q1-eMp9(yV&U66Zr|1wT63CsLgl0?a6ce|htjw=oibS^!I za?D4pw81VMB#m`yFV-q-eFrs-3&d|c;59$f^-+l$aCDsib`?Ye1iimJC91=UE_Vme z$xy$0)pt!dIxAoiE18@ONl4lyr{L%71K)c3`VYD^KjO_C>DH_^xH)cMgTlH_$g%~a zFzn%eE`G3LDapKVCAUBGp5VgXZT)*x@8tOB^rN@0lD-jansg#|0W4dvb-7>g5`=j4 z4Rk^z^#vFef#xR93Mgq6-^ayAWdSA#gWA*=s zz4wmi@_qltBNa+16_IG!D;ZgNm61(W_6TLm&h{!aM7FY%mAyx1DKptSRLCYPEBuZt z;-&h2?)!V+zu)h#?;r0T*ZcCiuIoIH^Ei+3JdX1?9_=LiKc@?0)Ym~@MTalV-l~M8 zhwi)9%{ymC3k3=TW+pnw{yp-oDQsSx|3!C>Fl-(u#I{2RZFC#x3+MUcnEW3Lutl|$ zbC_qmHq{+{itf(L3mRe|tVIFs%AdRO-;pEv z*esi;V9yS{w6O*O4^R)h!?QR-N>I@|IO-QivuQ@jXi?V%VDqe%iqA@VVbTQkAD9;{ zMDu;@aJycL^K!ZX@fTBuzYeg{n4#x9uOm6R1TI&^xLY?_(2k;Ea>pp6peSr*`M=iW zS8bm|e-?c~Ma^$8p@EapMa&dUt?O_Oo`KG84D;qDAJUiupC2iu?B2m3z)&@cF#p?- zt8~G>r7Dvz*lWl3g>z?c=I4*f2OF;(0A(_*wPO_!f_6zE^h|m7j}2@GhnNF~iaXi= z=3_JmQfR>P=Keq5xAUatCJ6)gbM45atNB&MVyZ562VFOYnO%TvOd{1yI&3em&tV#Q zy=z*PyD`svZ5F|F$TIrJjxj-sh8Y^furuq z=f;ok$TtH@C#oTv`hx!Vh9Z0-!;1HdAsGrx1B;luj>ZX?qp9>P&p(7EcKdpt>-=9M zbf+JO(qD<0{?3M&qob5jLC&pMQ`r1b%mE>GlrDlTexozqY1}})3k&o?a-K@a*qo({ z!AAtzn(K}u+~<*U^cFX63hemS#_EY78P#l)Z1Wzzu8g)(JDy zzbZ)s!S#?bLC4#5{r~(TT0)C@k1Z)AE&e4Hejw7x#ghJCf^|O-FEgJi^sl|6i?cjO zPFTG1qrOq(m`z3dlh~cE$poZ|G<$ah?O5*((I_JY;~%tA{ju|eHjEhROB~oC666tf z9i$rn>wq5kIFFE;?H%qNo9z&`moW+#>zK`BRMU<#jV4fxySbt)?jMCbWKxmw*jxVi zQ_0B}Vrb>okNrE2k?LClX*j5Jf9YAwK?4_U6dV4BoJAcOZpg4aK+4Ou4=ft0mM5Nb zayxVuIu~|?0ojeLe=UXsvKXaT_hAUlUo4oYM!ClZRBox=B!p&F4{8SasF4|ha|%WL0utW%QN!BGu0K(mwshX1m? 
z0+E7~w45U+cRT^>gMJ7yRX1^*%@~^};vnWu$u5CGjD?2mbW?uqjE$n~@7TFvPZXd6uwwHr1uu4*Bz6ruxC` z*a4i8FK6WWtGF#?v~(ZVb2=V^&@%7%9b5JPXn58jmrw28!5$F^dnkwOjJ|BL2e7t$ zi#7KSFAw=slql5npn9|;gB|LkyyX_UFtSrt{aW~_Wso(AQy;_sa&iz)3yIese1{wr z1q0o6C&_SuRnGWW{h90mKa%l+&$Zqi`?H=hF5yEcWCUvQwljw@ZMy$Go}KBnN5Qk1 zw)zJXQ~E&dt;+kD2b+H}cY-NeUYlA|>vt^bwRzZ%Dt}%GuEx7#*qh3$3YuK>zxh+5 zK;MRDTU1<)R5?|8e<$W&QP91DT>77VPmqFrXyZXq*rt}-+ZN=&H&`>adT}$% zHQt=cZ zOWFVJJrO2w!_p=eTI~CW%=pa}Ql*>dKu=M;ja0MD4=5&7&-$5UY#1uoIJz)q0FGSp6w&g5Lx4qr}dpVAzBf~f*G)O3O&0_SY{{)xHfcQHo$UsDhVR~N74 z&{zzoVJXU&Il1|}I5Dst6~?n0>xWjsLX+3;s5iX^W^=zN<_6ld7obkkQi-5%;#dr! z^lH!)d+CXs8j4euf0-w2h*2c+dzF#zfB?9Nxto5|5G6?UtQkZVx2ODt_%;4`=u(R) z4rPt_Lp%W#zkjuDuAmuUw1+UYYAaJzrc@3;){fp6n-5Z+A^KpTBpOdkqF^0zT5H-9VqFK-g`(#1U?a_U1+jDW+^=5Le1tCcuya0i1uTansDe6|xY z{}^G&QJT2d-psOW)d@TH^ahXeVPJyFm||n2RQRMA`NM58|0X=DcF*HisV@_yYMs3P zvZm#;P$im2<-kT?^10?$R?f2?Tb8-BU3nXgk8IpBaqDw`u`?F-e)@){K8x4sW26|y z0eHNr^{E6~*)WvB#XxxIhVXGeFPq*ptn=)l*jwGy;N$s&d-gsW=k4Oqo;tk@0 z&#!B?q+L@BJ|Fd3Y7ajlh7^ms2NnR`Fe~{E$6m?dKRltR|@|G_URY;j-c ze&+(HM!hPNTGgcSZ#&**FhRzGx%W)KKWjdo`0Xza_=k_D;)445U@y^)v?zWM#frEpsT6;Y0v}_LGN-|O zIA-DN{TB=V<5h_NVsWI3xQ*6k{Z*!vNqT(8h2y|Z8E?7Q{#tnq_d z<^LrIqVbVK$x*ddJIXHyB%$z!4)nOh5;oDclVxvtB52toB<5rHNbJUVVzbXX{KKc) zWEc5B@ER1<^U5)Sny73XH!HER%i~*GI4Tp`W6+IV4&S=-=Qg9E0KT6opy$DawoyWO zFt|aI+;y>I)>Ifx5@igr9m0OpKU*B9j?HCU`48jEhVcn$rj_pyQC547KT$8W;=Fx( zn_hGxTCrG`^uui4zhedZIT>$)L`4gw!!0I}l z#BW@{0PY=Lf<~~`C_;t&vn(Dtx*?)5&Wh#5{$hY54S3nW0)yd*nywfpqGdJ|^Zw+1V z+#R7hA@imC#DR_8CPPd<7r(R+~rE2tMw3z+_Ly2`p?+MF8X*msNKhj8mZYDG|B5 z&}sB7%;9PC+KE@u7j<8e~ivJ#oxxhD&>7-&1Y%ilbDebhrM=8555bxzz;ecb9aVM#ze)@0oG8 zRwQM~Q(w@+hzSSM$t3v2x;n=8=KqGQ+;Q*~T17Qa75#kEw|L~mH6(o*8Y;#eL&u1B z?zd8PS;^$TznhRoSS!?f|3WhH>=aI`_2U9OCx|{!!4wT&o7~>92k>y&4va}S{_qzm z9aP0IaJ0Vc!G}THF1+my*+c_il2;Y1UmsA;})rH(gnOgj!>@Gse2F`!fDBW2hgEm*w|f+phsIr$XyAP1%B78E}*YgWH5`;J5Z-0Jc<-i;X z?7q204F%-%rhD*Y)h3$vJJ+|QDi$^{$DRR&w&@S>j?eT6gtXJI%1(lh6 z=8N51&j#efokyBJ6Pu#(ugGVD1+XKX%dV4=yTJa2W;ZvRb|Q}|NV)W!b9LJhHt4RB 
z?oGXkeJ0o-?CgRa7WvzJh)RLkbg6Ve1VR2DeEs~V=!;w9fs3fW4sk{?z!NnyUB;XE zhrSTJQ}G?!M*l*D57E2`__ZJAigdea&h|aEwbIB%17hl>JT$;SsqJ7cyqm3$Q8{2K z$*&N7-VSP*Ag^hA`;GehdPm_Q8rsK`Q~m9(Sl16E;NBCg=3;($?h5KF1(xrDGom=> zZp#qj-P4sh_t;g$=ZS9hiAQ3DhY7Q2o)JbaeEJb?ZU5Tg?c|56^^RjF>t`>?sjJVw z>eVJ+ZYM_T6H+D<5foOD5~6lt;cWdwj7QjJ-gxVR!uG8j_;715_kzLZE!>y8w8L3$ zvL408rF7l3@ec>Kn{_?|yainbSJ-5YyD{Dg&rv>s+ixFM%WOY_+ks2i_9b)k>u4on z7|+;^U1Qg_hVw8RJ|wdL;$>`H!TWHj-yziY`-2eg6LWx}rM z2ZQ%)&iUPI7>`mZwi5pq5l}JkAwiXA(}z)%P{4dlu$(D#?=}_Ts!Rxopq8yHacqk? zWr4`80uu?+&0BA2P)gKKo*i)CBrFWf)oYhFIn#cGis|sxGU2%$D~3b(9~T69FJ&7H z?cMURvgN~DkDg(K4>i4g@^lwA=q@E{QX60Pt>yeh4yGf3nMK##Oxz|;D$u3B#zd!S z^A@2n)?04vce*qf^E8IJ-%&tSt^0k z8?1r6UF$4L=|L?vya1O0zM6iR4}(t-V*SSjLEih&8pf01yeHjxiwJ0u{e*3gvRV4i zx}w;C%O82D>k@2J5w2Xpq8Q89&w57pdxoyA^gynBi|%J^o44GqgM=cvZ~`A=o@DSX zwd3#tloni}y@nAc##pxhydcPP$;i;=>0ReH_Xj*Lr~)5~aX&$Ug%f=RF4@M<7;W*% zFLJ1z1GG~p^u7Ex|ESTzt-$Vzn9W=Nd52@eSMiJm4{ed?*PQN4&2*?eI4PL8qQ~f7%K~F%ozgvh0NsIvqM`%LS=)1dz4GA z$G1r~!bJFu%=_VOUUNn1!=}7C8_kK0`z{Xt+VRO&(Q5~t)i{Cr?OqW?+eCc#5PnJ0 zsUP%c^OxT6lKe{;-1IOReo?i|$_*5M0GGoZ*A#ayUp~kfZ=>ADk1)iUJlqoJ^fve{ z&LER*%+1RWjq&32V<67l|3{qteX>|`iVkC4VFR>- z^f0eeyl{Pc;{+q>c%uHGQ228Cbyh-*7w00p#{#eV|63iGaVBRCexH&}soz7K z;)HvGQ&Vsf0`U`3&eyrY?rK$6M<3vl!e;{lo!S$i`3qqwP(o z1YgxUYricseqH@XQwe!5;TVkP<|@9{>b5-bMQncq?Fx{-|Kkk(zimCp;-0kYzcKem^Zko8L{Gy* zv;kb;y5uW^0Tycev|!uRLs*Up@r2$=>eOuWQ!3&wB~IMT+`L7I1)n*L(i8c;KVaC7 zFsD1B$P3nBGI!kn;N$-@D*~Cgoz6#i=_Igp|KzI9kL^*xmFpOMHQ%)M8OzpYbOkVM zEZ?~N8Pn!1H+5Km4$%{YTf2D|mM7wO;v*Wi6s}SUG3J4o7U#fv@sIfw_RA>xwIq~# zVN<$7Shm{*(oj_?LSaz69kAA|lK_oPd}1z~!f6mk*~fJ_&wYIDD(!C(*}RM z^Biskn~*ebx_0Pv#GBxu-MwulQ)(eDy%4gc8zEyo&;3 zgXEJ{(38!dQXyPHi1C!51~?zhRu<%)PWGw;(jQ^ayRe86i|h^61)&|(pbPsecWiff9WGjacKM9ppe!GQh~+ zzOACf7}zB+0l$3RBr6~%>4gR|k&`LXOL5!N16S`ZjLEIi_c{Oo_K2-D)tkw>^=afQ zpFMz!Ed@;X`)>ms4-pZ4J^MMsp3QL_0pE}Mit$OeZHl5qU`$uPMqdO6=}#(d3{y=S z-qJ1?X}P^?-rWOwD|P6#J=}cJ8Vv7_6UGi8w1bCyIlmnMMPUk;lfC@6Fx)bDEME#X 
z<5=z!yzSTKOTevKjwd6V%z)~Esk(&+U>%lOsayJ;3$V?C=?Ll+jr{*l;G!@1+XCm; zZ$xC_!=g2H)tf>Ges@y_mhFHkxAVjOI#^3>jZlZMXUC{!r z6I+j<4ghOBdLwzedh@4RAe74YU0NR{uf{)}(yd1PrmbHle%yIHAzZw$D_ z3b&ZLS>il5*G?OeIX1F;F{F4IxZ@8z5hUe1kmCV8&L;nC5|UCBK6Z|llNv*MUxBw? z$RA-ivH4R8V5@w}ZORiqECQNTky!$cb|(+0ZaYPhwT%xV3#LX5^tM*5aNDN08Wr5q zBm4Gs!;AWfkX+VK2(!SY+K9^wQ&de1)Ryb`rGZMdV5UTn-o@!6H zuG5)qAV+vR#uH!P)nvHgg&&^-M`KwF^fS||3t?}HlT_U=1J%m(2C5FrMxm}OL2suR zmvyWQ+UjupAXc9UMpE&b9l~eo%kV~XX>>YO33qg;Mr^5Re|(3QfrsE40I78 zR}px9o7RNiLF1IqBNO{?%H6rK>;WjfTn*(14Y^n3q!j{?GD_Ta51W_qPx9VHK>>)% z&9$vb?cY_^uhCDl8MaheeHd*`8gwUC!8#dJR^sGvULjFFs28dS1wf0d5RFI{?IPaU z4;QD?R-x~VY|&jjWRmxJLswpVo#GQSY)>_LzKB$~l8Y+}*WI6}oaOQ4B$q4@{8kv8 zvhQqyu1Dv1_^BUn<1|~s2{zh;F+|7xw|Kj;bVTKqKsW#TPonAfy-@1NVxo(^d$z;f zno2t2%>8|bycdfuUC}~AJ26SA2wv@xj@Q;k${9Cf`*)4Cr36AroRHqWT$93j#|(o@tO>c639k($cY0*bN-+%+M)(W#NDO z1$;XTR+hQCtr#k-i3T%;k2so}pC9~GU@6M6PwFTaY(d{Fz26EO*;2d+Hy%fJ;W~14 z6=OdlfA#}JO3Kf|a$A+4Nf5ZaI5Yxk3O;Amdwi5uwj)7*7V2w=t}fSCKO$;F3Ew%!m;B(C|y=Nw+59yV@!MOzdTcHd3wF*uK9C*mxu}O^`EoS)gkld%?>tbU;TsE z6jD?asHDQQ#w_PY6{^Fy#QoII+O#i2B?j7;*GdC*Ek6tT62XwURE2-bbP&R22o!9D z?92b&A{UN?b6W>MGp^^y^PxQ3D}^LQCE_07&ylv2B!dsDD+}UYr0kMuoe;oHRyqy! 
zOYz1WNf@-Tp`Vryhe@)-^%BQDm9D`aWmcL_-EZCZ$Ld_0%?VCd7Fxfq(l}h@TW$eW z_Ee7--l`)t=$zTpJXUBk5dckIR*G`PM_cST=rXeXy!Hl+cl8!K#44tAiRMn$U*=(g zO5*V&j!<^S-?ZoT`OcGWT5qu9_N>8f z^P`o!RVie2M{BsDJr)F%1=J9)rEvLq8VX@}Lpjj5)6e=VpNfJ+m7H3KT?x|ZV6%~p zb~w~n5*hlKCOAcvudg@ztK6hJ{ETPEFPB3l6-FqHDwWA~C%OuB@uM)tv)g6`wt<{>$k_&R^c{ zq17mI8<}S=Hm$pP|*_lvnwF)Wo88RK|vPP?%b$K%XF_Zdh zkp~Bvm3z&)vY#hwmi7uIm0pMo;V>bEPK+stgs>P5$hqnJab~#jtlt?S{AM@)r2!(l zwIvVEiZf@}JQEd?#BvN<2T}|DYN3@HJrR|J72D&6qwgj!8h2%nG(o{m!Eo86`GJq& zs+qUy=wNI6IZR2?-u~!Fn{*@1CfP7Bfyg2~1+j(>Y}++B|2Ni2FAT zzY|!uq^gUPav0NqrqBO`2;J$goS?%=l=JUg$EDAkO6-Sf&Ww%ymk92RrR0fCe|Iz8}71`E7q=edM&JRS0qJGK?p&{d z56_#A=*~U4HW=YnMKw;2xGF#^>8!ts(Nj4yJbFmn#F|%0v$u-iQ7YWVC&cayT&44mPIK`EFEUQC z{tC&H55Syxsr%ts<6KE9pIqFBHxXSKksgtWH`1t}8s#$v)m+|T{IjHq4Uc8}T-Ke+ zc6m%y?>|OIV>kQFA1o-6RujL>ks^Be1j?tELlke-JJ&B3d^dR|7as(RucAT0W0MS9 zL|Qgo3rjR`^hyC#^*^tishepxOAUfWp3C7!a=$EFwjY%wDg13^0P2JJK;0=>t3yQH zb3+r*$igp#-ALriTX(OhPtdyU?2R;yKoD`YB~B~4D?iAE^BHMFNukcMK;~`E2ZwSV zw^-J*`+&snkh*sN?(9%~R#Bea?5*qNRwGRy7HzAcg4M0XiBAP`b6Gz+GIWA;vhRKL zJMYPO%rETZ+SK}J)nrehzaK4Cy^s<$RbIHzwx~Ln0Y>teUAA<*my#>5hd$EYqvm zvR;Zh1yG|#qSBC(%BZb!Fs*d$vCAqHhV>bWSea6C5-{^_cZPC;dE=i7%BQ@>xNmi4 z4h}$x7p$RI#$aKH9PBOn=B3J8k`aW1+S2K01&v+hZ^yBO$xrP4*TFeSX zICrTiS6zfQl(VrgG<2Nr=LPnLrW94qSE>ujati0>Rf_GsvJG455;FE5rO|TR=hzLr z=IeX*8n?Ika6`Nwm?6z8P)9fXD4qPJgjsHDG2b)p<2{Af*aQa*O4=iPGVbps@QagN z-%q>>%@$`ylhRJ0kGV+Rr=wR)_5ra&($iwrl5m~Xeooc-X>)_n(8ur?&^R9V*Cg~8 zb*p^Z8C#Qn62x423f8swi(i5~)E! 
z$~J9c?@YjxqLL#PCt=rZ)=_E71mcwS9wVvN{+5!}AF2jTF^^7iS+L2pNA zx~pW}@)~)zJU@2g#g)Dq!3e3>Q=}z4E^GG&)lyVTX^QRL6^ljgi$Y%>*2`nOlJwUN zltf%??wd5n*ZaVd?dcA8#skC5A9d;Aw$Fv4!~u)%v{obJAlyXo2nkajPYfdn41v^0 z;HRgqs5J%el!cMZ8AWM`o&pHYsdoy=T@2!M3S}7AJ&+SLvPf&!3d0my3I1 z-dteTjFN=uNis8Q6m=vlf9ZjJpflHI z-6=w@a+j#9<~qJPqLfZ1C2#wa> zxsI(DVa}!-r+u;T2UMRU2@DulNz*9S11G?{Svk)%MgMz6;VSev^5d~R)vW`iw>kQ| z+O;02H`maC`F40!x!H(PCX~$}TK<*n`xZ(1O!>sQ?2()o0-C{l3Bt|~blIM@fPP1nleLG+4N?%S z8MuBRs^D^5Oox}Zl4My7#_K^@sGvJ9rtmmTy3TpMKzoY7Jp_omUd6k^Rc z;%ONQzAs-T0`$vuaN|3d%2QD+xYm*P}aMSbA zIIr7FHSJqUnVp4I53Vz{E-OXevEMEE{MD8(1Y(8`9!~8_^`S~?y?XEzuNIduy^?&Y zTIbzjqjshls1Un{&s0rKl*@K9tg(;w5{vWCyDzc5Y_s;`7_@pq{Y>=;$C>J=k7-xX zP&|n1M=RNv8!0NZ)qdyq8}dq16Uf0HYy9yHj9Ame$>`R5ASCF{3LgjtA9$~I1W4GL zCnql6A0;!6hZ3ub9)~E7%Tr!WvS-jJdS~WNk&@Z;N{+gAL_B~-TUGB}^YnNpGh-!E zYr3h#>3e%XPlz=$JF35%NiCo9y=QQGQ3RA$gVNS@-luU1Z1r;@p*=#iMRI8*dTxXw zYpy<$zx2cN*@!+dnTY#yMJgBKB|~$a^M1U0RPPnJXX+by`gBohpsB^T4~NI~j`T?n z`GPIn-v{+kkFblU4kRtRQ5&nm5T5nHxkG@IN!^i}+XnVflovqcA9-1s0XW5exV)(y ztbBF{@^fc5*ue#8j{6vkEeJ_8;R&DG7KJ($E>D-7P*0MHk$B#8?+yS7)q(WEgYz#0 z`h4(+s7TVf70x!ciwD|Yg;MiI+&?}%XJ+86F-mv|g|e@hxt7X!9n3W8RxCYcKSy=9 zkB%k6zq?MkZ)K+TV%)BWAHNI+^VERiyBDzly?cZNaK9L)m0lK%a9LaOc`(+Mlk_2! 
zv+qa#oBNvn!d|4xy;=K{?s6kNOe{W^IoDn=D`po)+PcpCbUgH(K+LX}SuOveBiLYZ zlW*K8k29&A1N$fY!&pbg%V=%FKAWG5=HM9kjST*n=qAI;gKCYmG4o4V^`MrFdyA@B zBW%kmX5*UGRwaegI-xwAXsPq+Ig>87Ot3?JzD@Qh(|^u3?NL!oRnt2mkYqX0m2+_* zJ!}C*DpM2685w(|6}k-joDq8b?BmkfiX~&<-SNaesI;%9lIMIKO5~ra4Qs8CbFZ9@ zOP$WBWK~+`R)1>qETYV7?-O9>!+nDT)kM(>&mFs$W{GthqR^Q-l~Ua(<$Ih@b_*a8 z@FT?G+tQg(#0S<0C&L_40Nx>&BwfZq+YvUz0{DcBsCzect%KVTVk4696k>Zr7kY5| zF{yLco?-v)bMdy+b1qAd!D1j+l_zs9$32pg7j6^Xwj$swJ<%CA<;_bHd(ldhP3Lab z{Fu3tZTy5FmJ!%@CEye&LB$}h8oDGh#8b9p?eTvF?tQ^I{T9I7zP}xRUUxwa&4vf<{oo~=CPW`nNL9IKGmnJ8Yo=1+VA~T2enifVaj+QV|Tyb*#}PUQOD>NMaJ4pzE(mz zLRCS52PN&aZXZAJ+I#k@l6IE_0fVZX2zuS4G;k;zYXPa0}JhVwA=Pi{b(2KL1U z%~Y(=LMmzwY9sQ{DkkR_Ess)mk6r-V-xvN1sdx<*x9(IgU#@~B$PPx8p0~|&9!o^j zm-FT(J}n(P+tS|yj^8H zY}oS4r8X0Qs{nj(xBsV9pl!bd&Igh{h{r?_|G!s9$vZtLZE|) zzOTaK)&?;~DlG%Lqf!&ZNVdKVpxJq*RKc?8?E4xENTXcGK4=9l(wV8pP`Rvei1cHV z5d)O!%pGcs43)mzc?KF)`RJ|l?E_~=)p>RPM(s4LX)vJC${Op?7U!I1$n0%V@hNs$>OduxubW01gnTgq~tncyq(OTzofs(Di2sxV2lKQmJrg5r|w zbhb+zh4ln3)cn??F|jiZ04kfw=QI%#O)Mj>Wyx9?`Ib>Y$pNnAyNb(MVmO2%`M?5h z-R9@EcnKs6;NXz@Ja%rEmjDJ(h>PSA;wmwXtCTwPo!~HjruU6Bu^KdKtjhae2v?<) zuuF>k%VS24s$eMC{pjRwdevOITGS1N?7{8Jh-*jiLa?t7CB&m2K)vUyA0taCVb=rz ztkrEH0qRxvW7vXZ+98q#%@AlXHsSyZ>6Kc6*v~OiUJ^e0Y<-ckyuo_SGI_LM9|hq2 z{Zz}SItMA0FGA&edL`-a&Q0-CHn3pzMe-~-J@WL-nrH_X<2lblUs+`YU!hmSLv@GM z`4_C_Uj=JbZFDqWTUFzla^-a87@deHNpW1lk3J^a@|y(Uzi9@z1epvf z*)pe~YHe);X-RbuW8tudzGb3u?3BSE0QeEdRzFOeP&ntK9v{7MqjH+y5g!;htVV4T zwHY>n*4fzg0K1O{UwulilKt#@bw%oEuwtD3c*cRa3m_)x0hiBvlGOg9l9I$Fg(Yq% z+lbWdQ>LCLVnb!x&yyC|OsKF3G*SN}srwRwCpP`J@kA@L*K+I!3l+xd#z;SYmtU{i z5U9OP3iNhIX=G4B$TiQ*dl$<=lVr{wdnD<_VS)?b1XkhV@UA=VZmc;|G65j4FX&|z4b6<31psYzPi5zsbTiG5 zwLhK8vY)4`_ATcnCaBaGp}J55vgr9>JhU!A8&x;-9EKWVf zkAIwD;F2OpiGe!KYbw3onbCr~4~w$;^I|c{JAus=TOrm7-ay0l?lZ zyF!&5LssXXmuklz9HIyU8CX#|d9-LY1)HdT%z1mxLY3hYP zrUNa;-%1$rnsiw~srby{9Nl(Ow!2au2iez+7d`aC93ANyxD=|VFQ<8@U?l)te1iFQ zmCqFpvtHeh5a)xpz!aZT0iQ~*%I8hcc{jtfFZ={pdkwkkA$~#H>OJ@R#d%0j505eb 
z#;|Q$WH4$iIM}96qUmEQa!HdQ#YPD+Tf__NR50uo&@^Ny2$B<|<4J7C!zdF0_Y$1{ zLFzesW1`$)K9s}64@|yLlvLQF8nlYHX*LRG*7O~yp30WL*jPgdy?HWRyA-ZXPh^Al z@p9P%{ETDJcSLOj@bEIOy@g5ka*ly?3IkQXFM|Ze6I1WnHpz&*xauycAj$SA<}~%x zaqskx8?xO^52=ZsdVqw5Wo_PlKYsuSD;}o4LOL&7>cajFk3qG5gez5}_%*BJ;+@)p zs{|~I-AUI@*$LWAe$pQK`bySvyyLp~E+fziO}QpK{nkC@U|m*QIrfz}=|P>;nXjHH z(9O0tZecAY*RM0-^;ZL%R=>GokZafW649xiGV*DxpIw_~= zk-kz0VzOEdk3IU4yYgIP!xMjNGfCRAwAjxcw6OQkDF%OP=?fa`bItb=csyizQ871@ z?9;1zLAIZhz>HwE&*qAX*O}-oRz+~7cZa&8h4zp1l}7eaIHlybytWQqybL1EccMEl z)Vnd#sCaLY>ZP=K0K#gM($;Ky7Q@pY>c_@|&V3;NhPvTdk=d_sFUElnbQJ_f9~E7= z4Pw9+F3lI#E;#JKFw3mL997HHlOWkrG-Z0otz2!qXOjSXd5K*@hbXEw*3kDd$}E-_ zPHiZ@^|yN>7O>ql*hpTy2!;mlZ>%PNhZWM66sZw(L9aVE1s!y2WerqqtWKqObgTJz zS8%AG_Ui9k0L_RD9KNCz$q+VWj{_%ea$EIR5J3Ebs@Z4*Ir;ej7n|{O%PW6B24z+` z&TyB;==WbWn@6vJJf&=q5w<4oGb9Jhij9uYE^l^-S>(}?kJ{vgi8S_(m&&=hBh)*L z>m&Fwwcb3){Sw#|8yNtmAc=U!erda_=SWf0DI_|?ECAZ<7iW>IY@fXwqPX5jheE4> zU$3{mwi;&&=m&8M;XwB{2-~R_uHBfZpk?v9phJa7-6*-r(zI5$>_i`19;;y=`IO}+ z6C|W<&8|<)V4H-1PAj`>w=k(b16?XZUjybP4eK;$tI<kn)H9GIca9G{jE>_!M= zlevt?h!S~y4wq{tlj`MmoaMgZT5{#(i0+0;B$+V%B#P0y2(( z(*k?D9BROfL&8R9E6BnR8qUo0@fo4*L4R93)kqrSz5vysHS zh-%lQpxb}^(mvJftB)1M%nj}P*ZZH|CR){;%C#62s}8IkzvQy!IMOs&+91^6zq(F7 z1uC)9Au*jfJnP*ImB!PAozTHloO&(Gth43xyA}~B{=WmJmhl& zf+)J+PX^5nR{P(8u*}grr{374CmAEr$CuHK0*Cl?Nxy-7IYNAcb&b7nl#8^Y!H^L+ zC{yA}+Wr)`0RvF#!HlYxGv)yB{1x-OtL`Ga_mFd)S$gfoGO*ra^lFJE`g4Be0c2@V z{PuSH@_;A6Qq-?i%RUpR7>A#mqERenF;JyF>b&;Tj`sSQk2Vjz$u)z(0$1&wO|{5z zUgZXuMO>u)sZH*Fi6zBh!_Lguxv}kONiO3P-JEw6UtHZ| zx6tz{$QgjIAh2WhikpqtB5{-X(bmDv+hZbt?xKp3|ta`M^hH?MmRe>Sje}81v&wV(^{(Ars=NrC+`M7UkA;1Ik4N2~RSM#0jzP$7E%OF7^ug8Nb6Md)SLopn3! 
z{F7JkDR}q34}oHHd;~1|KG(`m0$7}K^0K26M5NEEym;H(@)CNV%jOs= zG@3`cl)ON2gotD9O0?`5f8U2%of)Wx6$Mf>0!{p+$P(oPfs@@7cDp z2YdM)Rgs`M;O<9?H2QW!7xmN7AqLNocWRgasi-dxK&0(eC>C59stxYL#j;ZwtdXuA zsP<5ZZ!kU;ILiCchSA+W~gKV|cWtDeyo4)b@3eJc?V8(dqcjuw(L31WQU5^U(iq!#(e zTycw~fc=n!uA`=jb$-|i;SGlDDmfLYDau)0Kfakym6ULHIP_X3N_@3*S^Rz&;)Y7y zvt0`Ejou|_{Nhdxv)2ozIu%a>51$=R!ZUD<22ClqhV!|g zQ*lY-B!r?^-PL)?`2kpFOj}A zgbsv*CIn~nDc>}+SwRezU%V9JiPaG97uqcVo%8&zzqt*uI$`$1@lp3n;*VWpjrkXTSC~YmWRfKgRj4G&wLm`%I8}H^l$g3}zw=Y$LO7mu8;?(}s*y`z=oMm`&ux zE^P=LG`+l5zgPk}D*(S;(}Rr$ru}mnW^(gHr5uoc2E9c>RsH6u?_fUM%ZsMoVv1eZ zcu0b(441b6Sf5B#m7b>#@^L}XY&oiASkkV-eam0x%&(oDxRHC+YjL6Y>Fj0w@~-ol z&J#DAFM*b0BOhD6)it15bH7yLbaHtsa=zX(45Lx*y7R4fp#@-`XetAu$Hi)?@guPAXzT)6B{`JyP%3htH^X-~5 zi~XI(Nu&CufJ%8P_I)=g;cPPdlz%1r^%L7>pR%7z6jjg;K+EmeDVLw-XYa(65(hAA zeq1Ck%Nwa%;?CtD_ykdo)RC!ugS9u(ozftF+4OqeGiU)8%=1F98(X~0lhO>URS@%> zZ6$WIRNbk)mSWJaY`iO{IXBYGIs#fS3t0?()S2UR{`rjd`@o6MpdmdnCNn&kA&}zl z6I)4P2`%GAec!5ZRXH?3IQp@o#k9$?L*!mANBRRxKboV0_U1Z<%AqO@u8@+_T|XlQ zsY7P~SIITW8*Md0;^46+#UZ%BxA4k+jrySmE1^@=_m%% z{jW2iS#a2R`f1dS5Z#*d6k-$bnJWI0;(& z*;}B3&w=M>QAut~!kIMUrG-RP&HF2Ar=Ul@K&qqwBLvw_zkht};z%Tc0FQw7wq z&<}cITAO&PdW^s*UOF-eEbNNDsGy^s&346$H|hItaTXga07AF_u1;KYQIVNo&e$w; z^AsCtijCE8jP|?i#NEi(MqM?=PrBGh7Fh~dUL$q;Q*xeHO;zJ zr~A1KD`>&VY`FdL8AxK3Y@r&NVgfZhU7Dxg09`N(a&<};zj|g`4AONk%-wP>e$|&} z@Gx5D#ajy)B~IN%8If{Z8G;o3yF#Jr&qOsBta)w56Q&&`qT=3og5V?TInMM`2BhNn zf)6aaG$_7f`bwo!(qWh0;BsGL5BJ|&I_EOc1<|G$V`Za2GHJ~o7lPXzdp@$?k<0k% zqVVviq`b>3jZQ z>7BpJznKS?aWYNep<L0{mX@CxJ-ddAQ8T?oQr5l@20OjsCN0r!eu&8$DDJbr!Ct&At#toLWPOQbebVWBkU7~rU1Z;1ET=%S+5@>e6BQYxg4}nsN)Lxg z>XWhRJ{(zKN%4W^rfrWimmSb+&Y#l&1Zv8)kiJQ$0~WzGNM1UA$IgCs9nxp?y0X{b zl1K&mW~X_WX46IIY6(a$^C-b`dg-v`^8ylGI;)bdg`Vb-xf!B(suR*AbO?1fEs#n% z{Y6K>!-WTl3UhD17c=O2oipK}(kYVWy_;?qLg%5fKb*; z^Abn|5CfZBa;UA?Zv2ALcR?OB#qkMR(U+JR~~5-njE^?zXV?$v%88 zG+p!J5kRW0;&l%f^o7rO7>KxNlTCTsCwsV$)BPMK>u{Bvv0nU!09#Fks$!F@}Xd<$nl<3byd6ZjLIjY>=5TVLU0eK841kM zDcGwxudi}GXVH#^B*ynv&lICOOIFLic>;qZG zowChdX-M`5mu2r${#h_s$HIMBO6&5 
z?_?yEoiQa%Zqs^N+7>TOVbD6dM?4ty0*xYSui*#e$l~Yf41{5uUX<4gEAYeehYQ|6 z2?3o)hsN}2gao8Xjm-_03qM%a^AS@7G&v_Jruc<%t*?ex=Z&x{LeBNe$I>@dH(ZX~ zAZ>7CsOHD`iSOJ^*~R9v#w-4L8M4S^6I+Pl=ooG8k1Rp^b}cm>NTA9}a95Q5`PK6x zt8oWQqg@Ak{9|TFbTq|rV52Ox7{qUWq4I?q0w%fr2Ks7A3S4`h79DEZb$#|5qeTBD z_dFzXz2EZ6`pSDEvG&zwLw_<(QvgjWNY*C~Cl~~VzzmRl{@vE>z3rn{xGMo<>Hd)> z7w=uP^i8DMVKjo%>~gb_ik&Duiydcp>DuKf)fZKTPc^3Y9XgecGhRe5Vz+Drcwdk- z0@&i_=OXSUSf~9f0Qb`j)Kl{b#v;YdED%PW63@-&0jhui8@xg+G zy&44KT-uw(yKRd?e1#N?AiKj|er`M>@zNXl_>BFvZbx!6l9It1=;yf|pYC;={FG#T z&fb?j#cQigFV~!=)$zt~UJ6s-5a#z{18VON$uZ~el0x#h|E;=^?o(-;d!gg;?PyLp z`;cD8AbuD0B0h^0HFT<{m3`IZqF_3=SD@HVayNTKTf+6T{RSOh3^##g((+f2x)wQ# z3z|m02}^4hQtM4^fJw*r!%E|)9=txq-WXqNL?V*Ht{cP2?2#n-d?vT(hst7DIXiR3 zm0jo#Bq!WqC6#l@DF5m@gQQ4vX!pFwPYAE70T@Ai2bZA7P=Q2x7fJw2rLI!0@qOPe zBs(vh$5v_WWNwn|OSD!>s%(iPS9c9Z#O$!-BIzlJBjxMsSG^h`H+VpjtSzbGEeV!E()gx3kZ9 z+Lq}cry??Ax#VN65E6*k?aSoV(a67Z1+upoNOOOfj+W?F9}(BOR*u@e?;xwf65cJZ zwPk&!ILjfaXe`SVi)`ag){*u!7nycH@1#;@oAJd=UI?*3rnW6ft4%hkcf8-t^jW!U z1YN_VyK|sbA8f!eiT3Z1r4|F`%JhTD-r`#Mg4@tFo%Ple%UH@3Ha3d^5!&m~ZWB`p z%MK+TnqZq6K7YB!3s`dv>%=>c>Y{z}6aasYC&IWK$85AiBz zb`M0lq@HlO>_RkHq*RpXFha395*j$rJpN`%(Wnf$7lXanhBlk z+vTmUM3oU}e(#lWKVCf!e)l;lT_Pq6@Sgm1Pr?Sm$129M2Gh@vzTHJyMrzn@UUb%m z^XYLqc^N?4dehkrTWC(O8)o;9m=+8kKYQnEfoNNE#*MUIzzxS2`%l5iiGGiccO1p$ z*UdgV4F)n<3^$zC$;UrNS5-Kr68Xt&;hkCEEF_Q0&xyA6)^R4koPwhzDqsL=tDX=U znwxJY9(?OQYcbkV%Got6iD%#cs&{yKWy*zwQ{7Oea-~9?oyTrQup^ORgm->#4WK90 zYpW|n;&s!AvHV*;+#_som0p?|^y1e6uZ&#&^ZSd&2*$YhU0XQz;Oa%I^|jryS>Ifa zDLYB0ah@?e76LLAkkRLZ-aL~mrXhVd8Qx05~cFJ2n&4OXM;UvcV;Be#1OwjdF zPn&m%gNuOV!;_X8Jvq4)r#~1v);VX}0+A=OK)LL?EeAkIu<2RB5_?05VQj2MvHgkm^`Xc}AHbSV9Fyafbm>cm zNXaMY>@NJB)+cA^Qp)$4cSNyIC~KV-dYIc&S51CK6gnZT2s0uK8 zug;j{6gPj0bL_h03>+{(^7aKvxRjDSp*!i7%WHR2blfRcJKOPjApGOknbqfyAZiV5 z?@BcbZA=>JVn~?)sqjBQI+v)As(&@&5*om?>ZV&8B*}W~!#T(pR`?jU)U;@4zD%y^ z@`Fk;gy!|WUWKwb%ZZvmNYn|RtqDw$Sobutwx9>;fzN1EUyG93O2hocIAxe)&$UA*{4!|fjIsGn=ycp zhx)UoJef;;PVw14Rt)kMl-s@P2(aC=81dkbxYCfvylPR;M+>WH%DKic<3!@=d~hs+ 
z!?;6+?am2FNtYWW9ODE?!osVjoCVVP(SZ}_o@>z2@cauo` zUD@(V_?kp8okGpx^QH%Tho=gbOI?0WKbY=GbgoYf?15u8%mw!I%>8_;;WyIASBvLg zJUGR+wj#y*uyzYRF+_5PwHQjrHt6Kos2~CUYOMF&*Fs8weGG6 znJO4E?SCON8nvF(9-X@S?vZ~JEMwF<*=J!@%bVX`YAkoEzPM_qqbV0_->{h6hMx}^ zyR6R4aYB)0bHv0Vz7pnf?t`OintO(ZRWHyZNp4pWpg4rM7!%;36x;Jtp+eNS1CbfL z10`>w$WGt&_o+pvOI>Zbv=>rcgUi6}JqKZRXX!K-WjjFe`>F6(YlY(DT)rqJ$Y|Gg z+xtVxvJqn%1)se{g!8gN<@9*O@s#lH93wNFCq=5z3l*^WhqF z!*gJB5i>iV4Q5(p;9red|Ec?PPic=_MR>&56zzyag-G+T!sd{n%`B%beTcp^hlHY~S1&#XLeFtxt6M~AM`=V@=$%z(e%JuW_{U(OA3-jjy; z{)6i%GpqK#*tvRjQfyPAhS2HkJ#t>uen!nQHL&d&;z__^kqBYq@{7M^%#4+CIc7a2 zD$+ou)754bY!S6}zWo1(t+#-xa@+nuC8VTLkOmP@M3C+h6_HX(8bOin?i8d!O6dma zl5UU^K}x!%yBprzo^$X2zVY4}`#AP-9Abap`qrBBSCfk4mywm)2~&!UHye;+m@4^? z0H$o~tueQ)_DLMJju$R^l8em8RmZ1g?zT0MWv(lyM^SidwXE5)Er=APN`4FAPSF>{ z*MnZ**;AgZL+~dCi{@ml*h?AB6*9dkBKrO79avT*3pA>NwsiK}wtToR>I-zgUopu$ zxAhf;DszK`(+66o@yZh|k@f?P#O=__lLF_w=|ESY_MUCiM0z+pURvMc#X$k9!Q}BC z!(gU!+UyWeLvOx{E??B8Y-XU@K5fq zaHMNDxalN$X(g$h^a{{qim+1h+hD9{sO4)k47jWp@3(nl?2q-MBLkr(3VYeDew9o9 zHQRhzW}Lxg5wgs_;r;60$yJOmh2MAIp4X9_HF5e0YL8L&^a4%qtq)kYN-j=*V;LC# zb}>;*z^2cpET~%Jny7Q;nMcAS)7^D_sa&+QW3Rc4a?|Z(VfabD+|gbqqtp*ri^07) zYomkGM<6b&PE~rIkTWWvNi=M|883jaKfeqleYu>>pN)IT%Gu1H63_>!9R)F>&>3cEi(K)D zENlC6K?`#EH4~v(>i*0&S((yU@C~7G%u_LRk}^mAg~eC@@a6VfaEikElT!-`418dC zrnm-HJyN;JeKV(dF}SU;qddUW{Y`$Gb7p>g|9#Z%!UNurukQB+Tu(WFoB{e71%fHC zUx>KHFB2Wt#R=1Y6ZQ0ROA6UAe|4IE3@uayOhxWKWW|??aqB-cOeO1x1wZOdW&`Sd z0Llvq%u*40=3yukdJYDcCJ@JFKi0CsukkX&w|pzGxpqY4M%X>z9mfICV+n0M>~3ri z9BM&veMAHMXTFCU+9iS*PumB#2!W{4QT#TLiT5734i}Yrbc{vkd%e~B0(AMCxjYk$ z=};^@2gG}sXWw%7kL1^vv-iheuEqd+>=Khoi=WT1G`R0;f3f%CL5JZzo&Dts=INSc zZ~4QmX_E369Zo3DAl(hVzhZA&y$DvhxSW*hYgxk|l-quHjSlA;(meIP?{7%c$?gL>@dg zBqIjUs)B&5?`4Xxk0fDKGQOH;+Y!G^TwiRUbiGoEL`e#$EiYrPf9t$imOqowV338!DuXEWbit(#6% z`hzrrt*;*2;w?hFTzu#~O5@QO3?=1K+c?qt|4A)?SV!Vtz_DEKQRV!P7NBPbXOTA{ z(;RP4+YXiJ_YSHlcb=v$){GBur)(4;d>(m5>?t~U1MD9Np*s!0Ox7A}a6K}lzm!BD z9k4zIz|uf*Y{zt|S38W5KF|G?%VIZ4G&w;}k_apPi&nnG=}p7p(-`Yeq3EW=&(PMZ 
z{VsJq!~F`CUFzZ9Uuk+0pY=1&^|?XtI?w{WS6r)^_DlwFhlYg%XvlB7cdk$F-;c0e z6^zTST5FP(yMt$=&;;$oU8wn@_wXoBeWgD?qV3z$y$0PRsJwa9R3xs{rKz=&@t9Q_ z>@0Po+y3Fpgpp-$(fupX4kSTldjNGecZFMv??D^Rn7^qLQR&pX4RY?=K=*JO{~qK# zFVbnzo2az(=g&~4AE_`eH25C#CJv1$;MM~wc`ot`Gd-YUz@f?8n!M)#T9b7BUajc+ z3Bs;j@}{~V{Y02{##i79J#jkPG|fp(X+w1MtA(_8>X3vqHnwM5#u4VVd%hLo}J!_^7JAFt$1HY;9pUSdK(3oQDhJq*VtCo{KNa>>zX5JO8oasYjv3>x>%<=lj7yAgJo7 zWQTCbC!z3mc~naIe*#TJyD^S)!_ss}r{fwp^U`>HuaCR5@H@pbZm03Y81DI4>t>M2 z7TGoJ)8!Eb5ki?T<8*^pDSxMX4Yp(XGJ{v`j60K#O9{~2WJP=UQ(d`Vy>37LA-VOQ zhqeo-E}jYUwevwZPaCG&p|^%?|if=b+bZKWeLC`zzzmN=D7{F~E2@N**?xe4Z%d#&nmH-SVvc3$rDJY8?;> zQ~{xOH?{mZ5D@S%bu8oT%@&ZQPOq&TIZIBKt(GY#;Yyr=i7d#%gS%$yaDVX%zJY)* z`uTNR8)-`Q-te10?^otc%dFZ**2^pvAW`Q_za0kUCoS}3rL|lj=IT%OJkWOqh#7&O zH%JXx`~t}CT)A|YYUUrHoPL~*>-Z3qW6jTUr`uQUQ`ESa2WLJE+$|dmb7OHj2PB*U zl*f|W@!pv3N_i@;rW}_xl6z8;#(PDke{n~0SuX}XL631eo<*k{T++Od7V#E2?Wj8a z>|4LT)TXV+s2Q(}9w*|SsFwi7E$-$xJDslNm*+P92L>OU<#M$f5^Hzb$jgUT6MXxO zO%7JMpshGAm~1TwoNqyN?=I0nu+#QICMYBSQIyjEc#iOGky8{wfjSP`YYMw;za~2b z@$|%U(0YhAqash*IBlWhZjQ`z0Jai#9EJess8pRVFGIj%KmY%c%}j+&N3 zlZ(O*P_qxCXua}tbnHVks;o$P%mtL}gl$(NUy?HA?SU&~2*z;30nP zEWc(0E~DvH8ufCMa5S96Q0>gA{RV)V(yRg%1_FlEs>W&#Rt6;0%1v~>isrlS&+=)z zyNUsz#;WaNIfn5^qxLvhqk6yed_UrHF9KpsI;~JHfs;HZWB8+=0*4C%*YTl<&3?Nd z+xHSI97Th#wG*Ol-rtM{w^46)b*k8-EaM+%HLUsgiEMA5{rD_9En~F(4vy$geAW!( z-!y6+UIh?8jLx?8wJK7meB6*Uu(jf%p8UvnVC$9I1xQjAqj%zS@K?4BK|mN`JbW1r zw~G|d4Xb=bU%4l~3bgPKqQ&<)#J#C!4eD71q1j=+DI20X14{Y6~lg&YPI0oTsA-RaUp8qL{fn6hOAnwfCcl>-N?3!)I$?|l9j zyZZN!Fj1tf7ku?fmNp%4nz^U44quZ>oEh3uFwNh!C~da54T0G-?5MDvE4~3vvp-Lj z;|HJc@eGP;y-O9KDMOX7?!1&r60S2D-Sy8H2jV;3>EMc~euYRAL=`$K6=0b#9Lz4B zH;iVvcn&H({eOV{J5Tf6cZ^?GwV!ns0_+t5A~Q*V4WG8W_^Kdca=wsx`Jw(YV+}CD zAja_1a#(_0aQPn=f*~8Bj;)!WNNDCqoXJj3$-Z$W^P&>PDcs>2mV;dRGNgnA;>!y>L8TnHjkAvPmQ0Q!|QvPdZ$j-CyYrZ z<@q6*^BQ$dO6>YQy4YW%yr6k8Bd`6nWxNkt&tKh_kh2}S>wMJ_uWZT+Q@qZ`T|gG>Py0vmDyT35;zSbso8!^a|Da4v&?Xd z-!}^OD3Z}C>#TS|$9|0SWqVtrBoTMvas0r^}a*_NY0h+1xyW*o0&YVBop760L1ig}d1{Neyf0JJVLv>~l|JfGigP_?GD!#Z*` 
z_+&pYGu9DKhm1R3X-PC%Y5CSgB)=`~mejghlh=no@bp&HW=wxswa>bY_0W-!&5`~N5j5k)>+;Ee1em$|*}NX77~igo1j$0}Q#v`4-d$R)Cul;0T^_ z=c|2H8UnON6o?q~zTU?dc?fgKg0;iHNfM%2pP<$w9r}sPRg+^j)xLb2KiYnvA-Cfb zeeTXDw9~Rs^JMm4jtNu4SY}2x5R(Jv_FrzML3|~t0{Ge#sByzBWFPlwI#58+w;7qj zv;V&cIv^g1w{aKVbwvJbEOPEIF(1iv%-t-%aN#;=V|9H|%J`9Je2iyv6?1K((z&G@ zY9nn=*r?PKPru5=b%S*O{&T{hVE|==wm;erWNq963;AURB=K}1P@tCUShdWmB_q3v zE&m9A7iatjj!$D8oaQAE?2@qR&>0%RmcMerd6{&g;3)f%PK)o4O3Qf)e|RjJ0iw)V zxPZ8x*hIG+InzPgQ|^SC%MS&Y-wlgo%vXQCczmj;vXv=!IFJ?WRPiP;V9mz*NwVt= zyZU{EQj(C)LI^^?sY*njCbX{KYeytY9U{v{*}i~Y*`b6F$5RC%yR4}(KdS3#@gy(;6G7On5uPL6sef~Y?pj<3zg_B~Ocf)`$`nJw!FGt9iqA*7 zJ&hi_e0HiC=EnGLnDfuP&E6SM7kbCrvp}u0(7gfZlXCzcn8|2babFeRZ_f9+!NiS7 zluE@jaNPLz`oeA4W^;m%?oE2r?V9)Y;^&7O0_gw{)ABjn{|>kZNeK8j#>d<>q5)Z< zzSx{5GvoWt_i^@@?R_OjIbG767bo`xSTAIMenpt(trp9a_4~IvIW^LLviw;j?G>n; z+fPV5R0;&;FELK?UA+M&YPAhIK%p_Dyk$=tJjecx57?QE&q?J~;u}WUaFea=Jp)t{ zk!BIx3w!LLdS*Hvqe*6kqpIs zeva>-+PKRE544^B4Cg_IqA0hReD@F#7q_$Lp@j>IJ-;v+n3)4J^gAfTb_&xNdB1e zB!^4Clep*@ip95CI|Z?D*3+N4kc;oWu@`Ry5;xuLV(IaFmA`hz-7STeLNZr#iVvC` zVudI7u8NPSW?m_cpIyx{Io-M`We>VYOtTf};z~#33~eZHE?F-3 z_}h!3koQR8gJ3^0sa)@5xVlMl*?WYUHEj;CZh4MwfzJOjm*SVRm3D_IR z@hRwv$B&<_+n}hi--?FCt@`_o-8QktpBaWF7Mf*^Z$=rrb$CYwz=x`b^rEfAzgn_Wq;r5I)- z*0e^xQchE0+#U|&pz++7zDCYf^i`Z%AyYaEkjV#GArVMBdRayhNcD})B&t203OfPB zb&Mq|U{w#gy^}~=5jAsNJ7oTc)Y40TTSe_T=W5^2rPI zo=bQr>ptmSQRKT8BADq>5MG&r&9tO&F6%cZ~kW4O(e{#8&Vm;cXDLP^ z6IqjTjURDa1LZpSi}(Ven?pe-!;^4?d@)#OYR0{?Xm}wvE6Ua823>TMaA6k;>T5&e&>CA>Or>Ao)YAn*0TY+Lfm z@ivuLOH;Z6X^2#@?o-WJod_ZBaRKBzzHnB&02(234Ud)>w`{cb|8kHd(_QW>?z_j- zf+C8Tk=>|Imc1tsUU3NVW5ByXD+ziM&8}mxCuHb;j}g>;Zzl>Gx$g_GYN-~iIH@=d zl0o7#{%bg-5hXzp{q4#J#h%VwrCf$UVprYUa;!Ol{{G=iN*a`l34ph+WU;^N`1L0S z5)>Rq>zrc{VM_Eoo6})oFFgf*^CEWw4 z9cPpKJ&VEV1Yi_V6;n<>9=(Gnn3=M;J8=ng1`=S>SsAMPF1zA~csleS@(~fzcn;Ev z7R#<$=qbaK@_(=AF{tq$_&z7kUmZkzvuf+Bu*a~lGt-t}ldZ0`)?j>EEQ`;~p7k6V zNt)($fyP)&|Ea@TK2iw;{KUk}e&hhv(1w&2h8P6w(St_alJCFhRf63 zo2*)QPPj^0JGGqi!$HQ|8o3Dm-RqBOmnvbH-Hf8#(n!g|4RHB?9M1J(MVAP2&=%-Z 
zqK3qyPu!+sx3$_ydYoPbS<`WZhic`e#isEPHA$9@_>`C>?378E%=MXDoy-M3BqSiv z;lIhvI#bpM^;@N;y19`aHR3aFU-vA-U#k>|6kEugU!lc4oqo`?;LUc!WM0w`#|E_| zI2H$Ska(c>6Ceq0aXBwSR8Lek@vy?!v>9&cPQuwyo~=2DQ8@M$@6={P(QE#0EB8m@ z{AxYP@d2Zmt|mEFKgLji88)kAdWFm zWliF(5N3qU`+9kpQCL{3xkcUh=gxWon3O-!NywVYB)J}vnrfIdwwK<+NCZ1`0rPaX z5;FOL<+Tvi&;IB$F5$oLsA3fDdy)Fxkbrx`6)v0cg0-ryeG;=98b&%0=UA?Vq%Wmg z8Sb8@8Vb24*FcW2iLM*$S(?51PwzLyRm!My=3et0*Y zxV1Sexl%K$6oKA$*x_@u^%c-R9ne!_^W_VRP*Bng3WCu%MdXPpM3v^v+Lk6g&GF6x zpE@fXgWoiiOU3JGd=+=BWDOSAGf$^lOYpg$+oRt5Eom>+g&BSg^Y}A7E5c}9Hi+H! zzppSYOox(UtW@%-nvnT@-FW$R{N-Qw8SAFl7WYzB7>|F)b1-;{-g$Glun9!&b;d!y zcBnhkRf?T)`6@G~w=K?(*`V48lxjeXd4gZWIz_^Z;#GZMY(viuhXbV5+uAe zynd_io(+LmWGQLveipAD0WLpOx@mUoN6_C$%V}{EP0sv*?-I;7%HO7x@GOv3VegOS zEow@LV@-5gz5hcU!}aQPjTM@~5akEl`pIrCqgAHkb`@FWu+h@Dp!qQj*OA&lVLS0FXT29^Z#8mb559e=svk>0 z8Hei>1}{+>g6%&ig)bD>B;7e)X2eCio|a-Ujl`)#`Nmk}qChZ06BDj34`+ zXGC{ME-nr`8IutARC=xAhy7wVseHqwPz?RP^-*F`ggq$O5(;o^Fq{X&exQ6Z{%2m*sb~dYqpk+y-LSAp?T>I9~n_u`PFpK%4%~uO#+XU{chJI znVQGhj7cu?&_ujjo`$H|rnBc)99TgRo@apXo#QZFyuKtj6!9+l3 zKdTsNXCFvPOWV}kZ^}c9|2>)|XWp^^_!T0qPnvdX?3)6BYD-p{-_Hc4aUDQO_`0O{!_X{oNjP_=pgpsmNww#6vv; z9Ruy};!~l%b$q|oZ)B=HL+eK`A@&M$M$rn*GTSZ zabk7pT3?Qvat+a$ApPi{X_6~={zbQ5)#$bG=|ksj7spruaY?M#&FJ25GQF4W25l+? 
z6bAMBc4K*nvupSqut5p+lb}sj6jFe^griwBbAC1v&o68AgYg}P(BX{J1Q)cStZxPNJK3-i}5oKB+R#{rCgB1IPj z-$haP_LrMYGFY@V((>5pPSsY)gVw4-#kttH85JSZa$)VmrjN{_ZifbH5yh$_HM<1QO= z@uUMhj4;}@aQPWW?_H02(_2szHL1VLgwOj2`!OxY>;qw`9m@9QKI+|*$|0H0xP=ON z5x68#3U}c2zmF7P>k<&r`Tt(ySD3u0d?7gUAy@VV8n#R+qa+Eq`V@ZZxCvJ0S5%eL zx8(Dz8(w?NLo=yD@Y2;2gAJp8!Zrks8Yzv57+)KN>I!lzRDS2FKGf^F-GDKav>n5t zk1P57`J+FU6|-so^j%wIr_371{FJ}6f4)3TM{^M zP}#xW!|qaS4|Ze-NC}W)it@&3ee^W2nx9N{$k1-m1m|pj1#wDq*aQYqFq_w(WU?nP zk{j3N$6Wk>1Lus@BbBB2`x~Je%6xf?jcflNXZQxFf&}B~ ziZ^o!&m5R;%L}#G@%Y^#-Tkp~WBC4W%rf$U7AM+4&o}J@jYxNR%UZf=?=t5lE(+%1 z$!R`S)V=dj7#>4b2uJv)S^nZ9rxn|QnC`>-33j`puCCjBDRY-^Pby@ZxKm;@+I%?hc3qSw^@*T zs58hnOn?5W$yx*T$xVC(-gao@GQ_-Jf0wQt1m6UpvuQfru)TCjP|LjR zA=>f>jNfqUM58J(1`%ar{RFaB+9v{NEJP|yM!WJO_|O!Eq?Ck)58n|eZk-@f-2zA| z7{^-ntOdPoaVM^{AM;Ha6Xm)lx2Y*tzpn zZRmjSN)8lfu4a`tcrP1AyZL4|wK9Wp*TPhBc@vs;q^V`%>RHg?k|L55;K;zRqypS zflmr5;4RFTdGkU7hj;&{MqBY|uyYOM*v`vE$w?LM&@!qm_ujNqj1n z6ksYe`t|&4;?wJ>LK_$__KwQ10&M#OINNFAv5LM{&)E{Af4V%z^%!{Gxm|WtzJ)H* zh7JKB&mcIgH4H@?m4x{?^#>ZZy>?k{TdQy2L-qP!vL6N|z>PxB&HZ~}0py4;0Mzj< zug8z6Bx1hBqVc4v@7-W_IOKAuiwgaDY(8-kCJzx78uC?)| zA@oh*&F1l?yAD$sPKoa;lrfT?L4O0`X@%>lR!QY!*=BYYfnUS}Ce`XpVS)u z;F{q4SyxT_QO5N3%ROY%rsKM!XAYWyg#yjj$<0PiN9vp_Ak0zK!XeCh=H!0j72jj} z4$I2SY*gH3bbZpYzV;G5DK6TG!na#fkmuMnH2PGe4G?pX6DL|ao`9;OKRf)O*S^#V zxGO_^Q_Ti9&90cjH^u?)d+kkaFR#A)orEgjyfn5J?H;q@`gj?r4j zh?@@Vt6drw)77EDY@5EBFc@Sf_!rgb-lmX0vcA% zJJ7I_c`iQscmMjQC=Vk>rD!6_hfhj&=M}LwZhF*5Kiz~u zb^Z4Yuu^f5TtgYR|`Z7Qbb3b-@qeKfj-4Lc-}pK=7`SbmMKp-k(n6DA}5V)A**PJHphHPai+_ zg2?Zd4F4~U3Q<|`p}U;#n@^FTnsFY-d)}6J7bPiZszdJ``}sQ!a4_l zW^8Bv;CG;H7^Ct0-c;sWk-zW;pJ!H*8=Sz$64@zOAK5{+lHzYi6Q)}05I%E*Dc55p z3$Us`@c*!La-Eg*FlFu8E)gjnTqtU{rr^}CpB>K|vc!?nS zh!$Ix+ifvsV`GEGS(S_3);Mv%sKP&NaQMLh+N6bFDc&!zM?AcO?`cmf?&VVa(=c(@xIra77e7oaVe*! 
zb|J({1i3zhR@nbr0sb9xAyQ$RmeO*wylVhs+uXEEGtY(Mn2XPUHzGYOX+=k9S> z%!sZGwBI|mdX3-8bAyECrl9?h-*ET>A8Ofi{Zq2&@iA8RF>kV%pj+#ZT|txl?ABb5L|PTCjS9KxQDba6Hz#p1^HU z*Yp6cqd+$ix^BjwrO?;cOB7-Wl2sUgzO<%+?fV)%Y~LO%6hB}i{x9N)A&ZRvN3n3| zd0cHNuf)^&jcn(`jgUk9bsCx;&qwH2T-I}#vdt^bzwIV$yD>O5i0LdPC54fm6o<$? zV#fsJfr;odk9h&ZU=fxHZsST&4iVXbYyJ~Te2m#Z9dl%I0SNkk$op0H7SvA>kX%wJ z6U_S4!Yip};FYD3;cW9$ja`RTmUJfF*T~oVf7GyKo3(dVMx0s9oCxecf=1!<`0-<$w+_yC?$>i}0(~cX_xnG)F`Y%jcm9xRu>^NN+efJpxeb#K z^Zfp09r_tz1@|vitNp}^i!UA><=b9I>$!gFv8>nRb%M5oHnz}{(4KJu)N{I?)V%er zO3Fr^7Bg5APsw;LmjtLJW=@&|S&2&~-fbE*KoGqrN{KK6KxHrxYy9Fk-`0y`8b4`2 zF@!xU2GaxdAgYw5*VrQYM+oH+Mb_j(W2WOreu|#&cDKLA`*r{Bl@)d+j}q7i(+k9F z3$w^H9hLS9M@Ue+4nD$8OqwN)7eJ>rH!2-pS|2sXCZDJMjflvR!ZJw=4GB@^j~%fT zX19)Qa%v7AqefwQ4oL;2%rf6^B*-QSXJ}X}J-(oQ4F2Xp3R`(Pv+4SW^X+nBkfN!c z`C4cRp!HAE@w|N{g{mk@vb?JB2=^-Y07CWyx=Pb;-k3e*ws7=K5QPXBuS& z;n>udRaSa9rW2H23kG?FUBG}TwFQ^*@C@yp04V>$pO8Lct!eg^zds}P zV*!h=+70#b0`^+lq{*SLwZ1aNuxOUN%Ttr$XiC*%&qA}XUYo3mQO}}uiEbfX1ym<7 z{r&hg+GLWoUq9JJi9_^0UBgT#Mcg-RE_7%f|GQ*qJR=D#?AeQjIe0`ef^40(aAFd| zCaTH|eHXhPrd*xX)Hvz~Q>}3@-&{M!rYC}JEd35r{7omwd0JTcGKHOMmEO06m~-NC zx_PG7+AIV6=pIR(0A8Z8j(EkT1*ssp z=RA%Eo1&*1$c(~ZN2S;Hokxx*!B6BDe_IQLY$qJ`GI*aY3*Le>8Pt@YQ#kd9X{4T; zk!5Sp6Ek}_x7Vn#QlxS~b|0t|d^7z8iJx%eRq*dSi%?- zSC9%dGQO+uZTjEB$E>~4dJq(-Ml3T3a5GrGBjrW5y(OFVlfI+x&d6J@1>e@$ZNwrH zqY&`5aS~BSIA3XQMEI2BcdXqMSFvUOtX{r z_++?&9i#O*C^`xL_>A*iPHX-(mL6}@iWB9Tg2G=90s?)Fmr;=3LP%HSSL5G#dZ$&( z%VAL7r)&vq)zaM{8M9U#`%8MX9^f-$n9!72hCpZgB3JoYMd5hGmVt=4BJ%;V0*719 zH$jK(FaX(Ff2*l+83E9C=!m7yuGC11h5IUKUjh)k{ff+Z0_bea5q+L|V)wKE!LCJ- zwTLWu7iJom+R1BlDfd69_=WIku|9oi=i{L-?(kq?qP?|U%0{EMw-UaXBT^c%mby^4 zuU|k1Cexr%A|O~_)Iwr!cL${G5aWS$U!$@pVbN>gsK~I{B;tehC%^-K>@Kh&4kGm` zrQmMvl;jO4{~?_KFVxEYVCXfRifZ7cT`NzTj9{CjZz8FKU}w4E*86F*3?e8>CoEdE z_Xr3kp$Oxk`WY3B;9_VVVjf4Rw6mGFZ&e$1dcOVtlm%endT42>O~wE^ZeGY$4As?C?P^#K}VNTb%DtLnhBYqpnF!!3TX@j zxF0-?yZ zn*=%&w;N1UgPuHl7AuS5cM&i6#o)~jBEY%xQ^w}f{$xnPgV!wnp`q5}XuW$dm@Ca> 
zpvK;ul(XaB)Bulf9I?i?c$99MKSurO#k*9^`q!o+pjUg8Z~ilO2q0>{7F&$_G}$k| zgMhkFNTSW!#3dSsg12#|tHgSVPqZNtaTiqVTA4y& zNX+4Aekje>1wZ*+1Q_*=~Yg`B_ZxJ)`2BkettJyEf6dr-8x* z6pa~pF21?(TN{gd0#{I7gj^NiJb;FO8xOk1<97ZctpDA$Y3|P8FCj_|3U~W9j{)C= z>pTVn_nQx9tLFJz`7p3-ytbe1zUjjM^TgNZ`NYCqf4ORAC^lH?czpcGHSFd-u}HP( zO(j)kFoo(;;t>bS!0Ox-Kr%`F3|WqfxynNuF$bGi-<qq=V)Ai4 zakxF-c#A`iA|1!A2^lgQ1au~9h-?VEl3YelOUU(Nf=qs%Ox~rYm_ld=qjl+A2!aOW zi@&tU3Z8Gb5R%u3Z)en0usQDD)Vr{tY|bRR=!`nGY@{I9o|~I+U)eHsU6(*S>-BeM z;Mddc39b6SEx$1U`VM8E{w>>&*xQ2beDOTP;z3?ACxBBZR5_}DBcS%nr4Vvz9W?zO zkhv$uB4FS4;`00?O0)i0HxQI@_Z?9LxMM1ehh?E#O%bA&{|*7X*L%e9XL%){GJOZ8 z6Ei7s>W{V)Pa*M3Zy@uhS_FXZU1f#^sf@Lk%{NXs!xe!ppImI-%K#kI57Uo`sw@?q5psz-46$Qr7GVO_^s~J(l%Kg12x-`EcVQg*+$T2IT_sflk^Bl zIz71Vi@J>$vD2yXwu~{Bwre@*J)Il$ekgmjL{5BI6>8vYFNCG=R*;rI0Ykq&wE+Tf0^x(*u*a? zaziH|BSG_I(pcK&Xl0^tSRF1b6g+**07{DJ=AQ!@RavP_>wI z-7G|>fieZzqECe#3gTEoD7j@UtsnjG+JD1%ttt2muSD-N*56IG`#jcDn0Dpy4YAiy z`Dp>$Gbor~c}!7<(cZ1fXuPkIlm@WZ((^B2u>0a*z(B&+XP>I&Gh(o{4Ms`p-~UuA zpD^#XnzeZregfdlLi-6S&t#wrBcgOr#MD{m35{^%16^A3YM`^I_B+#jmLe)uBJ_ii?ZcEs~ug z-dFpEy@+T*>mZ<;iU9oT2KP?W1v|H$K2va8S<_=UE8nOZ&I^(-e=el-NX7f82HjyA zC93XXk%us(3McN8HiMfZ9Y|6wPt)IdRqNvwa%<{vUTkdNvPUKm{NZtD+WcBx5(OdRJUMLcv1c{&w|H<9a(s`SFXt+R=?t1{KX7JCpMut6Lu?%|y zD0KWd7K4}6H+`OE*MU}-MI78Vsx!6ohIj8alJZZ`eB9*>WgTm=MZu*!Jww;cgp3pe zWJ-Gtz^DM+tRhiuypGx1wU5L@Dj>^T|2eCIFK?4Nmv5$sh9dC~%M7+I!=}e=8F!j6 zO%k$?KVdAY*=@2lwH!smje~!g^4jljm;HL_U4*M{L|yAAilOHymi0y}G(x}UNDe1k zG(4tGWpgBKS2?9Ih^Nc$7M9f zkxD-4`I5Of|-{GxzyTk^NHS)sJHX!qmUzQ>`q}O0QYT(+Y#ZSa64ULY{(_eI-a%p z1=^*mNf!^UCWasooX44|^~vFC2h878aVZXw5yVD3cNmHY+;X+6*bM&5oxvlxI27s+ zD@fIojf6iYV?GrV++slfRm4!Cep4G2^P|zdXpe6=e40}PoM~y#HMjuiV z!4pl%TJU|a$dNp*vd$kuPB12@6g_1IMg%v$#y!>5P4)ROWG~4F9$19ro`=4AAD+f6 z@LKk9;-O}Oo~y`)NSVHoVI2Z*yAC&wT2TsXcLNaO=wG#V8~hN@MY>^I>DD;ZZZ^Zw zAe&I#c|kiPXgOC|E-6f(VikA$(HjrM5TvxS%gQQ2TAjj-z;L5a_>YDPr$uR~EJ;9E zPE4WkhXUcN1RT-1b<>Trjbj`D3nsrd>mC3;sviE3R+(FVx1gJ9#iCkel>w>UzlcLz z&+3UOywdS1M`j%J3xx$JP9|L!`NpzQsVv-BA*nK-4|W1cgfqY(uD>OJTeTQyCYft1 
zhBJ9;3^l*1V&qPTML!0FAvgW7;ko}n8cMMAUpLbieT9OwlQAj z5jhyQ1t3b%|Gds4uThxeTl7@f%m%fZX|KuOFR%~zXrq4d{YIAF(xI)_75no>a^&~8 ze5V?3=jQ48S+txT#T_aG*KoD0>o;X6H&0=08mD%{i9I8SN^>|;?;5vj^P|>Ldpk>h z;ORqrS+QDg1#wFwT3K{NXrkSxdw)e4USHq|X{tSq7l4i42;X3mUjxki^Pdp2F67gE z`>JTU+U$E^j>+flc)kI>+j8718FsEyf3#srR+E@DoYe^Z7|+UxA1WtefgS}X6OyYC zc;3rU$m%hRn>}fP1ne=ZP|sw-Bt1Z9CG9dTWkXQ3!BVKF6uo!(t+701LeLxY1TITLVfQ+ za-?^%dmBRY((*5Jp27JS4b%@Qi|c`s7lcE$uy28xfRm&ylGj}YcTNwbQuZxT1EU$W zMQz(NM*+V9C{&&}|6Zi1oF-=F#`;zp|2W4^0F*wE7V`5E8bv;A_2zu#U1F_$hA>T< z9k4e;+^C}KSBXvfo(annRi%sTu-m)RoPJgNmIDa}9kNV6xE-RgVv=EZ>oG+nyh=qd z1Z$2Zo7cOXJZu+5;jQurv=&YGyR^WUG6ZwROPIx|v+~5(^5qfyFs_|Qqjk`^8u(df zaL&vrr*{aE6v~NcrdA#B`cb~7!=R@1;Jm@B+4Qi>Y^K!qV6|5xwneqx#?YA2d8p3R zUZV`ONEMo(V!S>SjV(fWe5 zicpF}tOfjd(OvKTl4Q02ev#nUMcZDLTABWXkdiuS{WB4Q38@~4msiO4R=fARs^JJj zf)UP4S7f-AFl|z#zO3?*>Gh+MVA8!5 z$T}FKzkq<349mB}GLc`o+>HJRLC&6z_z|Ey88C3-D;tG18YTj>eU~DW=^uP*k&RGf zFpIqzIPTAog3$yi(WaC*GZ?>thv&JRiJcpAdQ`m{2CIkn2NX zf$QZRCS26#0!j=-`b|Yfjt>`9j?pllhe1GWJmJA@7<#9K(3q{mY-Trz@C%g3@Ts46 z*$akStOCutu6@T@e*w@h{S6J@wo$+9m_k_dec-N2l-7K4*0>~$B~- z2gv+~x7$Z|2ny#fKU}-+{_??u`8Imet-e<(quz#ljcqY3VUpjomfuo^6YCTnhhbXLq&rY$k_jmqF%LYWt z<~d03L9|sw+eGm1agw!}+%5Yrka0yPT8wLco$wyE^nbMguxhs-A`MCQBLs3V5_PHq zWwe7ECT|$_%}s5QEzhm^N|Lvv8xon^{u$2~@#8bBmPp|2G7Kpxm|YPNwQGJBPpK!$ zcg7@U&RflIIL_co0{7f^G?a3LuoXke_T0gtQgEl0=tad%lP+P1^RGRc!th*^c|ix4 z62?TZcF~2Nt6v_^<5jtw1mw%i-#!92bzi&?MFEQwI;N88$oUk63Sr{laA9N@;aVhn zWQ$UdNKgH5<(!4{3}Ml#+`r3^XfsaS?R?;*K%}licLJJbdKMO07}XXhUuHPw4kL(R zzgHUxaMY272;PGBBWxdei??1KSdaan-W}G^M82PzmIgYV_pwFSj(A(unX>3h`DV2e zgq))xh*Z@WnN}JYP<32B{MS%B3`{A&I0by(YzM+#p`-XbjjG2*xF2D(o+NO=$v3~e zaZ@pVpp(ovpF7s9iS9+zekS~nKxo8n3}PWV;NP;AJlS~O0)N%>or`EN?!hUPAP6Bu z(bm?BA!jH@q;4V*dsxnZ-4Dg_Q?BrC@H!)KVds5o^sT3rcTG(8`KE?i#B!yBxPm@X$d~8seUe+v z-iJ6b52X&S!*UoRu)YRct_ZCMYb}%Fy+YTWa*J6;H~CvX{@EZg&=99*3zKySY}1C4 zGl&L{=(w1GmzkOQ;g4T!vD_PPfBtOZ1NfT`b|slTH5abrfGya6%7Q(x5rT>D{8OK# 
zv@d3g0NZ>7S(269Y$i@3j=PcN{tf`k#t!&2dBB(yh5P{n7;|;Nchkl>r^U)@FAi80oRT95k6#E#&z}I_XRq)); zN5hRCNfW|AxtBzOmuuRjWyHtK^-#w=tjH`Yh?>t3M%<|+HpFp*VS>1nho{9(|98gG zcq-CdTOvRApWHNB2~GhhE9)0%S9GU5RFTyh|$O#XmCf*}a`SLKc-r+D5-^ zZWW_em-bXlYq9f60+OH(p%MBYyoV(lBN4|TAFQVHglkW(S)3put+~XmX#|`<{cPDV zA{LKKb#-~v@a^gj_$klMlyKYsIkAhxJaj3^!$y3dkjzGGm4iQeI}w(o|GYe|`S4-z zF7jS(((KtON{~FS#BB zDhE0!Z~j@0h^wfJxQaV-)~oQrg&94FT7gv5?SS~Yn-Guc)!SbB;x-Wx9n{ixhIV_4 zGLT%DuJk&zK!6*c&fe%*ru3ITkQ3`mix}j$RmXFO%kTZBjVxR_oBGChD$HY2q{{J| z50s@7UD#`&&{9eSWc4Q!hR70)Afk|Ckp50>zcZ}uZrCFS(WhZBS>R-pS?B?}*AkHE zogBS_bfcvrFbe&B1Y=^y9R%Dbc)FYxdwU7uIg9@6!b~$`Gst#r?MfE&7JT59|4+3o zk1%O{4f%`$7YvmbfpNGXsS5w!a)f;f~iJu$q?;l9H-|>3il-Z^JhKZNE9^eNXTP6 zwYs;6#g4e0qh8k4cP*$fVSKhCK+rB~O^BFUr5vB*^Q8j0z_RCdbFG$uvHak5JuQPg z)8Ux=UjqMjDKsAtEex%w9iq-VLcBkc>c#{@>%S{+8MBeJJwdd+s%}cvk5iK7A%+PC zp8bPfw?>eJF;)2`Ypr8I48L{oaSVn-LUf%^wTPaf& z+H06S;9BHKG$bn{F{8MTNTmONSbGy_ zsK5VzoM{x1#!@QO*dt5Hk|7Psnh8bLERn3)Rfr+lWmht#Qj{eLDJhAhA|bo1Wlbq9 zwD>*mZ1nlOzn}Lx|MNfRea>4muX){jpZB>xANP5k_69$Pwl#y0Qr@0?_^W})A2>vw zlq9`V_u;S4MrtA7Jkw5lD6^9+)~KtXYyH;wcp7ZikiM#WCIJe%7lP!h3Nr)7mWx0> zIl(@>T*5vq8NYGg%Or8Z`if1rp|;Zj&YkAtwnx9em7yvR#*~wk^L;RL23o8q1CmFF z!p8YeXlNxaW0qjxc~*AogHpd^|!$% zv?`I+-E(+E0`M@$R9uVH4t>sqqX;GdRvA)pg+#auTZb3ZFH#j0bYiv|04G%SX-BTq=`y+Nz4@L`U*ry!3Ik_| zz93WLqVo{M-lLK?EJ@zZ+9%;K=$fvU2y4hmhywsh^M?5)EG7;PK)P#7vWvqZsJJey z=v6pFxZmGjz@ZZm!ssAAIK12A`r+7oDqQO%k2@x$aZvvo+~P7j{VKfcJ$V3iV|Zsj zdzh#>Gc0Co88y>CFu8$G}L%}5Q_R&!DB0nS#lA#+5FZXo%Wh|XSf`*5xP6v zy`Zz+>0)sqSn_K#rL@{e?Vx3 z*pAh^zvFjU-jU;oK!#;07?#G9M97>w!&M=gYq)1q;P`prt1s8hD^YS34z+)p_W5zQ zclt}fnXhV=l2q)b0!nX5YzhL$hkX^&7IkL+SD02$94CauKfSRhCtju9NT8nms4Dm5 znW&0IdrxQWyi!;HVXR1U=G$Yf*gI2F7)Y9xB_;0JN{BTu%^lbgEa)*`;`PrW^>H?5 zRl0wqPkM6;6J+5K zSkTHVU~+kvEpwCB^$Dmfy=IvFt?fj$`#TY55QYoaSgI(T+oAg}c4C9imfMTjZQmXq zle-u4J-<*o-B!`{q0}Dtj;X-&8}{Bu>E-9&e2yLhV4Qp_141Up7P}2y-C}!e?S8j+#KJFB$f`L(a&57W&pJ>x7hY zu)F#@ zF}Rz64%!OlU?Rwt-E4A^cVO%+;ZJPr!6mcxktVBVxmA9u?oK1szj!QEe-iG^DSW)l 
zo3v1X7cdp1!&E$A^C`Q*&?ZJU>Ys*99BRG4XZB(mSu!b@`YBFmb1&JAuj|Kfa*zb!b8y3K3z~sQqs`U^7mlszWQ{LuVXzw!_}3q)D^mH z;9WWLcCy`D-t)-mGu@6x4pHgQBp{(YgpX^aEbvqVDKa={;P%ZvKn6BGIONYNFmw>$ z)=)n@Y95?A)^-}s^F?E0!A+9R+w=Rp3paZxe#$r2t}Mt9(lfF@Z?5v}d1wa-?cDJY z#_^r}zCBT8r_C!kq##rIUVhN6$oqH9<1-xnvh39lPPFdv80?RU(s(xH_284X=97g^ zn>64cb8}}>?eATp`TSBYkr`q{S4xJBAL$(SS7Zh9w}1cSsy~He>$x9j1la2vzkQFJ z{<_kjB#)WZ4H=9Ne<5~OLk9Y^TtG!!Vg%qtKPcrNeL_>5Khi+VA4)zC1@Rm?1AmWr zZBwHjN2K|0?7SH6MZ(F%nw6|weol99E#Z|(zy65NXZgsc)->hAxsOsOnr7h0jWkJ; zie_fIeG8B+-icKt4_yl11DBibJ2X>rBz7Ww6X;mcGlH`NRpadGFYT^uH86hAJC42T zqj}l`0&Ff`+;_d~ET&ejI0yzJ)PM(=HYJQf;6?`Ei<69TaIrI+ox*qpvapQ+wM`A`Ux~Qj@itQl z1yAu7Vzu3SF2>`yxHP44Kq7eha&u=`P<__qUK0CBLMow>Ah&Y=D&7 z#s}QNa1Y&SQLLU$dIb053b-AoC>38x?$wo;AX{d`k#@KRoiz_#a-0dbgUIjQL&IG6 zzUfL=-=?Q$bnxK1&O_^$vL~jCA*98Dp4NpmD>@CfA(M&{njn*z2R0jAE(Wl6IXDeA zPvH)2Xo^$t#$KXck#DS`SPE{vD%on#eLoOX@uPzDCh*jtBlX?x7Hja*P_pZWPYyIT z#JuLS4Z-^z8oiNa`}qb|%W~t!)Ya_ay^4X*N*g*xpH7G4H~`APLj%N(&CvH5&MXw) z>pg?e&97h(8n`9(pyq^= z{naHCY$~Fx8$swZ4*^BT8U)RtN<9Vklf*lbUcv$+{y<_@J&$qC0eq9IgH5_u1Ut(@~ph?sBi>E5$^HlP`4u-@PgYW_@x z#{0<#gX^nMuCY3e;^9S{d)dB7JY99w<(nZyq5%H54n z&dlNM+M~+XPGrZ%*8&39^W7HcZ*jxn9ncy8=R41+V!9x>(FYr4>o@!T(QLBDp^1BV z1ae2+7thD8;k#q&F9~j9rja1KA2cWt^&$Fk4;}z-1U4adYGg0JuU77Y*Rf)|{3{PG zS@mEvK|uQ!3R?itD(#VHk$2y=^wU}5qI^}ju+*+BFWnvv#fS*19`SxQ3y!i8V1W|a zuV_K2e_G~vT%8UdSagrK&kNZ=B>)Ygi_68(Jz(>9hver!(D$@ z_fy-Cd!uO+P-!i+2v*Ho;`!KejZb9ciql#cY1B2qaQL>uJX*7Gv_V!jb~DLy8%7-W z0QnJbi-%s$mCPy*uxDcl_0@Hkg!B0O&Z&L;QCDTDZ^Bo~qh_f#>i~u>0%b=IEFoQ- zC$xY)02W#pr*`xeM}?f8!otV+N54Lw+I53h)aQ&=h55n>#eJ+ptJrr>`O;5Scism_ zQx3p}W+l?Qvg15EOWyb_@s*Oe z+3aF`$Msdp;jgDBwBe8Vc(GX?{>@OQ21{4j@CbTdquaZiiiH%#4N=9Z^bcIz`3#zo z8^8hbYjAO6+vj@yFSB^Q2sjS5M9Ey7#4x!rd7kfsRQY^k?P@ z&>zH6Bu-yf`eTtTg;Gl#pp$BxOeuGTG4Lo#jtNy?RP(V=-9Jw>Rs;tdnWKh zn)-KPy&;GPpB?4k33@0Fgbt{^W+o{$fc3I%)W1~EceUGfL$CMPSk zLvmGq9^Auy_^Tq})wv}tg)W@X1UbFWPhUUV_F<@1`-i?`Yf|72!wt4ke4Ij7IB2gN z5B(pde?FnWiJ9=Kb#Rtdxs-cE{qcrH6EZ$ViWlzg`Benx$9sWQPbW~+SDN<)TpP1{g|82}QVSBg 
zOI&)O1fv6qnxU5S3-c9yQYV#q;|ha5Sg%~~kO3{#Tbm~2KlF8MM7r12xe&1VzE_7D3RR&>m{Y z`qziMsa#i9mG)Gy)$l+Ma+%AQ^DEP7Co&Zr2`x}w5C>04zLk&nVFK7E_gm21%MQZQ z{lh)frc!Qq+}^@92p;m<7Z=(lzy-=u5E||bGB%uP`83mT`3aj2`~08S%k{D8y*VirA^@;q}^H~i0ZmbkdMK3oX;4!J-dj!dj-xoK|n z{*~&+Jq*!(}WBK@OlY3p~CR&(>b-zdVqT57NP?wRE2THi?$$siqyMv|RG^ zCipG6*2Rq$Hbc5Tn^wUo*j|G3av7~6%U!;F4*3F_g~*Kb<~`Mv;y2jFG#++5!WaE;FmI!o`_&cbDu$Zbo~IOO+Y_GkF)4pO>)5*)&-jph`v?daPB zT_JLupHF3@;yqjpO$oJohlLCFsFjYqked#VBMU)$>)4d?zqzqfZ-D%>AFfwwo#261 zxF!kp8)i`FYse8j2bGcNua%y%F)MxuQbgC_F7etAU25yqR>?tr7uwd@!JQ{!yz5aJ z`s0#0$%VZILKAU+28vURCTAMmWszi>qFfbj<;aQ_%-?Dz;}1Jll3V zHtZ(1F^c?4wHpz%E;=sK|1`J)kGisu^zX3lDm&Yf! zUQqUshopm1{WSgj8&}iO((7=mXiAekxFE!gULl_)fY(2MgKu$_8t&mHPP-}jjVTM% zl_Pd?Zp9ytUjpSpUO~Zipa4>#nSs9G7q~G!*kI`--&Pa$jgCw5wHg+%vOhi4e?lrL za&6J6lP41(HzaZPHr!E;Zn<$ZJlwa9-Ww=j)JM#^{BZG$uL=);XhQ>Hr(SyiTGif4 zhQxkv+m*%a@el%C%QJ^l9}=LSuMd15e!Wx0yX#!&@4?!wd1w6rQLJ-w7su-rj+$^x zBpJBYjb!L^Sq(b1Kdo1rx*`$A`fS7DskLxRlYu;~DDNEADPct)T?WZ!O z>(MvstIiu13q{pm{QZY!7%QtL&KwpBAIvF~p| z2>F&f`26bw$!{`{Kxodh(ZgrH)^qAN;Navm0w-X~?K5<6PivWll4I?Hr5e43`3mK` z4B-CTvO!Im)=Q{t12-eP7q-|6k-1;c} zI}_SCu7y1I_Sc6C{e6&^TiSZ*&YWipaxIADdWmbSHbgh2NUpnz_r9BxRG%J>(DnPs zE=vRy(o-m%t*^~%68HswtRoA?Mr(b9o}?X=Y-d<6GVta;L6dn zUa8pD)RW8X6X0?wJHBH?@hd0k9Woo_&vO`nAYJEQJN)JGoA=Nv0nDE` zsRC7hNjP(-6j-+51?N6ErXFyw3#|4^^`P9EYZazz#l?5w+kCkqehE$O^!P-X>2_|GzjmfG6?jBAimH`QjT;@gPkl%npvtOe6rzX|W@s6i6Cf*D=XVOq)8d zp2vRf-stHQXH$nue-z1xb)V6>O}cMmMmj!kgo;-XdJed9R{DAJv=um(C&4vag>vbR zz1a&UPl&zj+6T&NEnFkooK|;!qYYFs#uDt`UFpw{0J1$l&hyc#Wq(UA^cpj}Vdi4& zoRlyOXX&Os_Ph#O%qzic zeg$%`r8ol+Tk?WxxZ^X5n*Tm1fI8x&?VxeC%j2?vm-UpJ&q#Nh(1g5*3qvKCkP35x(sj5L5$l8p$Y=fzCRtcrJnURcgLnk z3}u%cx8$Q8SXz>2s~|;GxJMRB_5XEd-a!|QDoEC6x$q164~XN=W@GZDP&m!Or|HMD zDB}ZwXL7v0&9*-J^R7{uidFh`cCYtXOCQ`Qw<2|{^X`Gxt+E%Z&TUv@IM#AeZQvJN zTUY4aeYkGr)7|nS5Sknp;Zp?eQrU)iQ{uUctCE6SWh z7WnxQbW{{-ns(qXyPj{|9tYQV84bQmwJ#Yz(9#R#^d#V!T5x8T7~{P=0A38II9k1q zu;;B(2{q=^zfl!1{A*^c4&kTwwARKxMhVy6J>m6#}|<@V<|moklcSR32SzOMJPbqZ>X`bX@gWUUj5oN 
zM*d(fY0!%j2|l;NGhV@kNtdYCu>($f5J;GF?*?RV>hjvK{;T1F8#sEYerRU)_s4KZ zd?;d61@)jC=K1j-tgOaxSREZUt9e#gIqKG*0!Xy9Z`D77Cw@sn&nP(nRq!8|3>C&n z!JUkTa1(LB9E46N=>qJJ(igNslYChORdBr&rfb9595gev5)OeSK7C2rs>{9nco3Ao zdmDv3HpA6XE7klm73(5c7eIqZZq#Vt0(aE(lN(8bz9K)YN<42otxBx0gQalDLqGi8uOLF5gmKL8zo=qB6qFSUh^qJf1xOUa6Et!rL78&0;H?PETjL~z_Zym zyX{w+0U!`ujtRa@TrG5^+vYD4xpr}6HYi-{dcG)~bGvT$8>Ov~F-oaX);o^^4e&L* zq;)s0Tm3x%wm;JcAP+w$A1~VY98&Y^z@d0Ou`MU!$6BZeRM^$pIS6lf1qs8><=$$a zA3J74)&NjW!SCSu0>o7^$0}{1wVDEy!V>^dP`1q5s_s#Kz7LXP>t?D zvqL=6Ee0+X+vn9+#Bo!|wVAPrVA02~;|=EQIetAy#kQ=STQl7z!G&tc*UBFF5#msvWS&^D8H3VQ%XqJBKt2;#uu@|!E0+pdKMajVmnp10@6#xbdx@6!{>@O= zoFPKM3*cwBZD|7t+9Crp!vwywRGsr5endo^w_H>SfV@zabJD*~MyRU_^BZ0!b;t?8 z?`qP-{=&wfHV={h-D)1Z>_K#Ny(rfTbe9dN{>4EW>03Ck!sr?AS~bVHEyI(45@Gs3 zStvSy(ZcehX}I6Bl%1Q#L0uN7bm&jW@#j zek=sFmN}xym!gNcq2mCjagM&Pzb0|)0_lm%3o}>zzG5g4$4ZE*{GFuXM9Dv}{1sr$hLRQk1lgC%$w3{Hw8-beJhZ)Z(94asOdklaZg{p)kMByzJPiz)XX zIDF-?Vifu}+wQq~isXK4;g_F+sNzs7Dy-)POY4;B&N=2JJZmRtF~1Q)oF*cmW=s4X z*z8Z~MCe%u*TSawK~J*J?XEfRAVmJ8!4U$}$>c<#-zk?t@i}b;Fkn(NGYnvxOsz(8J_N=j>9v!Fu4EF;K{7ZR| zi@9bqaJMN*lY0-$-)LYx(Dmv`Z80Dl=a85GGUwGjrhE897|GpY75|pWvf2VXuf7CB zmA_m~&vwX5ODF5PTHB*G{Q~^wsYl$HW4?qpJ}F~wE`;*}#XoyD#_LJQoOGfK;>PShYKI+)%WeJsVwg|Dt*W@Y}KPSi}PJcAmz3`g<|B z_t{5Pabvw{Vrv@>2BQ0XSatAw9v^r@fzP+a(V zPq+WKPd`b^D4+j)+-+}vhYwGTU}44NDd2OPW|RwcFj#gR`iMNm$v&3(D27CgAcnCW z4ICZw2%>*S)WXO3To0^Y!8K4tQp*Y1y=!Fq$8;<9&%;*+9*Yq1FEDUdR(}2+9@?w1 z>_w3g1*CPm9P;_VQU#_!5MN4otLzpY>&bI#3()j?}OKWXrT!R2=I3W`#fgvHgU!JNyW8PQTI)g^d82CFNA&)v9KHkFq?PhHXVyNFp+f0_hduB;8n7l~U(jx8x^ z=zhDJ(4n$U{d?xcZTI}1%{WE}7VdEz5~B^kj@w@XtGlz#A&f?zjCPnoU4Fz@4k{gC zTA%Uw+{9xt>SC;z0a)nDvll}c-z~#5sk_FwQ8vZ|SUvLgvTQ0d+bz6fDS0uu(VK8h zVI()PTyLADRETn*D05?4cyTRmd57DU&{uqr)sgb_0>Ck07P z>qGa_=-;i+tZ3&RB_haN#ExeISlbbpSteuoQk6 zthTU?pFS9ooLNx6t$gSh=K;+}9+LDTDt-XNJUy^~A^kNK85LcJ zx*b0!-Ea}DmM3h;-HXGU$eZL`AU!bDql|0HRTA3s@)DVQR1^ag^=N)9Pv@8S9KbKH z-acpY%QWJb9eI=_;1~GrbX}bt?K>ig%?T&xE*q&KhW+)%GmXdqK^aii03BO-)~o@lP-dI7<)9ZapyX93EJ+Cis5s2uGxKu 
z!6NLqwaaiVlNzwK4*OXhg?F&if9Zh5Y&g0x&tKQPHC^IVNMx*<7)t;L8H+fm!2Mfp z;qIA*Do68OUL16>1sV^q1!*v8d>sgkaiP80K^q2=OlopwZX=eLP5$?@ z?@v0j#y9-vpmWuwXGSuYa+6~+<)J>7PYI_jkrdtVhNK6qFBg%Y)&h( z%E~uSd97$rb3K@EYb%nQcuG-SoJlx77%}bm0XNzNG9-YG)>pVxv>4hV&KKn4v_RV= zJ8s2YxGB4o!1T1a*_1y4s-`;!448e@3V2AnWvcFTrVExv?vz?9Vn`qCP7&(A+ zI8Hp6c$^v?ps^17R1&ymyy10;ReK%h&!2&_#y)8rl{|3e?UyIfs+W2S!Yq$7o{u25zz=B3%i#gyq-6nHXcLGdkXJlC z-=Zc~^=QDduvDswNP2htmQ>j0z(pmFm?g`H_j_=5fB0aoMJZk4NLeUwbz3T5i`(nU zHtKF6O-I}Z)K%PRxJZOX!l$71HZD}U%g};A;wT{TiiR%j2x30sXmjslG%=fAp3ODF zuk3`ES{THI!9G=J=zhI=dC^@dQggBAp<%%lD`rLa>6Zh&M~#MAnCv4b54yddt#C7a zd-GvMf$x;nXe)wQkhMTWmo9*R#{fr99;Hv^yg>G{M(Nd|#vh*-+z;ONURWrOSkC6o z7KlXj;-4d0l*0n}ZC^OYGP5m944J%Q2-~?UrS3XScau3l4WGyBH_cn0F43|^OdH0% z3}n_2p-q&ykL`MJJ(tb)c0yGFDUYXY6#nF2{R6 z8pEQu?*;q1QY^g_o;?H4dK-SopgxNi#26sWo+3M8FkX;&CQ+S@{GPKaqyA?5orS86 zss$neayBi$&a`Wdf+(5oRuq(EEK@%oUUc%LOAc*Nq8&UiTIp^!Hz?B7s~5w+x#Zvu zQ4aca%|zDk($6}`LJ2f8HN3F>1~FJK#IoV-p5mVdn@^b?7VY%D4lmyLHP4tS5`jR_ z+}&-{GzyaYV5Atn&xAQHIRS?H{0hB0IB10f&yKgW)KZ@XWr2jEiQw9-@oFZ}<4M}v z3 zkk?O&Ps-p|gL9CO$tM$<7o6;ov^{yIC_fuKO9JsX~ za_%y|2ree=~4Wv`e5@VeWR`eOt z1|$C+v=P?<`xj|18)uRkzZ8?~NKtR;UKRUrFiu0LdnH(=^`fwJo(=ctF35^o$QDVG zc+^L7C+s%79qHZ?0ZP$Hl>(adaR%u@`2h!=VJHIWF^sh3f%n{V>lFj0TzMmD zj6S7YFnG?zh4tGQ3aP;z7MW+k^HYSF@dlP|ohSN@rWO=s)(7tiKBwU_+BmL545*_c+a%!G>iN!z`@JG=ubLgERxLWc_(+!H8{f zcpN;Su?Y6$FQc*j9Fc2a`&c^9YP;_yPIuFheF8^q0muKKC}T6R z?9Dsi-A79H(FLOs`IE7Pb_u5Dn}MMk?RBJJS`T5RU%x7=Q@?u_P27-q)t@B){6n1d z^5Kd7OF26_b4VFiNheRGHRgdQQ{+=Jr?wZvrtC_pm85$z1R!HGPir%it+0kbI}Pst zT+(SPymaQybWiY`swN0~_q;pWX5BdNwv-BP`nYVFRtNsvPQ$E+Fk$_pysfr8CHV zI9jMoXGRO)9JhC)ZGsF65K~~Pg}aPhff-k$-7-;@qU}}zRSGVW^rfC`3ohEu2rn)p z<{$aEG;G0w^P5t2GIxTKpakCWU@kQPmYR8$6l6eW#lP7F=3ycOi$4U5uV{b)FVgkA z!!D3a5O&kad*_fw(q;!6uz>MaR?Od?B>6u^fz3MtTQ$|%z`>5ohOmqj7FI{1&OlL$ zzExsXarIZnRxbvt;5>1fdn^6}M%c@XFqBz-d}Yt00@72?@qoE8Kl{6QvQNuU4-oalNbB0)}S<$FSlB?GRf>2Q`|ObbT=! zyb$xn*q@X{#oQ~<$02(o$*9`6cB5r>`gHq{QArAC*N~7h|mz%dXt@6(;Ih! 
z+m%qgc|`MEH{Y`pL^l(u)%R$6N>(Z>_@xqTP5KoOj~n?Inr6hir<)uF-{q#lxyC6h#hwh@)nd6%azwqf2<#0VIo2~ z$YmF3wrjghT`|$jL~l36b_=gu_(5=%72*MBNp~zWErB1atM3HT2651aF(r?_7H4iO zHU_EZlxJ*M_Cb{77+M-JfXo{42#SDY1Db9q2Q=4yywf*0!b6WIFJJY#Q)oPvop^E8 z`@k&lM)Dr`(=8!bKI9$oJa3^-z#8R6l5_SRU=C;nAy}=MG9{XYd>BNdS?5hU^*cNp z##O;Kt<*?_!q5L|Imc0)lZoq_aVzuXk%ZM}m5>x&HvC3c7_iM2XumA#A2Rj}MRscq zzA(-8oMxXN+2OO0!JDr$9x`0!5>qWo%dHE-nqZIz3>X9i`P>2{&9tY{9Wwo6AWei( z=Tzp3Mg%}Bg)gCi>odQS9KsA8PRxi#j8?j-M3?rF(pK0ak(bMv z0T3PqR9INTeA}qF7FYHjOrvQ~;fiKR>`Fyt1&Tu$iFg8Z-?XmJOf!bL1*v;`H?jQ-G!Y26kFE&_!(Se-UWWbwRV!W_O zE2dlcL)6g6k+E0B(NA1;@FR6moT&%6HourXcV+!E0<`%Mf3vwq2ToW9du*CX&>4Nix{mQI zgr+BmE|x>|jMAmVCUr@eOAN5;%D2y!QGcFsjjeu9sgiH_{gI-8cF)*~<5+cfn2j%LNQvcJ8g(M#RqSL+m5X$sL2(Rg&P}t#{{CHpMRtbaKdQIW7#_JR5G@ zb@>{{X}AIuO%73YiTT{6#%Y^1%0L=S}%v5?vC1Ji1%sI_izV7D@qYm!}bU5OJJMP--!n z$TZNOj>c|{Aev^BUEM6Z|A4}R5y3Xx{S>*Vt$2ToPS|Mhd+srmuh73&@nOe9^{Oqt z5B4_MEJl7Se0a7Z^ZeON*E>NBYj=)?Z0$$^ujv3@iOF6d@0W;Xmad%>9c$Xu9=_=U z*Nz;3+`K#9K~y(Sjc094Kz~m)kNPlNOq0mRS|c8(z^X+Febec7X&J6*WB-{guLga zohxgY84>(?6|&R!=Rm2g=29Odvx;_g;V)t`^{CM4@v zN_7VohhTI#JjXBEe=Ce7Tugny&OSbPt+C!9f|$mZzjBj|#-E|OMQ43XI)f`O?3sDx zrkPLcQVQg(t*)WL0ml!1N5kA$6sjMIdLPrZ2= z%YoZ}8dz^{dLAwFq$?ZOY@9XqUb%eTlhL=!#4f&yhM%G^R1N|c51%`c5XWAyBjpmg zK2*KF-95`8+*$7IB&ACALBTb*M6NlIf!NJvs1DufXJ@r%f4>^OzB4$GlP&{pxY8-H zo?A0jAC<;CSJSjBb%q8GofE3p-O7+(LyiG`taQ3Vn<|P92T_JYhigpyOq;3H!odC> zSW`PcJ?x0eAgPRv?V>)3ufrtIT7ZhLd1TyZYZAgt#Q|>B@ z@aFt_Lp~mS9OP~(T+83YBc{Sr7X3AU@$6Pa z1L=sz$3fifT}geGF`53D z3`N1(b|0Hjv!7z+f?rh_2$3s>;osH%TL>F=yPD$vs8*mikZ*HV`P2)K zd29(CQ3NvVePC&r z;6m+LDAK8}k1N-q2{kZ{G`TVS)(;c%_T(1Fd4fluG?i+{Fp(Z=vNM~`=0??TV(lhH z5aq#M`&k7YL!to@eU<9HYSVSc`RoN6-S$Q28fW~$2$l^km8a(yEoQgnF%&gK4zqjC zvuxctr#t5j#W2%AllL%$qP^21WJSpqyvHg2hEE7a%fA;jej5izZ6Q0bu6jU`mqke< z6jYa3w(~N#=wJUJiXp>z*^|0G&&r8h!&Mnjr*IUu3-Fsg$nq>6+*f+->s5T@Ri^9u zFLRz>y95(py!^Nb1!}Sj%SLv24^6T8r#;Yl!G~{HOP+}RR*+F%?y06l(H()%l`h?L ztN%YfQ3A&y%eTb>W%kMG9Mo$0C`ga7tMNxeyFkBh+c$Y1K{hLFfuMG4q096(-+4LW 
ze<4epDM$(QdwFF0-uZb(UO-<>~67=ep^?tKgSDwmxGn|zQEJxBG zb*Vp{`)d0ya-_8@{mb-;_z>`J+H-Dz&;$D~wW#LP~?E$=07mIt=p% zI`wDC>3`8z89A$~{*5vYgFqRvyMD9;&e||ZYLi;6v8!c|el%t1;Mx47Is^taTfx3tt;euELjxIuBK6&&F)oCeV)X+2D z!g9YZSD}|A8Jg$626F~dOBuZhpr*pJPZ7tj@b#QaQEItmBj{uz^~VcVW>0Q2MG>D1 z3o4$r)qWJ~qgev*aUdJL?#6<-VEn({Po23isIk%G9_~~_@d!Bvj*xe*iWV?3}Nx4Cv^p6M{4u!(G6@~9>Y4_}XGA?h81%0X z`AqkhoR;+FJK23d3-JFUkW5lTKHeJWk!QZN48^77g`AMBLZ!kT$K_)Is}a!P<|8bt zhxfu5*adL-8yZ|ebmUiJ55RZ*JvCq&i^rcIwzXG|z#8b+(TMHWUgASNP8 z$zlU#YM{h*M7k+6%`$~(jU{w+RH9Pe-7;F?BFp9HH+ouc-o)pN0MvqBQd&Pm!8LEb z^tw5uNKhy;tz1CO=It*anyFIyms177RX7T$_=E#8wZ2nJJsV72H_6_bbDI8X8!`+q z25a7r{uVKoiE&}by&B4~nBEqqq8PNZ-xy1E3}-WIhX0EZosBYDLbg5FA#M{Gw2H1Z zh&iBtimbPdQ*Gy{BoySNf|^cTN^|;a)x<*LFI96PFktxkS`M)Vbwr{J7Ujy`*N^4j z+x=0%VB1PBD$l@|ar4Xc4zef_WF2^+ z__E9=JAFvJGQ>s&9+Nm!0Z#kUww5`S`G_I1@8YT*h9@qaGZWGjc1h3hmWtzJXuMBF z03106#iR7}w;LGLeYN<<_oR8HD&`7V$V!XFVZe84p+wVF>pWi3C6e3%VYl1wRqY~Z z0^2i)zp9cpfrU9&VP(86f@pzqg_G$>>9y9nKc>)2?Ac(%2KxSK&w=a3MsDuZXaeqh zYm`ue=~SSv4DybU+j&4Np%y-_3LNkD?_Wbsf8BWxpf`S0QPhXjG>vOX=JQ2qPRC4-GJF2)Sr z34)&#+So_|APR6O?!ObNvn~#fZt$_h@JTZCZrxpaJ+W<-B!dGpk=ZeRQ@Hz|#*zV8 z2c9(f#QrMj*X%7wHT8egG}8Ok0osd&vDr{9v4N1R>!>8!3GzO$jc&Q6YaS8Zf?RTU zK#T+7Tq2fxyWbqHpB)RLJ z!u=s6Cx@Cjc3!^+?8fBc`_wvinyAS%XHw~jA1`)6S@H9fh5+O>Y;Ox0bo>WUAjdJ=j+bi*6>ysSYqtZWqgw z(a#*%$3nsg7x@JAK(M{CpOkZ*H}l|f?oBu@gjMONkE znroZ-LrnTZOy^bBzk*)6aqOlVb)=V=NdYnB&i@F0w+V!9!>cibmfW?NfeW->-ZFWb zDGdMk2fhrpcs|P28lfPT?s5mb}+jQWmU))%a41-B;FsXN_paOcy|oSjk18sMTvHV0J~sl z;wtJ~M~6c?TV=GdDPNqV<7Nv(ZC#m|(RrIQuUbaiC~L@}WksLb>wy+>3I&K8G6$A8 z96Gc(J&`zoWvFi(9~38gRTwfR%D*`2Pnq12bK2ua+!?F+ivz(nA?sc84stIBojl`o zI0d+x50u)?U08&JRT7D^tgu<>0t+0!a z+Xf6PYw_m%U+bWW?ovI>g`FNI{`bNZ$Gy_A z^i*mob0GHZrGQF3nwS=ces~g-{HS+r@@uOae>(V&%9-RTR5Q%7U}nAN$wLIu3xEzg zn8>idsIxu8g67~FUdbWVuV$~2Xb`8WLDlsQnQ!Eb=$iUjEci}A?jz#!wv-_fpJk~b zRCte-(B$uRkVqRmUp40xjEX~ML&~xVNx~~k@_~LC>;1y%Ls^yW97=l_DSp{5Dy(O~ z2x3d>2zVXH%hor^DeF`RGO_;i$oZGk52R{v2ELXQsQ3)Oen!m!MG~xXioW3XdyyM6 
zNL_9yZBp!M<#C{6fo}g%_=WlNdD%m}G8X0zv=)#*_W4 z=v|xqribzCP&`irQG~=~s$4P6EmL(rggAprJw<8UmUNLno%N4CDq8J1#_-k{U&DZL zg5)~_@^b1qr2c!-<+RWj7P|@xfK4Xer@(OFeel#^w(MByfgW2OE^`fDLB3IF48W>B$99R1wRILiQG6dV$ePCkR^qKuUZcjO5! zz8-vSf{fO#OA`d!{iw z4r{nbjks4>05Z5g>!kP+EsEo`EvT^tSQ+)P;hRk?0Y>vU@E>*^nX+`EqcAejr?1Ly2>u^@Nk6U(!gHNXB7eC1jl9o_an=RBXf)Yk#+H&TaEW|Ik5 z<5A#mo~-zvZ=lm5(;S@8yyQCv=+mDEQ;Z<$uo=R;Z2mmWprYg>0BHzN>t!XcL1ixP zyuIjuE*a9oWKgAxPdcAsE*Mm|bH?#l7@K^Uv&!BKinKQEzaSXXW$4E)dc8v>t<#R7 zlIM7gK56>G)KF~Y)+bEE#e!-?z_*1_HnFf7c`~?g9-di%Hom@TQ=|YhlP-i zzwZ}2f}b#HJORaU_GSaTbT)W9lmg@by}4vAbb_t4G?tELf%6xzx67rE(k&2SRLQV| z09lu!il-CPE;|>GZZ9gJW8eRo3}AbfGSJ|QmI`j^QaSQ0kEyMQ4p(3)l7@{Muteb7 z3_9P6FQ#d!uI*y7=z5g>GL5{Kju>Wnwr8Kky2ceKEiJe*-8>XWB zu+TZ8Yz&f@>?kAKQBwZDryW5&g_4VwTAqx;-256c4mrUxaRa7vOJC25xitOKwNh(Q z9T)U(yYytGYx>F~GfkJc)y9T2zuzl>8>OOWf1J(I4h{}3{r#;|%0b}Fh5lz^XSr&G zzrX&}GyA*8ar*PRC(!BW>wURDkpd6!={}V;+Io6=`#n6?<$h_p^p^MV;lq`QJ2b8C z1{!NjVrqo^xBuu}barw0&(5C8)JLIOme3uiO2G-O{=aFpJq{@Vt+cr#UvLH^_88T^ z@vr?IT?x2EZS|G8w*k9IC#RBbMo!K;+k<`nj>#(=4!wXL72(q_?0Hq!yVj@sv{+pd z4*hT|{I|!!FBf{u*dJy(KKPt`iwm0hA85A_kRRs|unTy7^uTNt-wt-EiwB+BoD+?2 zzCSYcTJK32bgnsi%dOI#tZ-zK)A3T+H;zfG@~OrbPxFZNdaD?r@`>1aF*!ABmLvy0yPlg>u`g?_1fGCRFRf&0;Pomk(N*)LlzJbZWQm$UIXMHj<%O{WUy>XXSQ zLdw3>t5KByo{iL`{Gq9UPm%OHYu4IQGCAfPB9(xL)G zgLEmWAR#q$N_Y40+mAZv`5dqJd%gb+F;CpFW9@yfwb7A*^}jeDZ(hTmb7AWobNj~| zEY2652FRmW3oh8?w%d1SUtVzChF^Z__G?}otrqf)7 z9zRHcS6%C-7zwwkJIt`zM^H1IHxlVs>P8^CB4!<6;x+x3I25d_Gi>|{WV?+Z zg+LOR`p@1-HQQe(sEoz+-rJA^5oJFZ+JMC&-2 z-v4oLLFt6{_WJDRh*-&FUph@K%qZ7BBQ{r((KAUz*fj3n< znQLi{iida77Uo<<77K_%j>k#forlPw)j4t~FS zS9fJIBi4ppogPjp&s#c;AZabfK9o44&0sunX|vEKXx^9Vc-0buzL`py6JNJ=lQ}GD z#bnz$!4dZg-xAfOB|4inrp-~wL`-q<-A{346v`Uk(wqbIW6q`)o}Vn*v~fpmI!#t)zrT?6U++op zGN1iK;6=o-YT1$uv`yA3MGn#}*Z|?_+_bxw&^f^1>*AUVJE1DW!?V|_Nt)ji81I9V zg>OfowPqhtxf3urF&2J5#d+hR^5px4pj0Pb4acc05;0pXs>0dQRIikjl#Icuv}V3e zV;;7^$jC@8po_#f0t8qwio!4IKYif|RwR4nvhA@dXnpmzI)qB2Prx4~Kr^=zWRM 
zbas}~^sZKBWNk0TiFv@9u;|(l9FLDJh5akfD@(lBd2^7(LopFkaMaK7HaYQ`H0!az zw3Qym5KW%oI-whM4!99l9E)-B8YSBw%lWsBbNino^Wp&9K*^act_-39ek2o37J`N{ zdq=|X5bEm~@|sO1g}OD&l0Nb9!g4AK-* z*cFpHqaTx;oIJVyWb|g{m(yX#^1p3%mTYxi&lX8Bng#~ESs&%XH^%;8f(0gvY)5j3 z{rFS0wg%H$l++b#d5l_<%qNm_{2X1#pK9F&MQEJ>5gX0|Zc+VCvy!s-(*>h%h{|>o z(8}4=gHgez4F$I>(YKVtTE2U0Pd$uZ+1nd0N*QpoUfkU?r~h}##p%JG&CBWO!v_5^ zB!4*jkGcEUeslu4w*_O7g|D{Xx&?p4jG5eS&R3h(WoyFgXu&u_k=b9PUCg)%MvWBQ z>^9wDq0#8sZvpf)T8nR}6s74O({cCRp0NNLIZx%{`5?U-*oaNYM!cc1-tXl~H*j1^ z@*);!G0$U?aoxRJngXB?TZ(LBt*vooFRJ2M508!58iWnU%W*o7CEU94OMigt=3x9V z-It00(R|Tze)|xAhV9iVox1Qm=Dea|UT)(LHdCn>LszV(EtR3k1=EYfkB7ek`yx`o#1G|6Ev7@4bzI#@7+(;#C#Ku`w?T4@t01@73QX|#v_8Zf33 zF>JIQVzOB{K4bTZNQ(?NQGzonY|4!imv&QfsT`=EYCAOjO4t8T7_dtBxeDukTpWwr zraf@rKxv%lQbOrU`0r2>AI|;H*|$GiD#lrn(;96+jo?9nl7x$PvE=0U+k&mh>QtcWuGWGD8_4ek50Joi5uw8K>bC8Tom{6jw;z+zKH;)b z(CfDvu5OyAXZQWNq@h;SF^#oaGZ*srvc1A{k#26TDeLP7Wvnp3`YPZ-o-n;@{Gw|f z7pH%FwUWP>d(wW8<|~jd()kMv%+9W3OGFDdhd4RA7C^{NbaNd`>Lol=$fMyf_ReEk z#J0y?vc-u?+v$b?=c)WPihKH= zwK`i*{-QBIk7?Su*I-Mz&S^S4KqAM)*0mwdnKK&7LZ*2`!R9bl6vfscW$Kk#kJA{i zqBC0Xw=J|%h=@h8PD?PXH}vsn5-(MBY>mmf5&|{$p}0r~!&NZ$=M7)X^`yZCy&$AK zb1On9OKye6tdKk0aoS!H{gf$zI@-IphWU7yY5s%$<8dVmG zwvZJX8|(*1EHioW<%LU5O{*Y1No4jRmA7Z5c*P^tY3NZJ*lXfEFKWx&aZF>g4ysrQ)9eSmjPdMTZ#W(nKN z^#-j=@Wz)TpS4_0ZYcMAU`2nIRXzL0JZN{8Dk^KL+DUAhGavYfn`7AKhFN$lp>^Uh zX}x350%5&^#n-Tv{>##XYXjb?&Jf>{c6WC_v8oW{0Qa7T)p9)SX@CQp>xIO${mt^S zG^UBM4k9U8E?b|=Br3kVC`I4+lnt+v=rNN?t?}f5gx@#o8lT-hdNnE37m|6+h=nTr z<(!?JV=WWVb6Sp5ilcj$IJf7o`*Py0wLOT7z`S4WCv{aBWjt1e zq*7i{Ta&?;U!8O5P1eXekA&p3mI3d?2z2k7_d55=FNaf@xj=M7xIkIOm`}``>LXah#fwa?YyyQTky|Bo_>)gbZO=`B z=9D*wcES>(AIki<6;Yue8o5HW?MH12NfTB=Ss7~{;(^2AAhhX!g00}y-IdX1!H(0I z7`4MbbBh7dJum8$?6^X2u|cnKPx8kiA(}R0X}jwiNhnMBy+p!^{*%WEk_kBKtjz=i z>cT=0XLAMbHQla3!uk_aAfxsY-@Fh2U8kkL4$?i2b1b$qi-zYsya&r8(5cp{^_XwMg zyy0*l(bSAtxM@*dM+pX7u|2HWwoS)tHkKh7^Lo%%dlR(^Ifc^F($=!08LTQPOffD& z)|pMien_$Gj>|@8hzIon>M1+mEG+}K8fd3ZTvh(;t6i0ndXc?CH)~*PLEj@zFVKjv 
zQ&4}Rd3&H^j%22Hqel@jD1_X(M|+L8+v_8F1(YY{jvA>Emn(2su5_0i1bRl=P0Pv8 zmY^@9h&Dc9Xf-E|s|nFo<+*3oV?RQfUj=*_laWUxj)IP@G;v%7x)^)})#U^4m^}#x zc$5=`uMxkDux`8&d!BZD0K%kXGL;iGZNT$N6mfaLTT{feWNTYVdclp9R!KmLScI5A zu$bV*^b@y|{Y3eavM&DX{bnKj<}lAIn8bmJhtpKEs$18M^)Tq zjkQc}G`)MHcY-Ct+A?>2Jdr$yTKMVKYE_Y5YDJx7kXtIX;nSGN#<^IAEmm6Ft~rks z)tsIV?Sq6Qyp-(gmsi)ZUa43E_Q93ep%B-&25J4=nM|HKfmkw63@=-OK*daLPLWC6 zYdWv|f)OtWaEp!eflts&9rUm)p0s^3Xu&WjZQ1I_J6FvNt%_(0BCx__duDYpZM`kz zdZ%Ql9xHL1D+-}PQmN~1o~WYVcz*fg8C=)#TJK)7^sp8Kp>KZoU9kUwP#U3t^y0l? z`!7ZzaYkn(|4s~rhP!7xKM?Rs5d#P#Op9+}kj_1y&4 z6s0s0erj}H*VZ52ioEy^LvwBNAa~%ia&x1LTZ>Ko$!6X6b89@Ew|veP)1U~vzxl5k zhdVE~>$E17Rhw>gV2dds0Oa(f#^evbPV|~&oBOzz)E)e4i8+8iGgKVKs{`Lmb!XS> zU0rmLP~|q-RZl;(ZR@0`uc06HKcPZPdH%OK0aziT6F|kI;?sfu-U}FJQC~MWi@3)e z_1}^+*!4aA6kQFr>S*!eyM;tkKe|lQ;-nws4RRASOM%h#0mJPLc`8F3m8JSn5&VPsML7+H@EW zYOj`3RcqZOJXtyO9F(SlG;IOdaBn+qv^37()h2D2dqh|>D<=TAVI6NTB^*kU<1)8I zUHLTTYxVPE*L|?GE~`g)g;_Nn#u~{A=3e?kq$lV!Dp93S%kcO%f0y}Ie13EJ4Z|(| zkse3a<627?<)CEpg5sj>Ew7Vz%1>;4XECbQO60ZeGBTT~jXgYmj3uBu95wsxw+H`~ z2z*B<4`yT^F5v4Oe=sfF)z}ksDMFwfApx0qD%ApP4<9hV8si@=lc@B$80;yRiyH(y zg5ge<_WI8NZAKY)IHcW=3;m{$L4BRNR^$e@(h7uAr(<6a zBaW;LGIECE^_Eo6RnP6lxvxC+$MVm1Th)v7&)}x@BH4yAc$?x$n}_z_e6=oM zoL_3uMkPZs%3V$-KejsJ>SEn&_Bu*A zrs6KOp|P5hA}z=|Y-mCE^*M82Kf3qSJLYv*U*t>&2(Jlw61G^|*m$0jDps_@w2hs{ zeZ4-;WeHi62OvHVpPMqz<;%+mo(cygQN>h~f+-sheTX(Shil!@Y&A3N)lk=c-)KS@>lG6CZXZ6N3e8C3U@nJ!kVmc?#-AY)OfjhRd3* z1o0C(hf${f_*j2TzVY0EnWt5qp#Fz<79xM55r}90PY1Z#O~9kyRqOBo3TGz+wE}W& ziq^T8JD|;5#k=?a(}#I z)CKI`{Q#Tjwb#t~EzXfFRakU9i1V;t5Y^zlI@v}paUp2%5m7PiVbRsT49@NP?M+z# zFu(K5Q*qvMV77OfN{EZ(uqI_AIZz=|Rix}sJ&?CPZgil;X*9;mbD`dq!%o4$xl5in zx13Cdn9mq;2_48jlG?Yv>x*ru3JiENS8aWE-rndvYt5Lzi#m9Ju+py|BoLif2N|}h z>Fw`n#)RG2banmCyl|OVLeXD)bAU<*BC#@!iV4(gA1qbc4^O(UdXC$W&-j|H2GrNL zs+XK9=b*pAEE-ltuijIekdaw;{Uhqx09?>+wK-1a5KFim-M$;Ib^j?l#(i%dN#ZOL zxi{xQHOLcmeuR@WH{?_bY)n)@X%(d$HIZ7vVO%hkt+!Cl^#!7if%3e;!W&GF3#RkW z(pt-{t;mG~yCkab^qH3wQ31OCYvu@EMPzgeeR~bC9V(mdr77k!H)@Rng(Ay+bs5L} 
z%x>WXDYOHOT!(q$Ji|;K_&&@nrMj}y+VxpoiW_*eF_~#*2g7$18*s@N;$%|A^7UJx z8G7wYpeTI^$xhm_DC%2abKkog^^@j~xM^EX(isPAL3OZi?+ACFypZAk3xoAv+uYt4I9~Fe zHnS_EDFrv8uE=%nfH{gRP5`p60FSkW#DY_F&Wqo4md59&1uGl}R<0sER@DvqcOIVY zAzu;Fuj@(!S*DCLfw(ZLs4uHUe;)k&X;tUpw84uNmXB-rD)}ExqgBR+(g2@anlMXFCP7-XtK42h+ zAn0@+Z~`pq%IL(zS;yqoB?8(8is_=2JhFNy7tjH;7lN^e^n07PaHVL&11v77UaCp{ z*n0%3jw|o0u`Su?o*k1_wxvEXm1$kei*?tuY0sO?ZWCm&>GqCz43TMXYxsGad~Z6= zYeEBS5SuRaJ3`>u$37RDKN6|k9R5%1vOBrVImb-rpwdSFlwalFFJ(MYCbO7d6ns zZTs)7bg#s}9+A=(5~H>JoY}hoacO}~$AgiKS&ciNIYde4^5^`&H4Au7)(H$g#mH5H z&8EqQ$TA~&6ViJ&YaYjfAH zUxPb7;j6$B7D~KuRfxrY*3El*J1XGsIdr+3m&Jw3*_K;BL`on)cM(fr6OWSOkx{)(&Zq zQ6#@jjkhKbo8+XA`QkS#2#z1$NRO4$vwi+{*KMb!H6yKs*BKjwXjrC3igIyw-$-3^ z&lgUam?d`92#$o4Q*z>lUl}PCa-R`D_LynLzTQ#t!pe1FY&m~T6@RU=znUh`0D6vO z(Tmp7z&pZ$t%}rTc58MyC&X3{5N7?-R&}wJenRe7WGl)>I zRG&>HmZY_~(vqAmf2Rwp?~$@<`CP1IGY?Rv$Gu|N41zH`^GF`l|pq zaU#xB`=wB;_p9g4F(TZdzp_@K@934VQ*KeQdB4?lVr&vUHj| zaD3k8L#PmMUTkw30cT#{LX;TiT%LbOd6X_V7$?=wY@P~A$?H0bQv*xiPM@fFc58B4 zLGV{*Q=(<%2mr=wGGXQ*|7w4o^g(svtBU>v^mdF8G~pg=`}a=Fl?ZZwRu%;GD1tEJ zqYrb!U^8SnM*zlOdF|-e-Hr;$ZoIw5;{wns`{u<2T8pV%uf`)KR4GNvk{)-JhOQI3 z`@gSD!595^yz{-Q5LccJvc|JSi5nsGF84Jjec+_!uSSF5k}+nRMpZcFCK(YfO71TW z6ve-4Yrq|PP{f&e`@`nBFahM)^gJV1Bmc6pS~>r@k{M?L_toLCfz<{t(PMz$K4r@W zHB|TQ_0;W~-<_bSsx^A=jv)B*t$wgNw43vRr+Nt^A|mucz&-q^3Q33a+YtLiJib|F zPA6iTKUT?wHbhI-QqwwsM=a+M~F$$PCeg#ze~sB@37FKVzjXm{X=XQcR$K6U)<}e{ zR_^M6)X`Mcbkx;0`;mG6jH=m|MyLr&apavu;PodfbS|rtY3BW<8CbT0&JS5x9>v8` zEnS@iqUI{z-M3J3v6d&%^-nYi~1Zd5Lm2@qUN+AJ445P&wQblU(tlc2KS_Qo7??}FdrI?R`kI5@Y6y4 zv|8x2-=c94_LPW35H{RpeXKvDrdZSJ$=ejI8~ufihXRGllGJlJCbO^~KGh7=KVjI; zwf!7hp>F~)HvcM*H7xprV(Z^98IGPr)c(yWN+ThFuZtp8(I-vi(>Kz6?+mfFa@t7v zx&$%TQH>(-55@x)z>S6%CkcsqUDA`;{>i4lKA#1x-94H#1?;Qyg* z?5sBDd_G0m{n6*~3!ML408KGHQM-_;){o*yGtvoBTznsX{^@dv z(JzA)T8wdh=}`P7n!K0(xem_Sxb1UOGaH~MJbOSIDH73xdR0aZBpf;BEsDxY-9idg z4P zuD0tPa{~F7pQW{;i?>HM@uCA{{HfBGi zIzxC|3SLL(JGZW6m~OzE1?A$_=bzQci49&eRGHXdIxv93X1;xR-H9GllW^rT 
z0aH1>Oyx3{El#3IecTG5*1%)o2(1#nWl)iqQ*4-f%2RhRL~FgvN;opAqTVxlnG{g+ zyY2MF8{IW!1wBhC8L1>AVa;ER?%rjzw!X+;j}H}|2Sc959B`4X%d&=j@O9ZQI>bMU zj!8(D+q=LzymK{>2HdOL4)37VeA!M5jQgm{kMUqF(AE|-mMy5|y&R{`EhH2=z7 z04i-^&>ZqDgp z6>M5V`DehHp4^q%y<3V%`Q6pv?3;=<`(CHL zOn!Ld()RTuX_wDte0X@-$l?rt_MStmjEOHFJ-f`t@o@2yIH%{LH5)Vq&mt$2j|by)A#-GCXFm1+faj1xQVDUHbb3nrIT>nsi3s--IP zui|Pc<6IVnu#L`~(+V#zY+AxjVQbts8 z!|{~T*D*p@N;Nt*>^^RDJJ^VCOVOe12N-q9GW31Lgi%8|8v0vM5#gse=d(TC9?ZF| zC@}oWwn{Z`J|PXaKCv?4jZ$Ha1R9Z&RSo>%LOh(*91kYck3Pu+-|obQFiu@QTzVeD z&4yE4HeKapZ~&6Q54J`YTSF#BrYjammm@zoBlx6xA7Fc=VP?4W4^#tXlHA&Q#@%rAuB9YZ%SmW*tTcg=;%$}>%>Qz3i zOaoo(LTsgo%cSmDW%>e>*R6wy5AQ(7g0dg{)L(_RUHCF^bFQQ$cYqq#wz{J3<_Zw9)9Vir+?%!qw_`mR=Gk=;KO) zC|FY8qMTqs(!UEMd@e9CUvBtnsA7zz;-)vnb*QQLp;oPyYbjdu#@>gOhU!Nu&bCo$ zm|uD&gudB><34$`j6oz(2>@;gTn$Wd_gm2$Q#r170M!?i;1adjNtq@6&)JDs6tD6O zzm%6DfzBgsqT-Zi{vfn<_{C|u$jk{Sf|Ztfv?~^$7p{_1Cbref2Ju=?-GGS5r6r9P zRaBhHbG8Fl^E;r3N{^Id3p;cBJ!wkY_2USt-xw`Uwc{Wb^12lIKFb+^!$> z0n1ZD%6P~OwmdYlV?1k3AX|H zOtP6vrXVx`iB&tQCec!yy{?ySSIb{)q1KA@@1=V2)Eif?I}~ybY<+97RHPa=nzE?1 z8gUWRu>5k$8NKwI)fk`cSnQcQ9>G0$+Iq&(eqtak34MEj)_CV=T=WG1(%)I|;*SJ8 z6sLFliV;fhObc!=`~gHU?a3<@DpI^Q>N^CbRzeaI64h=LS!_&y6O+=&jjf#fZE5E4 z2JVn#JXUqq)pLi+4)fFmR5om&RMW%|89;ims+u8BEs5i3;?qUrqNRFe?m?&w5^l_t zxUw0OGw^LUMd89R^o!85=)N(tjaJ%qM$`Z|;LGU=AY6*$xRW9uZna9xQ)FdxW!h!V zZG9qj`;uX#(-aIciEh<&CzgA0&Edn-_af@V4=T8%n5H&*qi7BbFcvsmOTk>vs|0&% zR0ggfWa*Am9}dF8x;%F<)E5Ag8&XXb*_>t887P0!tzFN*41HFCnC!-ud6B2ROAbj6 z>4W~l?`l^dV)vn}Kl(CDcDzEl({nwqabWuE(@ zs`G*e+F?Vqp7x}Y#RD_58babq7b-1*vqOtwMcEDYTRk`2I|yk)wv9^$A5Cs`m)9<@PPVz2#(j z#PT*-y8ePfUtjZ{HM%iJ4Ecdof@5@IMfJ^s$w1AZv6O;oLhZHc0=EQ}F?LUf^XlQk zIHzvtvn;LQEIEd{KK+S+f$&>NVg;K3^cUI)toA*YNIJIsj+5@V7PLY%*U*@JxNSF? 
zE@_2xir|~7iOI^!at!|#nPtwQTs3F@V$yKtRCcGQPMN5J9AkFd0(OTA8sAVNX9CN(#Rk=i@12fENTtVx zI1_#6_$c;))Ta$)FMUP@J9cJK==2wY9s-p*S&i z(d%1%k4e8ft%`^;(kEJ%do=^bRFy;!KImjtvSrieFkzBGFH^5f(Z4zITz{1q&SDjS4Z6yJZ4IQULZ0+`)vTP;GvUk#i^}8u5UWqU7#!PB(s82o$|uuPlva5OR4v3->u?LD6YOZu_B&{pBHPyu)dS0AnQI1@QZ0h1mxvRUH1AopX=PWETK;KE_u#hc={l08>!ZO zv84o&vMAhQ+@r~PbUqZGO2H09E?GK#be;dsod4LAfPS@IVRST3Z0muN-g|=+CRSNJ zm=AzWDj`K7)ex9?4;9V$#0no4Zmw^cFv02xLxPX`4w0k?x~ zXe*|ZX(U~7pkuhUfa56*KS!LZhCZ>u)hG>pa!wzLJE-_n{Thn#aR-r}#JR{?V@Q&8 zRMH2!&84st^6zhv)CB8ANyqT3>kd9O-<+$`M}uJbNLk}MTMuxK7brgZI8G*3eAK(| z@j;T_UceZ1yahS^T0Bv9XI)bDl$yu#`=B3$1VW#(2iOt9j}YXAH`iBduzQxjzZH7) z{#OC!q!QeEwcKI6N0G1bo0IST|s-N6BNG97q{3KMg6K^lcN^1n{}Z)>&NYoPv07#qyOcb|TI z2Ce_ZYe!TdZn#1I*(tUJqpSCdKj+i~;$^6KUf!?Ja%1#UCvxP_;UFyOa>893(_tI^ zOP6QoMaVQiXN8g~+iGUE-w*SEhAFI|mv_ry)P8rwujl}xA?0xqX-S>^xVPj>yV!xt zt!hTe)7bF|n$q!>M2QrFa@`hBa8+@_tlz9YW?{92A%|5i#TGATO?xF5Xw^HVZ9>Y! z>a-Zk!0sMhFPgsRO42^=XThm~H#NH{_mdRd{Zt!MTr8S6vo@kl23noU4b-F}i+3?M zucc7dU+CDzx^5|Z>u*Edl~cb&g;zJH+eX%rUHeq**Q^SHP37eA&ot+16t}*=@xAX6 z8{on}+r(?pWku`CIE6_prh#7g`XRc14zv%5U^D0M+`m2wneJhA(@fm5j5_c=aFYn47ak+LrHFCKi#Xj{RS6pJbPomB=8 zxFsJ`pyK+5zx-I7mm=*bV8|K#5l8-?}2W`?C>Q5J+CM0R!S-VIm4%r2=QrFWH=gM<-|xp zd@5A;h3ybo{1LaW{A(8KHR@DedH|i=^U8*7&8-L4mW3CBXNjkHZGh#vDgf(b!w(u3 zoEX6mg<+KBZ%s_ZEXGBuJty3=*y^D4;mliC+QdYMx;IySO968a=iL;;%^^ts{Om@r z7>y^%h8#6F>dN?~P4?`ze<+StxA=?LIH=@XKUk4USCdG(dFNIQF~w5hGQDrZ=gh^vkB=!ffbKCecvH@+(w#XerJ__`{-2}RIjk>D^t zoPn+S#=QBqq@Fa*UeY=GiFQ=<*#iaVJE7P~Rr&OInC?@N(f}80^;YaQG@`u4FmJ48 zFXDRvNZy(huT@-+^tm=)wsXI4bl^L0KYn`u{m^@P!tqS8U_ z=kFI%K`arV@cDeECNw1Ij5nY0Abyb}TWns7pc3zI3BDgqHv_9{zCNaZPcvku=gC4Z zRFrfoeB$mfdf^H_!b1w0rY! 
zFL>&(XTGRIZG*kL9uCW?jEY^2Rz_&-?QYfbo2<4yc_$Ms#fvX#J1?~;2%u_T^Fqry zNc6tCFgB}&MK6Acj@+UQ;}s zP>P{51&0zY^+oXrYvkH;=ZxHF{*buandzRe*i;N?d&5_Tg+w?mxG} z1!AJDIPOIXC=sVosG;?iC#A{&%?Py`g@B3=X#!A9t%ypT8!;-jfjr3Tg-mrZ!^T2U ziP;BC&$-4y1h7+YDm; z2nF-QojLNp2b$ON_SURHtr$t8^JcdzMF7j{2Y*?!9!gITu)C4kQ%yetyK%tvF$9qG1{b9xAolTo=4*vspuW}7yhD$$aX><+`kG~;&8%t!Kb>(7~UZqj7tt%db_Gy5byB`-VZ zOSOG69B_jETK?WiD_3Ww|7+3MzH)DJgNFv5kwcAg&r{h({Omd#Lk$cV}p{ zL^>33;cTPR)6?s>Jt%o=^)A93JI@!vHQ(vLF16{9}X=$mCyf(lcwY zI&XV-FXJ^ESd+oIJ#h9zh;PC#MNl`W_T;O`Rc!&`@|%6<&rZZ+Uc?XPsKQm=9xA_i zm^+%BA+2~i-`+m)>S9Q8N(x8$%>!_}L6@4(SjF%aRE9+;3?4iPr9-L2H6ZwkJ9ubb zXg*@NS#VNm%$syZZ0)b?2r*cp$G_~>|2iwmxOmv3YvX4lB&f`GWhmXpPk1+_m+rS@G3 zyJ#-MUMdlFCep!`$=+5QGjILQtYzq66sRd_SgP37YEeZ;g*rG2J$(jsHK?}!-Z{3WXujp2&i16PoA3rPv@P?(_J1t5KUR|b33x>iC;F0wU1wPRNN{;`jX|CGg$T6vF)6hn z2eJrHaA+&OE#~mw-r4}zno>wucw>D0mo&kSL3}9w51dCiYVnm1&#gy&9u_rIEI&8I z`DE0>rYLG2)5{%7x^gT^&EEi(d8Xh--SJ?-T;)zcFQNw^m$e6CPG_W@^iE7f@7DAa z(XylIH_KjeM;(GWM%#hYES(>!DTStH?irk}`Mvc;-7=E??j)_E zeAfEayQ`z$V&#LFAyzB#0%Y)_+4Yl3<87|nd+%65dLW%JPQUj-9-Hein${{KpZY38yJR;Uv5QP zclj!$CY{Y9T2D>H-gB-7LelnjkFm32rH`1AHXN=rhf2DKO5R`2vfDwutg8E0@eJk$ zIqZCA{6yF;w{oJdhLKVC8RK89s_*%2$lSE{qekICfayVX+*GKUn`eMaTbLW zYE-fr?Cmkp{606&ArLx{GU8(Y{32t*o*CEejWU>EitJvO z4X`+*?nE59T$|9VBE#n8OwKs#x(6BsGz<(4Cz}SpNivXl(VmZ~cM;loyWbyJ72rMP z&TQ>G(mKdkH8JpFcmCzr$p$%PsuL3v5}o694Lx~6AJ8_G))6v)_@|Q+eE>KghNFD^ z>s^yuE89ZQ%G&R|+_XcP@`n{bR9!r7j_r_KG%&yj;NtU;V|T|Ppx$_8L4J92r7({j zGny*_&joK|bVA=+{R2AX@F3N>yT7&N!1tNDAxBuC4*ta1=T_)>-gD~NN&o-fG}`N9 zn+3W2rHGVlz#7=nkO5~L+v7*F6q8U~9Ck^CWz5!n>-U=ZOep|O1!Yi{Y&=`LxwcRS zE-xV9_gcqK=ErDgfFNdz+iAercOGnD7s9el4TmY^;*8=$rR52@Fj9zlquC>MTJ|sr zx~zb|sjG)xt)s(8*UAQ8MTEHVWAs5|q#MhPz~zNi5)%JAL3XN_Cxpi)?DD5NP>IR(kJ_YoRBxNjNbZzxP^DF=UXvtbu&Tt`p3Q#SA>eOLIt-fsO1Lz^XzOAbG-bSI9)oRwexw15el+A+WN;T z`2CdlWd^?~f>+KJ+u6l8fwD`UO?B`nquh%<%-8%0!Gzh&NXx~Y4rqV>WARC7YO0*K zzteFLGfup#`Q6U6{r!U+;;I9`oxoRAp4q|A<-L2r7_tKIjtxq^B|W1}Y6&?b{LjAN 
zTamOU#y#VfJAN)>@^rH>{2|nx0sZ#}w{hUMO-NbDP{WT2FT9aL(*srfVTkCP`%ch| zOE`{p$z@kMV%lxc(YKurrPF&qae#Si zAaw(UAy{%NK_(H2Wvly%*0}?v{*QY+6kmh!t=2cepKca!gr5U#1`s4zX7?pJexx^N zowK1$j^XwAL;-;TXf&{!Ol>hV1pS}SPYjQ%C(eiG(qlwC(;*SN&eP(3ns8}bL@)2a z>9qG2*E)8U3hiC6-jOE%>lOsY6iJx2i*Hqr<%}>SaM?44K4_HZv_T}97$6mwFj08}# z@x0wU4TOS6_aU6_nM0pVfJ3`qJjrdB8|J7=lK`_x5jbXIST~AU1it^rK61MUKJny2 z*VOA@qbP@Hb6c{8;VHD_9wTn3LrB?cj)=0kPsU`YP5Q_FBF*z=ut_gOTd1_=A0+&> zEC1#6_F)jHWjvta0R9~)5GB!cNnIV7qs!5WKK@=lH)D*zfCQd?rm7g1S0wV`Ll%qQ zryW!Agv?v#XE_;tzp44kWalq^#(&=~9@qvWIz+V#@CKjw9ZbMchBUDHy6 zUu4MqZ58+&nJuVCeOTO?I93qPj-zIG{$(!cg3y?GR(3d_PI%0~IG%JWB2hwJUw?l} zUEt139FT&SAdqSl_}gnJ1j1`5H9k-w!oNl~yoTY##VOBSZp4T|YP+gPm-9c9$*KXD zB+6<0r3CdaTjVy5JoC8V*|YeERtQ7T{)ksg;PTSI>3>oX`uF)d$Uineb&%k3+&?b| zrP0-HefCI<2tLqeLrMkb>pro8ue=9@7?%$4(*A4iPpZMgkvnhl;Q^UHt`~iO|K@b& z{ead={P0M?*#MsJl6Q9_>+i18!e)}r2!MZJ9D`9pK)NB$cpC{e6SlHp85;KT;p@BC zGaibmu&gak{pTz;Z0;%Kp{F8~Bk&LH2}0TW@vXUTE1iQ;AEqLQ|LAy>$7MF`hZ?**RDsGOhNVe_^$ z=VwatjzbLhSiGLU@sW_;E`BhhPmcUB2B~Fg_gU`G@Zm|2uV;u4GbHH)MtYt; zXD^Sd6e{klB4VG1q?Ocv#)N-(LpdhIK3kFn@4Nz{nRMc!g8DztPP(l=Yk;h#bb(Gb z4l^46_Ql(#9AGZ5X8++^UczvNI3D`VX^4NIKQ8}c*dL#URb=q;z)tM?8Hq`O+Yo>e z5Gv~BvdyAy4&~2zXSkw^gbDHY+$ufS8x*vAi^{X>T68ua?GGxvXM{Ia|9knJom&J3 zI`<>-)lM#OU5Q22rG?K|D=Q(b6y|us60wULkb+E7{I( zr|EVAqU(g`k9VY<#P`E375ZXSj_%&$OmwgKw!bpsCxf#%#y3zR$dg1Hxbgb&G^QMH zJzp@&4eXA;ulBEfiGL|JL;@yIrc4e08d*q0tUmI9f8S9ifF`_kvv2I#$9DZE?2fff zjsLYN_9r*o!GC98H4_)=dpz0p+@<3~4SYL6-=Fs$f;l+jl_7}#&KHR7Zw%*k^15Lt z$eI&6Vl&MlG*pR>N#OwFoZv|C6W%ZKvAs98GvXD5PwDu%pC0FDLbpR!q0`e9jPh#u zPyfe{x26C=Ie807)TLkFc6*HsW@zk(BYUzq8x}rAr*w0eX-<+m8*PtH0eLDRnRg5N zgeTS`yCve05QRB?Fn;h@bHKWUsCPN-JjWj&UP^qN5v_?a-s$B(4Po{ig3G`|39ik$5{GETPy6x{z`w0M(iCr{Kd}J#~}3JCuD11;|<^+Z~O8#ID#5) z6ShBx0ROPnr~7Q9^kVt&Au^){yzLWVOB(zJId=M270xFP=92i?gwr<+XhlL>mmFc^2foB$Pe7ow-sf^5igDtG1lwU1U*IFJ;~ z4M&vpApE(_9HgpS3}1f-$%^bSB8tvVlz(w(NV^_rcFe_#;W?QrUx) z79bmL;=ZVIV%(~7h^M_Q>|JqRpa9cmC`#)~_ zN0Ma#-2f#P4}ZCks{uoxJ`bM%)yLPB#~tLcl`{-ZcpG35A4hd 
zd|*VZC+W^I{lj{`6d#rln?qfScyka93jn|&h|WE;zS1|(k>I@&qnzd*k;Hz75-m-A z{j9%D`*-9mYW^Gm-r0FY12g{8qx)}U);o^~ALKHyUtI9L z0QX@boqrsLu&5#r@Qe|D-ba_6d^G+Fep9_=uVq-3M)dZlTIr&>_p3qb6w1+zo?63FUS%~-# z@q#cAuZ8c>;2p36q7Yd@>)}Cm{rwLBk*3%3J0IKl$py+ZPzq<0yZhz)mzWq+J43x+ zR)3H3gB$mFt8 zb9UZC+Wj~B{N`No&4&Kfg8G#A7&||P+$p@LsYX=k?wdCqx&8*3$ufBH#_VZwyBBsW zZ6NI(#DOfA@d?kk;P%E=t+AS{9hDom^RVGx9_dv0+q}Mn6LE3pzMjSp+(}>+W=WSc z8{AjT{OoLe=Fc=X{vr|AkJRYcIV2*q?S;hGGrO+tQEJO`{9=sG2LEfg;WUNcmOIl5 z+x;lwo9)Jz@#K!Ih-&UyQtcIRIC+nRsw+-U^QGxP9CBqR-pS4@(nLqF2@2QXk-3*h z&O=*0{GTa6wpBRH=}WK;bq@7^YXd5 z6W$a3_-m?JV@_lvpj$N+l1WQ1Ep-!_{DiE6O3csVr-FJnwV|tPLR9mrcii4E_OD=t=tCAjeisDU>HODjQr;gCxuvVu4dA@bYgv8#G~*4^kLM>2LLiziVR!+P zXT&h2_rfzZkFth}J2jnP7_cuM93x|>ezI-wV}L}E0s5KRXo=9I-N9eg?GnW+z9R1O zm37VJoLw^a(^#~clymjvsEoMq|MKq}gB-DKIN-SYinI(LXF2fRkL)^#UdWW1?doLV zBHJCs!A&NDFat$f;$67Gga%zDr;3~)QRY$nmEAHROTq>adAC?vItx@K4X-_zSn!(6 zUO56O%)kErb0D7Gb-Rq2Sltz{ zua_|!8dv@`JcyCGhM+><8BKPOKm?JcaNr8Q8XdUb(%B&;TIN*Lt1P_D{`p{T1N$r$ zu}_#U_Rd+if%|1T;NXxA3)(kK9M5V0?>iBv!(fmxJRHN16fdAN@`D7%j{6S4p?LkY ztA77upV(v>mC+GSiHp1GgKS-t{<=+i;ZdDw#_+C{vB4GA6|OTcP#m6bKUxW$L=3>} z$+}WcxUC$~9uhP0#^>@ULDZJTLejJhuyC$KdzaPNcP_A`*_JYx~}*2x&3~BT;0xfobx)b*Yo+*1#s*?S7+1@f}BLjNJY#J}H&|C$wbd0^#sQVZQq z_Pq`1M;U4ZrDD)dERtZ7evUEAa9{bv-7$+mjEUc1&fB8aAH6!r0sz?Dj%CG|4dK_{ zGa>(uyUxqrl^K4WO8~JyhsB{X)NWvtFtp_K%s}`{>sl3u&Ik^CB=`+e1T6onJ;xq( zGot{OSKK+HM(a*{{CRX3g~rw@1b`CRxO>F%;wISS6BAWX*J{Hh>tgoKyMy|%JbGd* z;ScMVF0)6bl>L4C`+6yr0sC&clC^T%qjyQ*7kb!hH^)l4N&k%eW# zekH~HeiO%kEk0V#;e|K`W=_~x;&K>Nlpd$rqQWH5kP!HJU0KKDQjj%Kv_fX)Vrubz z<`7I8i0Q(CUEdF=y9&(R{!2mrexewN7;CLwFFjD!VOnh{BA7{&&}d5IG`&zlu~z-LsF3q@HB6}D0}daCH)UcsYmL9 z942Uhf*-Vs{QGhWY%u#VFR@PIO%!tf@0$JBJ4uG9_6T={nlS1?F)^@ngdb;Ia9d4JOKr& zU?@im_23);q0e+eV#}3lXRlm4!G`jx|Fv6tzw|%tOEuX2xJF^oQcXd@<)`%^y(=5$~q>x?gZ1%@=AwRNq}52_xjY zzo9?>-!J6YL5w=B<-1Tw!X#wqOI>G+G*7*!4;=%A~_sX9)JgJ;bkIBklR#0RzU7 
zIdh~_wabL0aABXJzIOlbh4`;khtL4*f#{NeRl#F)4O1G4My&Nt5GNho%X?1o!H*kE?w`>k=$0|%*G|;e&uA{8?$6>oSckJ%s9tL8qoz$^lks^1^yYh^rsTfVK8Q$1qpVxToFJ>{2+`X8x;!{=ndu_UKUT3L2D}|1RSG*Q0%L(R zHnHm1Cu_W;LE_G9cYBt;Ab64==!XyHu;{U=`IR(v@iB@+lV%~dD^4aDG33aON-G?(s3Pf zk0QFXfi-=IMhl%g$dO>sM*3xu2cXqPT)J}hX+IvawWw=Ag$?pVpAxjuiRpk}i9dBw z)SdR!b5eW7_A@^vxX^@v;z5sNe8Q2@N`EdQK8~ZP!CCg<%s(y(q2Qa^ZutS;T$n=Rad)AHjWa1B<5(v1V4Am#rt36z~bx>N_8xZ_aM|m z$nQc527%ldTiVmg$2f|uXSm*Bpl_$1rbR_5GNazN`JsrGXJ^QJb!^3>=|TQ1yF&dG zqx}4IfJvKQm4{;?qh!C=%lPm0J&hok8?PJ{BfEga#8UQuNY+{3mT=L zrklsSME&Tsq1vUifPExwF%Cp?SM*(6j5Yet^a!hX+p&8{F`Hqp zWszSBddKQe)Ft^C5P)DecXrJ%kM*BI#i!WyEWYczMbGT&>n(Zy@r+3i;L7-7G!3Yb ztK&JSn>S^gIlW3{q94=h)HJeE&+rf&6gY}_JJK%ZtFqJp!DZO%|A~#47%dLnP0sa3 zHRcg)dgFu;=E9A#6H}=2)}LLqMv!=%2Z92VoE454+7O?A3s63WCwy$MWTsL81Zn@X zj4__}#$bOH!^JbQnxKl-nNK*0t_{%0wIcT0MdZ{!Q&I^iS6=vZ_^#vrgMuG|6v?_J zU(E^ZVBW(7dk}bcc|6{W9bO3=8#C|ryGzUoJdE)B_y`{C`32k-MhGxSb%!oJwHuH1YvA-RI)FD~s71Pj&otQ3c5nDww zCR4EkO@%9Sg%Ma5hw9eWDbJ2>EX9%^*5f&pJI-}S}e8RP<0L7L3u zBWg$Y);Es^ZRJ__5lwkne2|&PNW+t*EDf=t_c{g{eU>s&$o9`6r|3HJ{_m3jddk1W zqhNaC)NjX1ZddZ300JPGP+YZAM~t5|%N_(^4S@jIih95!#c+bKmOm{$NR00e!J=dF z^J2p1(PD+yf#hd@qd)$q+4G0o$)Zgee>cn5#iZwkt*%;m{pXc-e8uO@I zgdWjfs~ZoA7W4l&A0Zr`UFA`Svxsi^*~q6gdoUxXhYqcTmm$7}Ai*eY2)%Oe!1 z#-L&u{JoR<9pS+}BQp_6Dg7A!;RO!z#r+SyKwtzTA9`)`@NR_AiDass>Xr^lA zb_ZXqMIs#q^reZsX+?HNXhEf zDO8*eAW)EQ*O;P%q*VMZBA8Fc59A#Y(HF>;HQwnxzrQPxE0ya^wA5b7w)OhCh+WMl<~HUFtN+o~PV$QbM701?kPW4|-STNDQ#D-lIi| z5BcPUzZM);DRS)ILCYBFN`Fzv`4vW5yFB!BKN1V;&+$eJ zEOn7FF>!X1e6pm@Xp*j@CC4&&^Opa)xQzN;PB1eH=}bobe6ZO= z?_*?TsB&ab7bigmzy2OBa6rksI}iA%fFnJ;=pRE~$l2|;0#YrAPNDlJa%W+GI;~`t zLuuSI#NKC2sGj-B#J^V>|KsmW5u{C!Y`Zo}m#|ro;-viTFU9D0kQaB8kS*N6ZbShL zq>w?&)=fRKsE0sqf@=hkC0FcIe=ZL1z$|EJKT~G2AILo@IoPuuStZ`kgBT8%2G;A4 zNbo;DW`((fCx6gK02%n711^aEh^G@6>_nRG!!^?_dw0`qZMip}}9m;rA%7OeGT|N?mO2z*?1`7fpA-VlI40Y8!1dv){WM-s9)3QjE zFt0%V)IMdXg$>>AyDJXsfc4Xc8+GMF?oYrx;_J1cee0enCt>tmcSR|8)yey)w#I?M 
zc0Y>Y#%6ag0rxLZ^1kI5JvFyEX8kH0<>hPUV;WFcfIs)20%IW)$z|)vQCpPmWcExJSQ+F|2lv^Z@;nm^7TNr3t|Hr`33zseQpgZ>Ee;s9B&?O{0r#{bkWjG3%hUz(^lc1JO7!$Ks1N%Pl%EI zv)kMq{nMSya4(2{DuLO%-d0GeWS6og&W^@d#vs$L-xu1Vj67i15vT0>q3Aiv9qz5Y zDKgjn3(>a~)UJ{u@FU&BdV+cEzLOuXUIuKiO$nVGD`vGPApsC2ITa#je&ML15^Hbv z8=fPtsV)g2Z0|o#M6X@_28Xz-7n>`HUDv87tAW1oP6BDE@YjTJ3vz9@?d3V3qxlu- z@N}Dn+T;3*nnb4*Rihs&5Ma=rPMY%TYMA1@y9qpCP0lxw*N>x$)J9 z0^P??BYE9rCO(=2nS#@2XA$HkdTroE?J2~>OuvGK*E;|Ppih8^4_dD25m|2ZF+!Gvr7=!bUEbcnM~}HRLC};w(S^np#YB?-+9WC=fTA0XrS@ zkNFzvX7yemhNyz$jGf!dfvhT?+pA(R5w=)iW^HlR)i8^rq~`fx0e9i>riFMK2h~;N z!9VdL*~*{2au)&hviMim%NOkAaK^qULe7lz$OQAg1_C-r%t!L5k{yZ5&2J*^DaT>3 zT@)TNm8Ybef_iHZsFGf0WZ86U-K@0=@AeR)n^C7K8phH(zGtHt$h_9>?;~10;c{ub=@{F55^NXs6@=7faX9LA$s@$ z)02L?ntKpl#A8(!IkQJxlUWAmgmoL(uLn3sdhmVx`T129(BDQ}>_ZqLKh(%lr!l{l z`2TNAK((^43bgi@sgfA6X7d;!qJn?UK;G3;Ini;W>3c7;`#X};aM%@Cv@cYh(&%^w zY)71>z!->(ZPQPws>dP{k(te3BEz0*!P<^$2vwLB)^hABFak)JO}cp^lS<08U217hRK>m#?Rj(N zWyuI|2ok`U42PyyKUZtn3;KGtdthE3ary3ck($foME-e;HrlCeplN~L8aJFbaokcZ#AjZuFp@?7R`zO1cVzl_L!}aOl_W-7nT<)!kHp$|h z$g!=jchrpY2$r0ynUWNy6HT_USu|*rabJ+i6sLR}QJ>yg7sEp+fG<-xtb89dQo4?2 z_iI`H(2lU(!5Lc_lkHug4u5K%@4){_b?&avDf*Uyfm5DD)KOilaI&2U_^eodZ@Ak| zuv4C>@$L+rDP3%W^W02F4Z@yeekIkMC|-NX`s--}M@gDf8UcrZ7>pz1Gn)dM1pE&| zcldHiS(Tp>^7ql0C#Nc><8MwahMKCD^D&~!`zQ-0fk2E6mKYCAA2`_ZFri2sS-b>9CB;u8a|&?vXBHSdxSbvZ{F!!VFC+lcorOQPtZEa%Hp!K zsB_*T=A6@*&}0U!p19fk&*-P$bBw%zVj;fv?dISm+|PP<*>XO&v;ZOLX)p(QGr@W- z=0Ni#Gf9r{;483C1ML_g{Hkn^HSp1V=dzzF@65)TM8oWh0 zL`l#4u4{kyVpkcWRReVLp8bgV0^a=ka3;PBXrcvc)FwYfx5ff zM89)p7n?Kj5$f``jqRbs-CjV&Q_yTyA{61aK(dLP%(*fww+3Ygz!@jRL#=!ipQeC)C%lw5wvwJ z*3uDqfq@X1Hsfy0*r zOg;S0<2SFbn3Z1P=z-(3KuYi`yI2$mk#rkFK0G^mr%e=LbpWc{+X>}5U|0snN>^>3 zwd3k<&$9uwmm5O97Qnu9>(N9qshYwFK4YNu*)NPO!LZFbSF}+H8AO;|;0)Z?i%`dp z{zN#Y(EJ6oLkAk|5QvoF$C(UVlvrDqaV;S-ByLFf;)YZlPv3V(1{mrt{hoF_d(o5R z;&%i8h)!8q!O?&Ro6`?R%61TczBbCKk)tiPvv#5ihPU7kG$FI^RtB7Ab9aE&{~l0q z4RmYQVk3+O$f5Qupt*}n!T-#4^{9#njK`!^KC}UArB=(u*UhBzdj#SDd|+8pu3K@O 
z&Ko2`SS7pGfH;eT)F*i6zL69zj9n}xz44Z<2K;j*OB?#|9o^K_96TnPY5`UB8qosc%{=Jvup*QfQ+Md0!Aey!obCn^vs4b*_iua4*vo# zBh+jAK%;{y0!WPmyxT{)?^vPUiL`hM3fD|OJx>9feg>CmM-tclZ&?Uutrx=ChD@$}cABMVp`O)= zZvbMYD@VOPm5rJKL6owwg~bst5L(wFRLZY?ddW$q{pd1F%Sra1yFP($kWfo8YZ%y9 zkJ$XuJcAtTf3orcq1yodKN;mgL7QJsRdlpYAY*Xn-oA0>Jxa0iE(^vvKZWJzH|+TS zU9DwpU1(Hj415^#RDSPFfAFWFdZ7BaNdF`9v?ztJ{VgEbW~y>o?T^m$o*%C9nsw#h zEDyi5*(N$$6Xg-PPkuSx&V$QNY-Q>ihSE`&%@^@TV`6mE=9d(d!!qs7* z?-X_Uls&p7e?A#B>4pPg6jVOFi1ZC3p?vFCuR92aUy4SjGdyD)26J@(n5!n5je;=| z;BPK|qcP`0*ey?s?f90mmco`+lL1G|`-gKN23#<%yHR^=#;iq^bPNa_zkWAI=sJMx z^ehtDlzZMNbFE_terlGnu6^01M9AgWh3F$N1^vq2lyPKE-~;gHPYF090t-l_kCLCY z`gB~kV*AUp^!y$cgn8qKO2vj!i16yBOVcJq00}A7%bj`;#ugeXP5__Fpjr2k6O2y< zZ8CnF6{dA>;#)5B4gKLTOc>PXwq06i;PgVqN+VRAlcG7Nd5aLHQ|_8ub!OxqH(|V( zGjPK-CtOj3$&H&;h;2g%{1x$1L+n9PpT+tJrEL0jrmA>tYM=whQ$o$Y!3_Im z#8+fBkN>>%pqfzZC?Vna@QS@|X@Py8RU)y6;d&1;{=(#9ZVv>E~PKe5;Y5qErjSgX<3j?6e8g_JyBWD4|4^q9%Ur@}# zBj*=^-s0EUUR*fIRZ*F1(q6Sb79DK!Q#J1%#4Ja^E~u|gt`55CusmwZ*#>@zV@^}^ z$)&F-o1$dd>NTKpH82z%c0Bs(C4~%`umMQ8tV1I)8JTb!jW7tSLrFX(5Es z=@R<2cy`1U`$?)0zye^ses{o`e)%nz2FaDiEN2m>s2z`0(QVN@GEWML~0bJ z^W4}KO^JKGx?X3i5RxjGoJ{ws=XRPVOtv~3no{bY%E#|^fs~RZ|MQ`*${H@E-x`WH zE9R;euSGAy{LR1%kJ`Tp(7_NuD~FoRU;N?KV1+}vW5yWuq|AY-Gv`Zv+Qk#86Q3r8 zLd{2N)2*X)nq9e7b8W{tucVnO-q!${K7o~QC&KQ{u7AF7tq z9E@Dwz)E_sn~LnmMqm)f;hh}L)J0#9 zLmMYGvz!GU`lDdCFYBRs|7f;jz|y3kz6-I_h5Nun<*+)x)Xuuc@#Q*w0atOKvA^V5 zujQ^)t3I$|R~&;uP9=fvKM!?l+Q#qoDoVEvt&Sp<{6|l@N&X^(71c32V%O9U%)JzP>!m7I~Ts6BI5UM_8Chd05?W0$w z%QvYIrme=}9nk1&2%qFDDt_FTdR*~h=8(z|Hj8ddTCdFCvqRSt)lqA|9_ zmWlHY3nPJtiKs#bHephsNY{X`rV&ezBp_4@;TY)!}P+PwY(kd|GY+-iz! 
zju52lOzqC1blb2kH0BHa&C92mb@$y-N&zk*uWKtZFz@(I8Dp_bGe3-A|ANH4fj9)G zp5wZ`Z_l^rXEE=|wR5Wd;!P1J_wGv2>T+q*WL_%$GGw$;Z_nL1oWi1#!3Hr$)}`J4 zl84u-ZF=*~gMl-%ZgY0^EzIu--{?`|Z8J0L&WLQ)ebD=Gpmd$b*w=+LLqHJ2gN7Ij zW9Kvp7VoixZ4sv-$9!t{-u;@&{yzQLl|$~2i9z!)K73^(LG9NZGi-}!-q~A>%mm+* zI?uEu%E)Vz*jzt^L4B&_LG9;PWNhvyBNt%a-6$}to!_jucY?;N$bS-%rlcTVUz$ z&vg4tO|hBN>df25&sQ(F3$0ER0Sz0s;^vB_ z^zP2K3_(ZbXQpU2U$AVqnxZaEcK+8w4)%!ue#GpB5ax@K#%h=NLfA z%5y0ianRdYNe#QXHk^^o_%9U=!6)u|kxW@bi{9c-k`Ue8UlKUwzRNVU;dT3okS00U z(?Lu}1=sb3J1U#c>nK;IzO#bO=00}zl4Ag`*}Y>0#)biyIQWug#wDfWiW>Y@fBKmJr((}x^Vu?c4V8|N6)zlPo1aOQ%4=C8rfBClg|Q;wS2krv)XP*I~jvH)FhN335`82Yts#4 zO9w=vxusf#4~}PgSXo)IbKznR;1PGtOTG}@cUs6EeA0Xz?yF}}^Y*Xz5GCAUsw~sm zDM(DiLv5-QK3HA9Dh}@J(PR&{tH&OTnmfBT-6F=o_DiS)W6uU6=y&BAzFHNH)ra@2 zDj8DG@JYSjiEd6QB&*I3?oJyIy6u()uqX+sCuS3f-|s8DBV<1{vMfNxsqG8Y<)1os zXZlMd1%@Hp@xT8)`$i~F^D9qfU_54SC{!=HI+lL<^jN+@ogTAFM#QuTeAejcJ>Jtw zxyE-ge+jv4*q-4t%{{K(I``71ef?LnPUY~r+xjROv;3zWX9>@vK^M)_D)e(rWt*eQ zUVjMZHDLprnb*i~)^%Syoi-sd7gO%)rwXOe2#r`tZ^OZI7Yi?$K$gG=A-gt`yRBG0 zXN4K_%qmPN!_QsA7jzNoGq*dRepD$f#Km{4<(-EgtpsiZC5uuDTQH}NkMs6O-3dQN z8FGaRi0bQ;42CnlYlW}Q4hCEF7dOE3vECNj&OOPdDh_olzPf-q^3!crTB(?%=jFsN zTJBU?*K?MCW1H?RD2SAXjKHwYk0F2d^Xfp&QrM5e%2PCHC%$bqPaNO(dl=Y1bwG!< zC~R>5rEPI^U>(P&NH1JN#WY(t0IDT6&1iN*5o3EeG5_XqBN98R2ucj>EtMGQK$h&g zADQNhEA6KT0$|)(n?TJ=(uR0j6C18E@bB^ox4s*Hr4`uRSKa*BHSF8X+7oY| zAiOJBkd#7Zau>W-PEd@pv2jwa#kS@1>#4LRO7z`9#O^>nUj89s^(%jG1F)<2ZzYkbZI+K#uWX*Y*n2sZX@ zXr0RFT%PQ{Io4$PJ&NCHWhz_0#{NyWq~h5r7LFo{hhW;<%AAOLfAlH!W|z$9@#!9u zq=5$3uiO7DPO)1HPi1HcGXP_4!YZHm@LV@PB+iTwtIZ4KfyZx%x$MEVJ#|IE zzUjx0AAUl1Eba4mlyV%PBoY5`){T|?T7k)PNe^*#_v@_l0tH}<>%Zr8&1Ei%3a_eE zfWatC&krPaqV8XAr-n7t#QMt$eabwf8@aY@X!~!WW@yRZcKZ2UR(iwAIVSQbAG5u$3r*lNFj@vdvk@e_`z~QlDP)QFM8oJxgEmR;M->Br&M<`_4 zA!Ra9>X6%%eEBuR&Qh=)?3eMz+&MG_IDuw5$?$%++|{d}uHA?dDe4vMD|Jwi4p_7F zp1DE^@j!q#1^+dlSS7pMVjnY@cwGT}tQT1Fo`svu+NPuEw2>S9N*G9y$=g4m^sDjJV{~HSJfv z{wtfoIo9SFK8rz7LU)=PJH*Eig}cDEpJS4ZnEO<-iASFVn}R+1%iv(6FwTRKIm92y 
zK!@IeJ(VdWeLy(b7|AEeaZQyv29NeHt+8T;uW<04VZDpJL=>r%+WEHOxRFEsmC7k0 zf@?JMIP&q*hKeteRWdWoalC0IJf&4tL#|%ES{Rw8^*~a%8{F!#vqJZUEE+?&AI;pJM}!L>51)v;?13A!Guoiyo2ruONSUZp;oA02!bzc`w_vswL@>^I-Fg8s zuzHwkzyZU?{9~`bsRvVaYnbt7&@FP_M>4=H%LJvzPQ~wEIEdddg$Y>sLGOxtA;P}X zJ%WkXdF??c-{f#@@Rci940g60g7VTei{y*#r%$ar2C=CjF;U7=i43U1Ch#~e z88=LV)d^&j+38P>iFuN3((dL?bA1d%p>{H}VD9(X@MHFYCb=oCmF`&m$IJ>2n^xTW zs_wA8{HrbBT;paQ)!AEEYA_c)%TQA*HlwrX`?Ke+JL^Ww>bW7wgOJqH-;NSV=;qKV zXA5Le%8zu57M;BTPbM>KQY+1>sFb0p))wz1PbIQegEtl-;vk*t*Iw?T6P|II@KtBh z9SPo(aW>ZN_llQMSODJiUvWy2n!Wppk+<{u_UfioqU8u@Hyz zq+K_T+BJoa;6y~{$J|zHNn~$ysBt?eW^4<28-Wr zqT?;-4}~QR#gf%_$;;O)w*8LBA@HS@>#66th;U^h$DMVa1;`1n)n#0MQ|>bOCN0Va z1`#By&3CrJ?Z>37Tx^PSP~Chzo~ zi=4C@$uNDRy-9KLzAxYhD0CAbtaSS6H`#3|1w@=O+f`LpAcwQLmR{}kMykKq?vl^- z!`MHsR4x;p6DeckP&|W$3K3zicEM{t;QPyG?iq!-=tH_^GY-NN_fN#oe?C1Cap4BR zQ^iy>n8jg%wUK+H%}j3rkH$9_dnzT^m@g9IDM!uc%7m>Gv>9JHhUV6W-!vpEFvU_T zcwmw9KFEc~RNPqM^CAPcNE6QQ_QO5`lI}V%-R#K+;k7eAc6rx@0{JKYDd83TFT1Ktwin(fV1?>TQ9v;N{ha2_4@ zgiT835yBAXr~~;ucLR6)O)*RSFUCpNJuYyYlQXvBbHfOoRR2L&2bfLJe=OO|dF1W& zbE`8=k;3vZ;vYj~Y3mnZG*__X=Ti^AYT=xDxV;=^Hn3$UX#3kYLiDT!g?89Q^RU-c z+d95o9Hq7kBR7(jCJoz7a<6zRr_6UOg?;sB%DnTeW@gL;)cOnMbeLz`fKz6{@qo4z zp{8jhA%#cu4(9a*ZN%tZb_k2^--LWFV6-8a!qP4{fI$dQn#-fQw-X zJM}2`Dm7YDF|hSGz%Ku?r+k5~YwhjckPc?X;9EPpzR6J?r$*C1#W4tXbVvb3kt$qW z>MUQ$k#r*8(EF){+O+?op*$YVP&8)E8)|>3;#cJglpWEr{IZ%bJ-C}Eve6uAkYk4 zLUQBrNll09`Fry}KO24}pT&A1bU5;r8Nrcc7O;`4rmj8X5Pi$Pzxu#nH{ZOse(0k| zspvpPtS1dbD!vfDTrW>|KSh`?PynM>>S6F8yM*(~_e6P^f!sccB&67fgL5N;amC7a z9HSXReiW-fxElx*&Yz9VS(Vd#XZoGKQ91qmWOXC_V!Fz(DITNK3Z?e4cS`#^iNg$# zx+QSz$2ivnp%OrS6LRp_DI2K8nY6x-hbkOHwoZm-5k0SMs`*;{pK!@FHd6~H9FkXOF8 z*qYQ$jUJ)?O#|EtC~c!jCID|YAzZt4;uf41I3oG=1+jRghd8Kr{O#z~b58wbPAUsy zoW$$}9*2pfpg0RzWFA^b+Sq{DLlLE`Zpp|Li=eXx0G@#aJFw-aaUg;T(FxG`A|1~^n`^rhkFi#NH zoMc!3^gH$Aa}dcQt;}N(hLMH}+IYs@3T->BV6N@lN+~_hNU6vF4y**3zwz>zwpXx_4X-D=$Nj^v!04`!Vy%fJG;MLxpNabYEDR)i~g#t+s!_6RdAA(~Xx|Bv_ z_ExsqrH@{et`1a)c{sBFF4juba?s3D@S5z-y!rWM;4LLPN=vbg-?30FQ=wv27(cDv 
zDeYmgpC#&sR+(k;`C=VD2Ek0^2n02%17da)0moO~(fSgDnvq-ywK>>R>}-g3#Rpuv z{KxngHPn<^T!?(gO zUdn#*ynq0}DMp4cO71PG@DlDW6mMQAt9~%^p?RkK>Vp}1%J1(v!Ny#4DG4@IyvDx0 z(md+h_dY(s;dwMi+`vWtGK9}8p$4C=>F~rOYPYQq$4#vYE?R!oYrpMr0S51S^%dJS z@Vr*`L@Ht9c7&q+MK&t(iL!kxqVCk zVtN@6s|owBNO9`L2{GbGFPnBjcI{Ht0k?|Gal37Yo^Wgey}v9R++AJ-(Fa1za8$WQDhxx3z1%i ztgf&uV#(|LJ}tq-@X$eus>b{o=FXzh8-yUo=QIEedwmkGvY+?8f|0s~* zTa2}*D6v8ok^}*+a;~FaER-7u?0uFeD;S~C^}5OS_bc(&PzwGO;)C_qX3R-L6-won z?G;p6>Q6|4cL<|N=K;JmYr z-;lgyl&D#>aD*GD$^9`c*Z_h98*s>d4Z@)l|84(wIX+{B{U7#!SgUgRO<2Gc?}yfa zN5jO!4yfcG)C}^Xa;(k$1uYXPsak>?P4A$xnr+-_l;BXYre`PUv~u51<5h+1qn_Mk zK)z(E26Y$PnH?b{Tv zsnb9gKD~V7fpv{0PF*WmJ>BOHrqa!KZGSHIoB-pYlC1qqAM@An3M#ThsX1<_j?-zC z*z+|B+fDGQl{-5;X?8zROoj)2sgC5Yfe}e5)ujHX5$h@uX@d?{@UZxu_q5ShJau`(iKHBX?pL_rKh9F;8h_=};GKh;4ye8+9597DA;9djykGC9j#W1h_ zIjlVdu#&*ciUA|?HJA?{1RD=2!w)OBE0R}f@R1r5)j{b3@BgeZ(GEQl&(Y5pNfI=f zqR6?5lGCkgu!lp{*2gm&Tf-i)}Bx8wyZVOPNG-*F%c9H=ihz3%ktN)AztVO;}Ch*k=|^{ zD+Ot?x$6(~Q1W;RzXTkprC$Ju0)3tqxGnVWAg^em5P>+sR(-H_ zj~F1(WMve>@q4^;JjXOo=>6{3_LG<5Ox0)L zr`)kYH2oici@-LCm=_x$O>3o{rtxkWi&YU7_@5IE=>!GQuq zAfAT$(=%X{$1%kB>t(>=iMO4?JH|l^F=_Bfoj(OZk=t&$Vy3plxpU|IR;ofS!Oaa_ zMryct=*5Q~A}fU4t}bWJqHk$$yJ<}D=J=E2KHP3RXeXCD{oXT6R3X+;v*?GcK?KIO2|e;Zz6V5%CJK3M}vb)Y^j3CQ(JP} zlt3u~@!S!(anTQiw=n;;V#jebD=2hc3eedYdEfYS#>({IC&W*Hyc!EPIhy0_HEMt8 z{bh(~ToDO-!y0=aVKhTfA*jwfumF3nh<{^dta?~+s>bLXPoqZty3`}MpCgt{XI$he zm+xIJk60gK(&>!uao}^UXpQsw+I_U}>KwY zta-TdB$WQ~<4H2^^t1~PyF#!9E+p<%}(K5cwvOm zog(B|jCh$vwKIFhK^$jgc2^y6b!Sd|f!rxxCOrSpy017SCHH%BTX|xk6IB1jJDlWx z!tvu|41hRGSR_2dqX-$F|6QA*tFWOJm|O1j|8jT#YP}))=*~8@A#OBtoZ!FL3={v` zwyqx(E($RaUyi9=$>IFLWz!x?GKQ7CYb4@d!#L{z{lu=|I2Or~eNU!JG4DpSXc?ub zb1je8k7}=mG&Sl?5;B0>xvL`S8V;4$-_{?((zBetzX3$|*XGg;6(DBB-c0Q@>TTaR zu#@!3(|Xz5q%kS7w?+jJU3^|9jzY+|pW@IwzrC^iX8Xu#+9qs>2bmxa%6c8_8)yd@ zKu&;I}FL5JOEcb1zK9SZ)jj9CDMkacGM z`5GEsxEz0p9f>BSrBD2As-B+8BAAd|-oy*#TP+JRadWEQkqI>f+l9-9#k#pGr-1tS{|D^LNnw@M{_w6TYT&!eDAUU?yLz2_k4{h|Hg>UUh(m+0-MIS20O=$86pF{zE^m2Ak|rOsj6{~(BT^q~}tCH*ON 
zuE9Ivo;xPaNT-U~%@-se{rD}ZAm;Wyz9L}9qS;|E-exFT0EzrXY0woD?T0gcZvjE% z$k%N>T3=r>Yagih`DHyo&BYu5uljlz;3Z5*?GgaGSJj}B8K562V5QlXAY(RRv$=AX zS+%7^@Dbk~PGRpq*dl|au9mj3Xg3aMMP^+R0>!hQ=~HXn?K(x}st(J+fJEa9_iZI; zvJl7K<(i|8mKOJ?WJA0BQFFf{j{n2Gh*$E%XJH&67XkXSi0Q?`pc!(eqq!}O_n6S= z1%$LVn2>GzLgh8=m)+`&R$X+3_>*S7n?Y;~T^f<2FPNfykJCxkLwSbHCJAO@mTs?T z!18F{2x1FGI?vWizwaO&JS1AK5ln6Qlfki)$r>Ol@k})6UnEhxyX|miqy?{jJ zLL)RRqm{oo8ni(!Us?pKR6fojkgg7UDMN#;M|tzEsy3gl%{(ez^#vagrN7{ zR3g6&mIQPXULB^5Nke^AAxgVZ|So@ z?6GmjZ2u);qI8vm@0{1>2@i}E4=kOR6fQ{Ypb1jO zwIrp3?uQ%ju51KQNp~u z2ts4niW~?3r0=y?qW+0dj`jM3khR`~l4HN8iESE`8@Rl4T(=#^`_|c8Cs?7OzVavv zEGAM(G!PC3fL|>f7hVv!{qQWW)ca1)()6vL<5$U}T9+O56ZHiTdqvbxi8>cHhAn@X z9tW%w!z(gQKlpA(yDi)7!|5I+0>n={7iReP;C0g?qPxK?~G23WdA)@DdJ7G6nqV{Q3LoV`?OukNs-Ee6U!e0C=cj zDtLC|Anx0Xc_A_~Q#BH&ZeC@iugn#|L9eCwA&5ASRLU6D{kaHzKqbtU<$$7fZiEHk zP5>dDXZpgX=GfVun}AhgzZ{+P%N6XB$+xU`09mVVOaD`!Rc)UXwx99^q}B}pN4ENU zCE71fC^74lKZ)Tgj}BKbIsC6t!*G82JcC&LAFOt@OoDZ?tedL-dnEgm%7}AEV z8h#Ta83pE}MKz&=C3yl4;|B3P#2+54ELcAe_^9&(g%A?d+r&ujf@wRv?eKl$x|I3G z;5!ZF%|&@N3i!a#BBp)kecLvd%@u!WA9qY|0YsDzj?%qQmaF0Z27R&kt1s?0e4DKp zye?!nVc@8hI&V5JYW2y_8Q#gJ8p%r#@%>60{rPS^6Bb)4Gu6s z_ORfixtNX=rG}*eXuUH$oB=e;lP6CM+7rCWd1Q`W zSEq;Y*S};JdQP;R6VjcgcIR%B<{J_cImzNZYC0&_+(jx01l5_?YgMBvy>)ufA|%2N z+sCe^jXWbusRQ?9uCg{?lM^RNX!DtL%!fQC0d1USn6}^1H~&RVi{M;J z%N-WU-YW$~gbR5+4XIXuVA~H)0)%c{Q|iAiY??IryOqwOTC3Rejc6Fl<;ytz$^#Av z@CLXN!kWxf*vFA(5p;ON0s6@>ZPpDIika@Z&+ATCYc9+kq}#3YFv#w}sIK`z+VJ1& z+$S7FTpw`&RjJf~P-;NBwRsFjvms>Sse{myW=o0_5Z{ZD8F!|{!^!r>?D{7-9wQL) z3L}PyFx^T3JgFHmFoM6i?_;kj|Adqu7cwqt<06+HdBI~b!xM2H8*?q*A_`HCC zDje@6`j8r8vlo>OQVM;~XK%FL*#}3PcPHA2+D*g(cAgUzo4<&Y!EC9X9DSfz)lW3TP{Ej`)iB~b!tlWM&EO{3UgSi9`STcWb z+o2nKQAVbA|5BKiczsc|AHyr8Rh~aN0Rae6D``$>#3DTS(4QR zPX^}iXZ{$qm}}{+C=mwb&AN|xYpJlRZvARqq`{aQp_kn%_FDmvS^qDd|Bqi&!q~wL zRD9^;fR4s?5Em%6%15Izx?zBQ?KB+45m(PbYQB{!*r*WX78Ag+^A$Q+1j)NTm;rq6 zEr(XI>Hv#&X(0eBdKQY6lUk+udXU~5VDn%2UBeR1Gc98Sx5%?W$}E4iD^tfv=ogZ# 
zTzS?=YB=?Tb>>%!2RN&%^?^CcsjaJ1RWz;r4S@QR251wT*Jxwt&7GZ1%ibSTDaTA| z_96FhDC}7n*ip4FMb7Kl006p`n{p@r{`WY=WO;e0N8E_)A(xh)1SISW&#;JIrWX7Y z9sFnWzFB8XA^*M_v@M<*Gz2L@H9-x4ORtdwVYP6!pYe^z!t>*E-O-n8n%{|@zYFI} z5O_3Q$8Drc<_9_!8JDZkNczuYg@yxrjf0Zx+NuK%e zcZ)Wd4zkgV{gVSHzt?aCyyriCie0^tx9J(d*fq-y8;j$|j{|H$soe}93PC3A3FQ30 zB|D!y!LI}NK{d<@=DIl!T4_PU;ImEo=*N2ytS5xg@zd*oDK${!v9JKX}OxJUu4!1AzhzGsvoC9~p2s{zDc_U0$ zH#>Z;Nbo~C9mJx<@@l6X)Uzy}C$iw_%#)0b&2%W3K}vRtt^+~@N~GUs|E<;)tYq== zD*%-s3cci%pQifZT#x?R0tO*Dudi&RP<=HKlRp#zYir-evc1!_{3YhR#^nH{G$Zg3 zJi1lbfI`dS_xirl+WZENE2MO!s&p5ME(x_a=M{I83s4K&M;s@f?@Wj`XH`l$4@rKh zG2Zmx(06}0;h?@gxGNuW#mI;6ch8bdwzlQ}qwBllsqFv%?=&b`3E3*8A`-Gkxe*bS zJwx{1dsRkuWRz7IC)=?%naQ5VmW*TXJ%6uL(f#>89>4!QIL>ul@9X`3&FAy^LS>FT z3{ITZ+mTOKro6w0IkjgR%3^%fg85UlyQhFi`VP*`Hw?M$b$G6FxNA88T?#Cyhl{N0 z2Ra$vyHeOfpc66P8m9qak*O->bf;c0L!6}ErM*SB!%*Of$kMp15smS*OMC$2K{}Xe zReT!0Ncx`ltlFXmJ!#2Z0BDVM*-#ooqmbcH2mKDUjerp{_EXOH-6`KWYm!C&t)l!2 zeS{vITT)kw-YhUsV zpZrEMb=npJ(~zRuD;k5-rROUA*MAZb=290(&B&(Pl*_Na+0M}fy-`7N_-d0~jZ*us z_B%yZohh8k`*rQPRjuear}EKW?!mczJpi)E4q)6um zUT`T0rik9CjMCkFUq6ICLoI#s8hFSED7pZ%s@DIX*H`sF$Jyjkyz#nwu(vARe$D|> zlD81JkzWqz%?GqLMO?|6^A2tL>(7}6TlY_5>I>P?S~WhS9l)gPoaL!d$`J^M@`gSC zH9STm{l0CUZ*djw1K&Lt&h*d;1Jf-*t%Q@$^|=>1N`fHyx72@V0-zV zWzKS!E;@7y3-1`ZHTDokHu> zQz{bx%ZNU&%K*~SBZ#~fx~Dn1Qby&``H;dotTu1FWtm>=g6C@hp)!5h1|M-;0zs3# zjP9izHK_HJAY%a&B>@ov{j(%VQI0EkAzOiUbbc`W>ZnQ6yDoZ`BFq+ci$Lj!M>65k9i1@zKU$xl|VObwS zg*To`>s#<;NLd(zP$}Akcn2W(@ah}ue2RKY6sHD&K*ud+j>>8~268fZImzw8mtQ4f za<|q{FNZZjiolK(O{f8T6J>RlZ9^VBky)`o>GbI<}aKbkNL|~oO!iB*d894qd7+y-^B&(AH5_=SY zNN?3u_j&qLy)sY~O)b+;RVfH`-Wv6$F{AIIyt$slFE)^C5IPl_9up-%&CkcDilJH$ zHf@5Why-whxGjFZrsA{z_C?@_YTiS$(%Wjq@0^!wbvvIO?yk==Q1k5BX=lX>n|*7H z9iqVRFvNz4sDO*xR9_CeY(v#7rTe6HtD4zHH|mz`7kx+N0*s3Nr%p9uXaWr=rLnUkd5cUjbm>7k~8v<9V{++`=$d@07x=|RB5Yf=~< z9%RO!Z?SiJb@+z3{`S`%AS_AL2gdlX8u;kW_S|O82p{D$*ysEB_4QbNkXU(Iw|)7} z%%k`eHRGrQ0hHkz$I#Mv7nnIvP|=v@7In{rp1q5z%{Io{HMT7)(8vi04LXp{&>+%J 
zAfPN=b7Km4(U1DdT}tY$YV@$vB@^y@7aC@CQ_c&df{*0=en$ihLjrUulWu~toLL>( zQOxN#$xIy?o3GA5VQQLr5DHQBV(-Ov5 zP@f`9@ulN=j1{Q!L_axOXFAwhHB})6D*%IY7$`?>YYnW8V7q*IWtD^5Fuhx`x8nDX zMdR{!>=VEZDLYFbsI#-Fh|WN-B!z^^>&LJgXyqQ_S-ZIZYwW^B^Z(6;L?D_Fw?IX* z9hFjkj9`qDliR+lO~%9{o3FCn%ey9e9sr$reQI#1x6o!*9%9$@snGcKvttx%PmUDKbOmlCPFHESo4zilWQ_NszMzoa!6XonjCLQ*qaUwe@Gn8OZyS$lNvqhC_d0 za%0|fXa9Qge2WQm#h#|hOJ&;CS!e?(!0Pr_dKay70dB#2Qt0&60wbWEajHYPGe-e| z;jIvbShAPwZ-a72f+A}xC#TVK!r8M)R(aj zgpkiCwDV0E=}`Nyi)7YKH&jjLhp% zB~?VN+gYn};B`!)&xmncDakT5Zdu=3)j2OF=65SC-eql72Z*5)JL{CD(kf7?de4gj z9^)%93W>lJot9{Eu=f|z4S8C|!kCmKporz&8(g!~s~6`Y4?uT~wEFh82B+=dug)c06GH~KF1BIMybh&?G}1nWyeVhyFeYsuU6k~BqK;U>(&xiK?kgb2ujYi z!*WgM$*v`zF?3t!PY(0R1->WLvlNmL(R?V~TWJgz=G$i~8MrE@$iT!T3Gvjl0eQQt z2JMSBOg6>uXq_8mBOv64zs}nS-uJ2ovwT`4k);)C4gKIGZwtZctl8Ox(Q_2+9jl_< zxgjkzZ)ojFh6_r{bDt98dDLK#CcV6s+kZQtzxtgQp1M~EUQ4)#{)-@vL@F*Qi-##*Hav2M-#q_5jNBIbK*ss=xWvnu( z)ip`e#6P{ylT$@qz7o_;2`ZUs7nLj~%QhVwL3CAzyhNkoAlrQO!GP6FfZxe8Yn-O9 zZ8G%7cGl*;n4apUoL=*4gt%MW++2LjO{36UK-s9vC<7p}bph?Xw!MO%)Xiw1PgP`~ zisvX5yY~swkr8|4qKx}!kTM(jhK|c*kCh)T-;b)p>uYIVQ1JY@;$O-|c@RvbsitqTfLj*{)G8lVRE_tF90 zJM6kk-FbQ8B~O2Y+O<$1OG!W|&V(@SNh_M`*GW$xSIZG_84d-?QwhH$+6LXC<}m(t zJbWp3R?p22)&EDhq>$gcq;(4LnGbB|dWrP9&7v88Y^9}+c&?4&04`qik!9_dKyta+z8r-0v{rdwu3Jx9-)FuIOs_+?=~B?h zVlGdJSMi$Wg_aS`n8TRWb6$^eYC@r2VgFJ`@&AMnaTk9$Wl5Xjh92)_EM+h`LS_|* zOzMnts=i!fg1B1OFhK=+n33u{^zP-9E-BAW5X6vgITp9yan zq<-Hq4OwVLg;8;5`)za|7M=;A!X7vsH|SXsE&YYFb4Dr;8-FeW5$D=(l=38Av=P*H z0uFxTReBJ9PQ+Oak*x6(6gjxF_9_2>foe;bLtX;z5&G=@imydpPUaXSlo1|lS zbOy&=p&l7;7~38UEeqTpl3&gn ze_n)f065j>5pkip)5Uoyz9r}G@AQedu^4S_*VCGP2Qv0GWs|FB!=*qlvQS9{9^E5Y z1QY)e2vwmht#5qe-_&(|!eU|Xz!i`}jH>H67f7+P-o-v1uSr{?tDw}zVLj&Jr6Wz=Wu@m zD}^x8%-hr|)ztlbE@EvnZ(?{=geP`4-EOz?t@;}{Qv8GZ&W|*q_|{)!KE$~&_Rr>a@rXZN`1(q{9AG+?Gx30fyiNi1qu=!dhmQXrD`9A?rWQ^p%q?P zKzq|?J|tjl-O9B2L*A8C#c2+ek3y~u;kTof)r4qeKFU?EIok3gjBKRrh*M-fy<~#s zMEy4CLy2F196gmE7x+n=n-i~3L~cDv#E%q11TD~>snCQQ0a+k{J5~Dqx#zyfk9WBz 
z$al^F2e7e1jD}1Y_?q|Kqu%{I5qA0iSafH3_rDANC=aGwRC@Gaks7I!DHAJowL4@^oj@`lKk@iiAa7wI@x< zgj;skP*FlyzTIGl-Nq%Q*vc<;mY!$zBMX)|AhsVr?MtqVM94pe#d)KHgsyw?+Po27 zHsF$LG&y=1Z9^S0f zl5*k$Gk4Uc=I zZ|erF<=)s%DfZ(hDE7tm^Hd!KtA5m%C1oHVP)Rd$Bs{CK~p|E0@NNnBy) z2wE(`b)Q`a+Jg(X&8;;Nfa*bRSegg{yfr37@ytGRlpxhiGQS_%aVFtEY$G{GlL<8b68sVXll zhp@WC1j*Qk_{+f;PZf&ayg7Hl>i>YTfSTaamUFP*(7yJm6(GoO{675;H~A8zhi_P^ zvH#%A+zYTMeA~=lmn3n%oNl(EIA>u>!9~tx{t+sOYEKihEBc<8Y3s4pgs|%a^oMD#RN)#!mpx%|EI)I*PMfp%e*pQXddj?C;{psN&(7!u0DQ=*=6X zpL2dtb^4^q$J*Yd!_B&y2QsNJ@W=1#Iw-v# zy~Ek?dL@8!DjK|=`+#Mf`VweDJOX~7l$CYHo))J=N!tU(-g^e!DRM9D_V>*9wpbe% z8~>DZv`s*o;QHBfx2C$eC?wjR}Kf(euh>~3IFEGn|@%>!`dlR@NXhoxj zTB_AN&!um~%ej3vRIb(ESu+S{Cb=d14%%Gyc^kN7Ed%oSxx9ssV~R1I?nV`$UA}|D z@CWsvTM3^@xUGk5r5FSCg~kAnx^8$--qA3m4mD_u#sgKVf#HqWuH;Zt!NUM*-p7s` zbDAn;qS9$2*FaV7g3@S>!(A(M%Ufutyq`qG6>y||j@#AhoV&$1V~=5E>nC1E8OH6z zdIhr2A-;wX2t?{9N6P{r2kdvYjxv-<*;DQm87a3Xf>TH;;JEXSB@QhVN3+(<8FBki zzB!g(#S1_03~{`6(n#VOZ;aLIy&qu=(lmX=Hhv0-&L44S{+U;{-?}Ld5_!|NQHz`{ zX8ouAsTPfDsraRhGpRF4MHo)~di*sHN7>(RWU+fIhWgG3>KAXK^?u>`Yj>7J z<1}zEJ;*!x&&}kQ;36t+W?2eF*VtG$ch$pyAPZ`W}vXHpR4D-lOT=kiF&dAW;^zSm%qTFv*+>) zJF(eLw2M-%{#%4FfB8tf7=gp)5Ob2N1$mFf`7~+o9)@r|*u{6dK95qBO`8qztUim6 z1N14xGZy2WW9v6pcgF2pUglbLMx9un6w@OM!aZ=4=118K@QV*ylnR`10iI)BXL(Fi z3Bx-eK~`h5J#GS_if`BWIwY1Vb?v$Cg9?tfO+09;F=c0GO) zvjxe_N?n)$4m}4Zk2a~_b1JtbF$7KrG)`0UaZw{K6#ul32(~nHRX0VX zJccYZyEIdCc5`Hra-y~yq(i1rt;$0;ZhQ&GzzWpWA|XQyy?FC!t0_?DRBv0Xc#*E725t+pne*#U@m$@ z!)B^C49omdUV@=pi`r13CrwB)0dw0hT-LMwIO?91Xi(r#sMx&gK~IJnqgjQChIoU+ z0pRe`ocGrSGO5;g)w)wIe?GJ=$%jDnQ5d7$C<27k8UUVR+&V2)*_PVSarXbmI-qOE z0BfTIHXai2NE9GzYv9zQl)!_?38r$o#;I-(Tk`=Oy7K*{$k!Cn>$(#*uyC4DSS zC>Gj+fnADV*8^btSINPchpFcb{8Fm?hjV!hI{rP2k8iu1U{$}h^V)&~LxM&bF@lmC z25nb3y#=CN*^ugRx%uYF(?{~yq!4K-7h5MUzqUf1lOC3|A1cnP%bOU?(5PUB`s(VW z`3@bSITUKJOdDMszrtI}mHpJ&$^ddbzVoF+a=z^&7N8Xt$TtZ}e4pc7q6eTi#spCr zn|jAc{56Spgy}S&_B#-65*YE7So|H3tAgLmlC)=`T=_rQV z2)&IOKX62$Q18?fg{U!-CfDo}IQKwXt!hQdY;l-BgxgBDlN-o`za$Rs64;Sk%To@O z59uYWHRUIxfye( 
zpBVxHs)_jvO#2vd7iDC9p2N9+`aEsezsY_H-X6gzd1Ekf8E>>CACwNifpVkIvhrGG zou?a#)xD7x~B$Jl8VKwxb)Wbn>ag(5*4cLzyJEs*aNympi2~b6*20} zmQhj!j!qs9yRjp$z^&Puf`DNe)l7AMpmRsOX#UV$$ok9-zbndxMcqFbx-ms%Z|;_( z!)3;11MD)SO;C4~$Lc2crM_&N)0}d~Tr_i0=6Q1~vw5-yPP+Tg{_J+ zyK(YTf~=zW9I}ZX+L{6$JQzwH%9BZ>SJLl{^<(PCvNF~v_J1t4mM%9AP=r&n=^DQ~ z^Sh1N^c5RV8k86Yj7lJ7x#U5FP9nJgV5A$rF!|aFHy<)iV1kP-6tl7Hrw6B%a;Mk^ ziq@yvpM|q%m^wypj1Zw|@b#Gn=}f4J}F-7_70 z2+DbS?;k<&Uk5ONPcEb@A_ohJhKme5s{ya?BxOYQ0XV?1fFx0b&3p?S3|m0r<>0}h z=D6v(6nX<2nsoW`L4d#B()s=l*)$KuB+)D-6_5+@|Ni>^{rllr9tD09Q|1L{)Y-Q# z4D@`wFWgt*=Ju{yHbu6f&^7)X|I`jgo8^qG{X1=g=c;a|V=3GU)*Zeq6fNea%G`+$ ziL4T9r(y>dMy^sLP*m>WQLsH0{09d zW|$5j2fP~qX&-0bhx~zlia&-3HDn;WGA7GNjymjzpWF|1f;3G2zr=B(B&uXh zvQdn_RqCR*mzTtB&z>MYv6EXUlx{~y5&x#1G3bn(b~%}0@7U|Y3+qit>u)HZsh4%qfU$#1IcY*H*}cZAuExvQ~soh5wzalvAIeZ zis2@NeRm`-5H^Db-CgavriV>{q@nciFEoU?4*5`DYhfP@q#n2i0TJM?=+iVcUDQVG zOq8z)r|NKhaDD7Bea-6h<#b<#JVUugB=kQ+Aez&580y5^hsuMfP;DLwz=qbgzqNd4 zKA+zG>XuygnYV*VAUV?TG`;RgHyYxw@Zqv%4@zpWS9}yx7P$cHm+(4IE{T9$?ygJ!c^po*^wHCnw$xWaCpqbsGeHq{<(+LwbSETiGe1AyN* z=u6)o>QYR{#MH76Elqhc4hB}BZ7W)CylK^!_=v21Mlq1|qO8n4lWZ^Sm}(EOQ6Q4~ zbJFPpLP9*&*q+v73Pa2aBz5^eHKULe_gQ|OKXl5F8Oun{8-tf64XEE+|TW1Jbs3X-J`cwxhpv ziwDjTLmErpmzpOx!gYVA*1i@(8VDwZCF?<07Xyu3ca-H^Gg^-X(4R7u z>0seyo$ryUYYeZ^*`Z=Ds<{WW6m>ML zTK|ssF7(WNu}z@nIPW|ig7&=<%S^SO_9c#-IK||*C#9=Hbk3T73;v|;Qg<858~RH% z&$>hY2x)$f4poz3yzbZ5W;@@XiJ|NGS93JEY|N22%7A)QXnVZ2-N{OfmOISL>g*kU zoiq19AmhYUJ0p!e!$i=4j~ecA13)No6wIC}n@BHcBKi9H8A%H0UO?FE=0Q=fN+oA7 zcp68I@onGfRG!}$Bch^G&`}kKe)jFgZeJc{C9x>^?RG;!qzQE8AR9nZdyNq-L1jD; zC2RdRGyN6vJgmaX(2U8tZA5R~I7450CF!?LPUT_!aGAS;d>Hmg^&Z@Gg*HRRd8+qB zI;TEiy_9%*k!qUY1HS(aVwHFI6L093vA&5&{z-S8?tutnpl#jvYp*zU{pk#&i6jY0 z{w3PN_Bem|M!B?$sCLI`cR$X?_(4mHxTU2f^X^Ip`aOc9&nexkY~*nyzn#YPHmV}mD>Zn+`)0@vpKs}7@i2SY7td(E%*uAL z!%P@GGc%11fY4=3wVH}(WNkZgcNY>M{5y2c0@43{fS&WwQ(Hlu2bJVwK1Ffc~}2HGtY1vfI)yo}T5XJsY& zSYziZvpFaVaAb>tkLSxv>e?J(y^+kGO0Z^$U74-EX~G8gmrUxcXg>V?m)rE zlNrXq5Iop(c%w#w2(p)|Gh*Z3*&R{6nW573Mb4U74L&Mv^_bF4gKwujD1+y^?HRv{ 
zxJk;q9)B8X$Bs}muT9IMhWXdin%ya5DS>Z1U+ylt38}5p*mOM^y-K^iD5_wx{r#86YPjTGdO>bPZFAhjrxLF z2kW|!`>%Oi7e$4Tk2Rmh3ePbA;=1go=gd#a41=PmM^#34Y`0*l2#smcM7WoqwGqF2 zEE8z}L3aJiu<5I$*!CMdRE&0fsfmvp9m8Y2aODJF>ZbtY2QQ~o28((YtnsxAm8QYT-oNZTxb1D zpVW-vTh6m8Bj;ehu8#t$F`W@%Ra@w?o4-`UAYgHSATpIvC%uIS8F{15x$$;^S8w9< z&TM8}F)3-bdHREoj(%;E%IK8TjPueiGtcxCTTv59J&Ug#4Tq^Vdg_dF#o%Oh;6C#?q_GeNBX))8pzN3uPusd-# zezGoCoLH_n-6^)B$70*PJtZbc5zaD#unZ#9epAs3lSE>b7Ei!rB^IxUianCmC))xZ z;pcAj7~ITkIG&q|8SzJV4XdFw=AP-L^St$Xw)Kz23#*Q21>FdC*NVZq#+#}lRlZL* zELj_M>(#G4m>@-0uCogVPFAtpdHdx9KLW>e4LAAT++<|I4vRi3HtIGZ@0FSbE&}_? zlQRMg0((V<-MEYdi{z(pnwUQ-)_1?X?tv|^IVbw<}MW-901QZ@g&E$7{H%QaapjZoTgcs1}GPz0QMkz;PHQyU?3pDkfsy@(%2 zDt%n9BBP-#Uk)&9A~$8alzBwoC=qRONkF!Patua zNvox3k-(RU4@x%jRTc?qXyHR%!hmwm5;(TK(%p-uwQ6D=1)F`&Zg6>se3sgEE2kP7 zRQ&7f$`vOkXQ)UIOIZ8X=YSf74=4292PfZ(ps_`rS3(S@FV0#&9qB(F>xTKEjrqi8 z#e7IU{?Q!O$_MZ4xT?;;lraIFG-kh}w%8rr<}VOZbXRG3tpi<}?b zgSkIGcLZGBM%&|Q*1r&zRMGnhY1zdf1Z)lodEd{$d663hw|z)-FrjgvBkH5A9j^%q ztei##3}~o+D<)8|lGhuykeBIVVqeRsE)bukvy#v&HMKRSA-lO@G@*0ZEAO0;>uS`< z>$1%ZW;)D-uEM3N&1N4XOSinAibX5E5_;hc%O6msNh@|<<34KcTAUyI>R>jh%*|ZP z>|R5f5ZFL|%*8iqu1t>Gqe_NJ1lB&`*`TfRPnMLFRD(}ZULB}_!G_gVv8&M%WO4i( zz44Q*$SnE;N%|61a7QMiqXce>-Q3VPNwKz7p8LXKw^!oA^v!Q5WRe0P@&DQn&8jQh zTu?7q(72`c=6e0C#^KHqm~zv)>4ITL6|SAB_uM|%uG@_RMx^!4cJorqPg4~cZ0VHC zOKnd`l-Y6)E(I(V*7Ik~EFqD|wt?M{;-sxTbSA#EgM`g_WaV|zeo5LKl52IdKLJoq zK)XNHM0Vw!g!GpWW4??TI3P;i=3f}Q^3KJ`RyvB$9B6x^MXmc|iVRMrH9;OIoesjERVIs^Xcq~%dQLTE@`tGoyq2DkAAyFFl8Slgph%_ zSJtsQd1sAj`zZ1QjKy7=u;_m|&QPr76M1@kW8|Z(K@QfK5U4uSbv<TraUBv_{q2)C&E5E{t#mUNmBOu`etJ|DCIb;wFcng5eI2L_yQ2{ES=jFm z944+*?x>1Dao0oD>;`BHl22S>cQnUd9RoiP4JGw*uruDHZ8jlXhyo$ zq2ksO7`zo9-sO%3W9v=ex-_CoMdpXlB9codLI(^9YatSfM6paa*NBv_ zMk`cLaF<(COG|5*d~?8T#C)+#FmBl{srSt^gd8|XVF}Bc2Z@S8?bend@rOQWaUbd0p%1 zxP_TRqe@jW;-It3pkUDQ5e$GvOYblCzPWCjkj$aA`7xq=P3IZPVUN3L>t|5&4gELh zx~fm$vKN)cA_d&iGnAIm9I+LVPf|Y`*xPeO+xAb}NU5a8%Q?4yo#`%E(<3 zY&F^2juH6TYPv`MIIxlB&>l0{Sl5|fy#P5TyJDxcA{1jYBa!z?nF+$S+RCKA^F(38 
zc?9Cp3tIJamtU=kkh}EPWOhJ|$AG6qSY3*>d>hB2hVqX`{T1_h7~|gPX|O$5x#W_r z6{DDS0at;7I*&#+$jYQIJFmk-H-m^)Km$Z3?5l`lWo||nZ4Nu7;*XR%SaHo|>B+g= zwtb5UsPrUxQPqceS}+}QtSW%h)vB&wa+Z!2<*HR53iCrx%c;3t4-$kisxS>YBRVff zvg`nvHTIfV-W#(1iib1#1EzS(pLV9lQZ>p1qRp$$vLyu!NopyCI*Kwq1;*MNIq!v> zR)JAJjlBoWo!Xpl6(3%}4Wrqg5>xQS(n88*&wevDva_*4$q_!2o(rI16L3{%)=E>n zxN@-RMu!>Q{bA+?F|OgyyxmeXKMN3}HZQBc3iVWN`1}FNt*>EFbe66n;3I7dr#=%% zxh%q{M!dMaaRx&g$}zxAPRN~>MMSTBU&=3N-XjmJrNH=8%phs>xDR)Li2kzPcB96D zWj7Kt*chgdyEd^%#F#=k9+qkfh(aa zxaB4stNfSseZ}0}GoLvIhTXQRX}yH;xpQX*8m^>FkL>l<3y`+(Z}N`CmECBa6q;Q!x5 zLFKfwd565IU-xC1R7E}D298oMa^(c(7|~j`i_o60r?Qf4HmfsbgcZ5(qo(x}#xXK@ z^^%}reAhWKDvN)6LG>BY;zy7R!?;5qFcH3|$~)&_3Zop&2Q5+!GZa!8TZ}3FP7@%B zpIfeNZhFEj&2HQcpIXf1aWhd=6j%EbF=`TlmA3>e_U@7==8v!dEq#wM*@F6X&4qex zn$<}2PAN9Oz%ysgAbd|JwG!jfmhTP?pqbnw`2`>GzIEVV|FZ~Cpd3&Iqi0gdXX2X{ zcT({6kKM>c$k{A!L+C*>yp}YB&$ztfi|W%7ovX2$Lw1XBFhno?B=qZr`J!gtv@W4A!PR?mgYk~^&#~eqk*CDD7kNUxuy4t3fDRwRjUlI^du*1 zkE$|#d=w~5;38Uxj&;&?c7aijxSS(<^B<@`gkYm?_4WnXiZ=E=>Ak*T{PWmeKG4}4ZZ+V?iwV}2;aRYQm^WAz=u zH*s?m5)sbDS5ru<8ra7QR)L_8FR1D;p*M?HWU0y|!Lu`^ZAf}pvsf{!rYA?UV%AeQ z>P1*F9M}E1F)8vANGOT~2iv3+l~E{kZ2Ud*(b6e!A&#%|Tj z;EPxU_FlV1+PF8#S@E7#hv`%?%px@;ZRnaQzN>Osf*<)<@Vc$L!z5d3iBj!?JhHJ{ zs}5Jg-g&Ezvz%4im&L^(q2ukBw-!Lcl;`={#e1-@c);wuRZXc&r7kUo=xa&0n{OBk ziFxbg#f+<_rY6@Z$+DKh;Z~}fZ(O7`{NPic?}*mub0#|D7d1v35D=7iyfN7M((Rf0 zb~{G+IV+a04QV`D^?zQeQmUPrF?8jfsy zO8lc}JuR)z*#(b-5HM*J%3+)eJP3B478nm^4rVMR+7$*h_e3+=iutOA=u#q2c9ST` zYN#N28}DON^TogF6sB1ThRk_TRiQqtXv2Cw59Q^ySO4BqC(~(2(3`V#6c5kBxPPsE z3CdVylSN3b8B87vgL_Ysv2X4U+i60@V}_;cO=vyVmo45q?@{|3PKKBQ6)Sp&OwIZU zgd6c>7srgTr4RXGW^Kgliv59#bRKI*bXwnJWG3U2y&>x#9@yP{W$jpvTTT;S;t!Zh zmpoGeuoUF!f~;(%Yr6^OGA5*e`RJhjde6C!=o}VHGTfv}!vAJd|9Ch+I@R=O+}##e z(#@Que44I&x$^Ie7orn{t+5i?SUN{<8|FiO<+_;jk{d`^UAA>nocmHxsgrMGT0EVU zWjV2b`Yy)nYz?9_Qh1!p1BpYi-6q*o7^xo_1}M&5pw+5?XhLtul8J6>Mj;c|Z9BqE zX*Ck#c1MR6X>j7UY8Ym8o#u@+o=sn0g_>OMOmyTc%*^F5!HWHPGe$M}pK+J;njY;r z?p6GXfFY$Jg*>xt6!RjxsLY%O1|H14V{;j_9}9r3B;QOv5VzkK=Lx}Gof7Z2CIv1y 
z+7;s=ezVNkTQI$ydU(l;(+kE$NvML`mFhLhEYqziJUqZ|dLLE%mCWIJvg|npH5(oK z@vF2&AKz;UXkTt{Ko5;LOie~rP}`2UY^FFc!_e@yk;6#`G~_fIG{bw72NIgkO0P2m zVP^>}MB=0aQdr8LTPc`x@yel#i*yx?0wJZdKefAe?#!1)IkW zDiKGP<5zCf4}&yMQS;79()!x;UpB)#vOOM-u(!GxCkpe=ZvBHa=_0*y>`wsTUCTqQ zSujO~wS0MZv*K|7Q|fG%#MkM1Zyag5x{?R*?}Ya`Og!<&Cja}-mAuGaMCPo2q9ytDFMA@)K!jAKWUjDX~rBGUIX z8w_I>yF^Wmi~MrqV6oOe(p%BJ)Bj3x@6aTpMp1qT2YsO8(rTI>wLGf&09iPuO}neO zm0HoCW5|ms_kw##O)B1O%0ua|!As@l+Z!cFbxkc`ck8kISa(B!Cj z*{zkPW@bL@^ke$R)Di%x&Zw)eN0)8^`WaI-V^4YojP# zc<4Gjp4g`JJy!9krdUfTllIJ?Pfs_ZA?nMeLQa|&*Ilsw#wjuputAF>sot@9)bmZU zLOH+38^)sF;@kR45Nl%>T8l?}uq1Zt^qri5*x1-5A2HxUI!wsnE)-tCC4-^|`gJ}3 zf<+idU0vM+CW&tYJ~6UCPwp;j?x(8=vYFi%vpmG6GOv^+dN9XsS&hfZ6nVq;D$mr% zS5<6r+f9TZYw?#?Dh&e*1*0UeV2f**F9s3Z1$4elCCY z6|HkcHfH#oSFeddK9OI23*RdB=(Mz(VsysoFZfAl zv5GP+6pn{HO7%^iS@)+7*CP8YxZgQ+ixe_0=`w`wN}(c^)zVjNxni=~1=-D^&Bi5Y z<#6pm3HQ9#}eQZck-%18G7`3YJ(9?x4!n~@bHC&@Z7t0|BSoofg^kABoA#L`=-`r zTm`PTMLTZ&0*(nVEaJQ=ZSb?$5~(<7U&WeZ=+xp-2tSU-b-3 zF$L2R3#-!ie0SQT_x?N-y3zhJ-d^{-%gf7|jPLt%UzAA;txo8_@INk6^6XY=L+!DV z3buvkF=&b^e zuR;(eOc97fys*^L@gSYkv(TTYLaHJUesN zQDuhX>qLYbU$jLXd4TkcB$glS`WzfS#D)KOW_O{P(~kubS(w!Foo}@SP@bu7S*A`7 zQgJ^n2GkP4S&3FfxFU*Y5(Gs46smqzRWHJmNolg5J%Pnd$~F3$%o`eA>%WRpTNQ_! 
zhhA2#F49}y#02iozo0+9l!rAf8(rs?ggeI+24C=)xUwT5^3Gjg(5~w6a zCOd$fz9wOeB?CR$@++*pM!wMeNw`7&;PUa3!Vexvxb%xosP#;P9g6F}YiF2S%DM!% z@~PfBgc18Q8a>zqV@-Y%(G`N zKKUViFfRRIPF06+DgooBW>lj7$4x2UB;=i`Y2oSg){2q$jXM70M~p8D_`;2t#wwm+ zkLh~-YXP6jdn`P?=^x(s8dHU4g^y6j7ulC$?|*Q@{9_cfMN_AV#Anp#PCpvI{nb@Mm1_sP@Vs zw5QxOwhtM^_73ao>#JwgH+RhIsqMxkzuUrB$*96})HavJ{3o^-qFclgF#7J~H6Qe6 zxq=DcQ`_IPlkDCy_4Em3a~MiUE~XD{3Ob)ZMxL99g#O!iEg;N z6UdwkexX7y^w9*1z^S*oDilJyrKpiT9zIC3&S4y6HMG7cA)1fF)YI)u{Hh;d7AjZ( zt~TBH#^>+4U{VrpxGBTHc%|dtDld`}_I@q^>!V)p-R zK>B}A(Fv@1G9PLiEl5s0=8Rn zbMEF4VV%mp3=cr*aN*kV1LVVhzTkEH{J)jPek=h!OP%BUe<1>QBY!t6hYK;nZZdf1 zl5ODPw?2Fm@87%Q8J$uZ+nsx)XT<6|$kV>y5PnN{CwO8RrA!H^h~FFR;agaMi`Rs2 z{Cz+0ux;!o;@CI>I{rQW?QK$-v`($(ovjxi4&SML00m3!KX;K%HpLuPWxhC*>?ztv z{7Fy$%Q~)#2Yh?yDQBObmVe*!kxRlldd6ROkgM@y{Mk-bhfX7cbmQ~X2D;Ms_@e7) zZ}cR$HawBKxb$3|m{scc%Ot(=QBlw1IX;8X$7@&%XO*iApB}CE2`o9xMj<@@2Fwq^ zVjkoB3&jT<4ueVj0R8y=s_i1%xNa}*H$y6kgPkKQK?wmt6!VCNVf@0^ZX z@P^ahHyEhWd~Y<+GV#Ma{oQMK1_vmMA3eIoO5h*h`L!J>S<9jb+kxdrIo&RI?#ZJE z^H{^J@s9W@EE%dj=Jhqp%2{mZB-!QvSxCZ2kxD&D=D!g?6Xt%O=HI1B&EO%I{ zOF9KCmmxLHG(IBH#tGH=MqStwUhV`>v)?j2Id*6-RA6cHR~a0SuKw#fkvD&^T~|1L zbWNdf3|8T$3VBd>^V2%w8_s_}EOo)$td0I*=kYcrj1-1$P2(RM@#hV!T>qiYX-1w$ z`{y5p$}vZlw<5VOCW9wg+*9ay>2R!7;fCs;(fxCD4G%0Fd-vpj#Pg3&8s@exNQz85=(UhG0)L6{Nr&Qw@^_3?w9Gm*XjcIK9+s7 zOMX3q2j5w6?0hv&Lk&n+`QAd!JJKq-WXCrXjag`=2c5;#1Y?ODY(FunmB(jX>j(*w zm+h8CQx`X+-GwMm57Oom4Lm*Gnw}&O5%hKqa2?+b{9t+W$7-T#Qg_nP4Hdz;3!Pm{ z(z9A_KC|4a-nenrE@*@X)9#Wszw+CepC;+;F0eI3^9x6j%w1gL$uhx;AzadwVCYI0 zKiaD@?66lw8VmQ2-!f)5j9>|9e7AF*5BzNm;Hbc-9GX&&kI5tv7&b2_1!-yy57G`tv`(PjdE^V5S}=RK<11 zPky0CfBEo!CF(LcZ&1E_+?ky-rx{vC)E zi;W#71newp0(p=*f%lJAQ;4n^zP}?n#=oA_EF@47d3TT>umkt9uZ@)u3H*AY-{LCG z&CI5lZ@=%4)QZ`g=nce4kb`Zk%yyOe|GNsmE?-N!)b|v*YPR`jc|Fnq%h6ysd6#f= zttWn2>dbP$nKQpPO)!l|`9en9NH+&bo4&)9wext13_@?gjeqXH^ zz8BpzI4^`N_aEF_p_e#*lt1mPfB!da+=NP&f%RSrnOMnRCz_4vi{UDLf9~HvnJ|A< zr8@uZZSui4!M|<-%z*8BdV2r!*0#wz%FJk|PFpc|q59JywLC~%y*-)Q{~XPa4)7K} 
z=owr;x{F_{GLOE@aesoB^D*QWti~Rq_&pqe1RFfePI`-1~n1G_EkJMII&E{N` zBPHe!9_P=k+1Pm=WMm|N!>+5V`Z1+=#_q}sF!S_3gu|G09>H*}Kekc{j6!I4)0^Xf z_}BY`C|yvY|99|*_2ziXLec>-y?{vHIT?i*Y3T)Hj&I`-3Lj9=F; zf_n-lRyo=W5FLY!yYiofgnQ(}p<-GB zi4^y5_^apx*Ynipwtaj;%|?~(KmXE&S$vvn^?yBzVl;fVcANWu7pemjMo~Ku9($?E zD!4w=U=w)`CS=T|vPu5?6?8I>z$?wZ{JWmB@ZsFy-gE!CGTiL{W9+-*sea%8v&x8+ zQHZD{tA%8wLn$({Goz3l*<@B!va+*9WM-3HDkLMv78(>;k;v?Ky`1wp$NTgCe7=w0 zA3Yu?=e%C`eP7pgU*maQ_kHOilSN~O%4dB9k)RZohYcOhsl*^N=PMz2!GLMggPps8 z#uY>w{}1m+tFAd!Z^FtMm0;g)q;M z%ZE{0d=(KLs!>5ssEP*FQW45no6g0O zqc*}k7bCC{Oq9ss_FkE@Y7qmciZ;B|PpLYHz^GzXSTPGbzl(B<;j%#YXrcU+`!qLk+IF3wOU7)Yln0(x5E$yNy2cAn-w z1_{EXkq-yYTng9^Ufym3wRM2j;ZLof3U|927lVbqn99J6dqgD!Q&<#yEy2BW?^z#A zDyR$ANCytW>wK4i$%(Foq-{6hSSK@^upo{RKhYXydmtb8>@3RdEzGoT;o^r(;7c23V zuomqYBz9V9?9R6_X?}I+rZ91ubRZPtoL=403qA(jLWjLJo>LLTJCEr#&$JtuWPC8$ zG+aFMI= zwj1#IeIDvkF9?d_k!fpJ;WK7pBq8lX{E?~E2e|kX7{$p(%3=BLeBwEjy|}?jD7tr8 zXDz=4w;SRPbOI(&wWFNg5t(NcE_zNkD4qYz6P!M!VUlB>c>N0^IPts;@QgyhRiKZ`2?4@RXwL`r;6EPVd95%znC;0m!yV2W zhG}UhxJsp#Lf|UZo4<%pqU{IRB@-f+pbn4uWEzXf!|wlVD6tFhy8fy4wTC z;BXr&{H%QUB1IfG?e$)W7&z%>THV-SOuLxx=b@o?1Kkr$b#=Ykd-k$V=wq}FVK}3a z*FM&YBVg}xkAdY4ca(S)LhXoDf`u7rfy^ACL*)asGmLYVd7x^Wfm8qX8)CR|&ZBl& zR~0tote|W%21cNcu+}5^zGE>!h~N);F$67#d|>8H)|=cXr7ATjzi5Kh((dP3`2t4gF&n-^|HndqATVM`m@!dU{_Q3s!zbXB z@#jA$#{Z~U5CbnvHxji88h=3rq*y-rc_sh6i<94nxpRB=veBfD+Q(^?ZYLpi1Jx0} zxP5?7o`PXTAVe;jYyaGUe$-%DyMI5|8Dk74%$^f3Ph9?R>0|a6}EK z>^UFIAy~A@37gvlt~4Uj8py3!{tph&2kYOoK6UNQJKhHyk@a!(o#d1zh$&(&>h_KK zTJJd9296}V_Fx$9X$%i}`tFI>zi;`UoXEpIIHw(sp@RWOj%AjfLs=9mj>x^j10Uim zfgMI1z9t2&3y*RWFFSN0?AhRfi584?UA?EKE?iVv_hD(<(8Y~`lEX>stE*Egi%s~* zA=mrfzl3T8QSdc$7SO5(|M)lpa<^995BLW=ukS`|=8w37Y~{&36i|U@8lM0i1g__{ z9IvFZ6e=Ixcnjtt;sYG}Sn19e3(Oj0VZA8t_zE;|Gi>_8_|pXh1C57;xRr^b4KIyh zl#|N%_Ov5!6GVWhFZ+dl`dfwB%K9BjDr}zwaY2q6Cqz{bxW@CsZ@;GW#n1ukM>rpp z;z7lj_5lqM_b=NkQwFeE+H=+)Hq6hQqs#+0uPJLf&{D({ zJ;}9?Y>RJC@5YC)#|$``Ctn}eN^I9`cq;8v_XLP55Zg{ppc?r%rk}~!s&jPcKE#U; 
z<-sJ)hyTf%D1|jt?<+Ocnx^fpJusW0Tw8c(vA@H;HeUC_>2el2VUof0x zFzAWUgIt?&7axq=Q0?3QgcK%_ZJa1QyIIybjgiNrj)8lOY9TdX)*#DGj<@V8`XXOCO4r^@vau*) zB51>c(QE+oN$g6&_&rCH*g!>OPsbq;G#i$kJMdKCe1kxYDYeL0)fr!P5?l=avEp4 zCr#wKiN6OB91?S>@7K>^^#Uqpo4ll^a8DM?7?qcaIz~q+Dh=WgrCjZI@vytJfi=P5 zR;YIfo+X&;@K_DXyPHQOzrGXD@(z@t^o>O@FG({Ol3NNxe_pGXFdA%0K8xFa(x}F_ zEW>$V8li&&he$|Uz`MZ;$p6~d3L6xSF{mTElt*wFH~uD9SvMc8(4buwVoeKmWND|Z zX9*gQ{uyGT3>25nG~RPwYAayh5HR2C8<~~(iV5Mf-FwLcklI7-rBXeDh_q7varxU? zt{3@_1^vL8F{7i=dBd%b2%<5Lco6G*OSol3rEIR2Qf^GD(GS&N{mnS%xcSYH_3bUb z3*h2fgcNC_7Lz;x{QZpHrLtS-=*aVF26f+ME!sl=5PrrCiq5Ryw%DoRZO&qHPXux5 z;Ux9J%85W+-Ev-kb}fEYve0SE`a{rO9fv**uVbTj&Jt+}oT+mpSaCM+DO1#w(w}b9%2p}O%fy!IEkI%hC zR*VDn1|d-X7b_tN5Si8Y=Ju}Fb8Y5{l{x0&AQ&g5Gn^ie+;cjvY+I~sRr(qibaxy; zV?R>XJ!~|0QjN!{R1N}pbb+f`*aN2sGml3i2l@RrMEeD-6JwIAG3pnx5>qO+K}CWZ zDQ&N73ej8izkSf z$TjkUTbfe{e|?N#TYa4(Ja)E+&CzTkW+&dlT*d)bDf@Tz3Xzb~QvcbKE`2Hkog75R zK0pi#BQaE?;nZP2->1Zq+>guzMKOkWV-%&LB(S@V6h{|vuQr6uh!l8{wMeyn)<}? za|&!cmd@0QFdidfTptzL&JgGTALPn>+8%Ty?E64ipYR3vnZw?e&A|f+DGmKkuJ@3X z#-*eXkxwQ8r9_)7hT`TpGUy)3ckG*^N6NkcRV(%|pdpM;Z(arzaek;r$SAAhlj&lE z3Bde+SE2h_!Vr0A$`VH-xm2H(F5&C4iiB0_s$ol4nx^wZ~z<=nmhgbJn@?jvSAp{W3eQ9bn}(cT1d#)0cuOeL<={5me- zfL0E8Si>BU=6tqF2m^u$@GABKE(7^tJRU$}X?T+H;D=JeaGOd82(B_D2&d zsa283xUtv;u{D1l<{#D{k7;8l^ze2emK%?YXky3numT3Z=o_E=?6@`;rzb&>K{=?< zO>i8MX52IgJ*QnQ#-qt`GDiqG%yfqs)?tny5_KZC$d+&EbNsc0+Xh=oaX5zVL3EQ} zpJD0W3mH4+-0Qlep)^seBU5>-7?(^?%^*0TR=y+O&q6_ff5KM@`)ac|0+A2r0g*{ZhqgITTrADS zSuZ+DG^B|9zlJd*{ns#hBu8UgZ7gj9QI%lCj1a3rIBZ9~4K6jz8|s4_hJ-X1Rf<{M z3n1>13bAnj@*^g~ZtgA-Q=x)aJeCXR+`J%_JtT{ne)1(jgmteXZpm$Z>ZZ7a@+t6p zs0(im;61Q@ZeR!&XH=$ihr}V?MHu2A#;t*p-&c{^m69*qf`Nz@0!QrmKS_yfcK&^J z5TPFFuq#pzJ%@M@z^So(Okrg!9-y+*0O)};2$<3l!*raWmPX|3zLUDWORpyC69_n0 z;25z#?ijI)iMO#c-Zh2a;K=quSd#KGVH`Q%f1*ecx9{HtK!}yXgERbX&V9mG=ez`? 
z&qXNR+MHs+lu|%?fdh)Go?9}2ZZo;1zNsYQ7>;)PXRq@N&A&$V2d4y56GBis9bkxg zf^>E=5DZo#5MzWlZeg>sh4WbD7h0Uxi3zNB1dMq5@r0XJ)%b(lvn4xPOqR)s;RK}8 zv4)4~MVV*RffVn%IBbY{NjN_n=KVd`dw5|9!y|OfX47ma8836Xh5RtZzNu!Oj7PWT zK@b($ccs+?sQ_9cr?q#OUelI}k(al>OE68LG`=RH%^)cD1xMphCt`y=P@w=z*j%BI z?+WjIC@M^dIH8bc@TnSvjV!?8ym)0u<#Wa@z5Da8yq82jdi!&x~Cpb^IO3L zg8tm#^Fs!^kcv}h%m`tM$z_m9X9pg8{3;DhsY#Rj*yRd`iI;Qw_Z!?D&4wT2Bl|IS#;&VtKYeEnw8W|BaJ zAVvDk7XCnw&Mof;Ck(%i567em+oglqfhzVT--HboqxSw0MNlL=lZcWOFN(-|jlB zPijLSvmWMqQosxcN^CI9Z)dw|27q3w#Fv4isKvP?7J93zGyS3wfsBHPO(*RXRvbT% zoLj_84QlY{D<;a1P|`o+PBw3FB8Vf#=q72w!`6}eJI*Nm9ex-tkkct@oymjV6k04=gg0I0Sy68+WQbg4arI&5_B2T{UMcD&oFJRHsPoAZ0 z65aKB3b6a|B7=VX;r~!s1XVk|wXNyP$=7H_)M2F7Qzm^deY3^Gk05+-!$tk8baVOV zcD=I^%7i3HBLebo=XtI*)_?DtKuRetKDR$eUpX=F|ZIeja|S= zOFFUbq^93zKdeP#z#hW9`jOyw@Jlzr-yzQT-!LiRDN0AVPmN3s{@^Fa>z=N$bBEL= zovg7A%2G%Sz>dR;-Qq{JH?<(Lz4JQ4j%g)Q0`{LGXpN@!@FmxqLT%zWX=kCdun6a` zbF;sEKuDb-g-+nFZ!UCNq=AA2iQi|S;E})gCLt12LlB`iO*>f$bRvB4Wu)5Bx=NE- z43!h8R)wzv5Uh!Z#Sp4;o_dZAel5GjAgyd~$%Z3k8)-pyD^iFk7>o=26U(e^@w&!W z5pmBPm&rFHl=uOY1xaUFO8?kCz&1d-(ne=sKS4o})9K(qKzAv3V z9pc$0Ure#VFBJ@$Ml5j{bfR|M*?R&v;ZPRDv6PR*{FiO!2-Ph!_XC{7qo|3M6=_JV zBXwAQFFlf!jg>J=coQI=gxfRb_k5uCkf1(n2kIEGR=b>XOP6xov7Eg_(stj zOfnwWI6|+CW9P=V0Z7&4*T++_;lKr8h_s!et}*3~YJuL9G2V1z5E5Z@E@lpq{o zz)*b#;!p%iP`*DD>M>sYy;q+Y-;v88tH|c;6Ym_LJQ1&$f|q12=E=ZF9==_vqbHzT z8TSgjR(X3t+cbXGowa99Tpi z=y#K2)w73q)*qa0Zr^@Lr4dK(x+)|O8kr{^Zfs;Nuszsz+HVZ41qg1rl}JV)tu9i| zC6ad?O+piGEoZ1HA=_?AGjO5QJyEckiiuDFgi(jlg(Hi8hjZK7RXs@l{S7@v(0{bO zXOL;RvLcIl8V8mvY%?a52m8JoIL>3p!61f@T0=L-n5c*)*O4}Pi)!Ske|q?`o3MSZ zBgVT|_Vdg8#KbhFp0sIN90$pF{`62LL=ootb+z&$H@+*PGK%U87{iTeqd>~)iey5M z;=pe78_WTaC05mmPhvOh#P8ce>H?t?m=`2an>H{gN&lybQAf7(_Dv#Apq}FHQ}%BP zKHq2J&!}OPxY-c=5moAlF7-yVbP7M0+nrP!bN?_NJcN+U1xlLrnge?JDtR2C8(*&GyPXpJ=Iyvr{O#wCptT_w zFJ$NPL_4E97u}&id!quZU;~@NjgH3h4 zgons=k$gY?AXi81oA;~`<`HYe);4q5+mP+AH@dU_SZNtCHw-niWn;jb> zCDU{_4nq6^uR+}4HbF|pkU)!$CpmF68Xz?UL;tQqD5<69LiOiDKMry9>#bEnjl=&H 
zjvVndxty?P5M1YRYBgcvO4SfYOSVbQf7e6DFL&r~zP^?Kr|#7E=C-3Vok9*Slv=*T zQP9$sUJIQoBNB>B@3M&_z7Bl|(x*@>IF|KQjmWrTNRETikgJI;UOOkXUO9l_kT6b; z|Cb*1CEb9iQFNN1Ly->{+Kqz?HCfsXro;_zgjAmK*2sj3o&rB6ULXYzlCtmIGd)EM z23*|=%8VWfYvVIia10Ls>VtjE1aTv*fb#u*oVOQbhj@Gb1X}i}$(A0K@Lp-;Wz zo$wA5D!ft~i^1ehi<21}U$t}0F+Q$?R`mVr%m#57`e&ComjvSS#DBjKBNQGb_kx(I z#d?dL;#CUF$NdpKdg8*`Th13Y{l$1Hmf8hfw<3^o?BIKiF#0j~yWR;EA zPmj>H0%mpjMhE%6Ni_)F~$WhA%N z=~h}#Tp%L?JyJU)0kO^kv0j~{!n-$eK^T`z#)^vyGK*nq__8+3YYg~&ErxRkOmZ$% z_SPmnjz%N-cD^G;))hYlKDNURmUzKr#@X%oZZK9_hesDx#C3JxRBV7Z{E}+%EL|7m3Lm`jd zNPuzuPJS(Aio9&)1^dIeGAdZBZ!7GX@%XhI{!6t@H-X&bD7hQ&;*DcdMyjcSJlm&b@D3_6A#ud@6q?o|_#x&gh@`*~r^~tc<3WP@ zU7hH_t4L&Ndt-`#`VxrwmgKgyMf>l09f$l+DhUP7`UbSE0hGfm;n>6jjL zzlti?4k)wWF<7Ff-DK_2324zA{Hvr=pq%}tg@9ayiULL5nxCx^R;xj7>D0r5D+IVp zn8&{>5Kw{T484gvo0Sjl{acrVn-1?yL6#nSWk&3BoHaY^uncn4a5MxBkU87SA!no< zpNWfA*pfOQqZ1`XybGfao5y&z6NW;4xYzDRhT1$W4>1$dzxf*ZSe7|c`CAVv$eo?Ne#-@Bj{wJ zR6*{5>Y$-G`(Hsj2uPg;Z3#!_zyn0Qt=k%J@3DUwkk8m*dq7V58f4No2tINVp<3n| z-gniXYJ;Pl1dZ`uKICirnnc?iO}LWtY?J4Tz%3MyFVzIq(E?z!AMb}k;rgaiqtk7K z(~1Ad2pmW9_iPB4@?yvM8*_02j*;AmScXtfWZt{bdH{tuWr5Vrd zVice>KbOCY@%3v;%D3cBTj`(5(ejb-lo~h{q*%RNOPU${`}&2;w8_QmVZ1Zn_Xe8@ zwRygqcp(&@zbIq~ExUL~Y8=HAl^>SuhdvFsjv%k^KloxAuBeK+Zv z4T#}DyVA+gk>qBn$BVrx?mO=$6mI-$au@m)hS0k1KR*53^+_S)1^cGHB=>V*m_~Ph zk=U)y^KCT&_Fs3<5?;f(OzM1mazv@jUJpDoE}qtKep+a|2_(4 zJEeBLxGA@+ny%U&GJa#KO;5ajj;!Rggch#T0x}dmiSK*_BX8^8tE^4qc@crPZ&L=g z(r z%O3XCE=B-HwV%@ei{S+R@6xK%)ig7deIf9^O`&(K@HqM|c+ye<8qe6s>wTMD=e}oX z_hXDch8dNCpSupXzK@uXNIw(~Qg&SC|8Jm-oV5E#lazb0*NurJDB;CuXPhjl{#Lji zmO6YsIfb2-xZFW1Z8C|wPCupvVp!0z36tL>ugKq?@VD<5fwp>0CzVc!@$grKpAaX! 
z2-=+Nhhc*0`dPa_N5_kcQ>M$JbFL!qP*;2Xs6(S8E?q9MnOnJxZQmv`{KqKx|8jCs zEHAL}c`nmt_Ood^?V5|}VME~zy=)+~?>96E{QjCX&ee=rhhl0RTO8cJ( z{LMyHoE5Cqlfp%K%jPM*RJcr6@^8i(SwyPwoUKeg<+K^ z4kOKdC1+nV6q+>s;tHv!fXRCL-rMJ;4IPfuL_R=2m8YI7baT+pRMy-6I+4DFPuJvp zj>ZS4)En^RxN6;Bt z4tP7-Ys@BpXY23L`v-zUp=)u|zQ2Dih(V9g(KCOSLR48y=SCzu$b!kNq3=a`t?@vu=iuaXX2x9L<>7XXuu- z|8!29Yl+CEG*PuHHF`gKp4+l6z|GuI(9cTNo$oDy%tU#*rsZSSdu_p7dk;~JNRiDp zT3@{Wq+WDa=N!o~g1V&l_bI6erjs_}*)DekQHHna&4+8M&0S^c#aLe#&`6UX57f}m zkeeCuyghy~>EQ0ee0&i{3@Tz6ORf9%L(3)F!$Q#Q@bMY%M4MWWD_Q8X$7Hu%oRgl{ z7uu9ILA%mGm9HR%e9&C7>Z9|()Faj!%XnxV)dY7nTaoiPC4TvGc_cmVM!+A*53E1$ z>==rMdl@gKe=ebp(yD(#+-w9h(6oQUQf5omM+xw>NfDbnS*j!2*z(~XP3UHt8k8ko z64>TQ5RMqqab8|Eu=UedY--gHkb&FXkUB{ zmj4hRRX|(@%1Ymuonr&G&*sNP(=1O*)-wxTsFy*%BO%|V2#dYVet3Xr!R`lRoGZ@S z@1?_jdT}IrM9kpB<4{$2ukS&|^3}O!lOLU#JNuTR4ZU~`Dib+6wPDAXt2SkrHft<> zd3e+mx`W$~wWlQKJyl<&sakh`P<&&szxa^~pOB#T&+_@1;bF&W|2M4?N6*5owj6&k zlblD$l_h7-^_9=sB$XV|oljGIeB#Xv$MV7yKf|Gq`zjon^} zi!hD4qnjFJbrPET*_jap1Jqa=x`%(a;`VUk&M|-FH7W@hXaoE-C#U$W+?loc@a=m~ z$t_F|PPpX2CTn!+F)+L9n8U;@Y@VXY4BK1(YsoRpBu$QCtunMXVZ_S7g_dW>2Noxj zcb@@qZ)$7|-AenT8E!v2`dHM~t7OrzoeaCvmy@FSOUt%>RWsPisw(~E#eGH+?_!Q$ zZc3A{=`x|)d&=l8`;^Q>tDY!%OUtY_*VGG!T-@B;(s`xR6$y?V@^cFVY#z*Z-xFNk z!S!iOU49lK`$g8kVx)@ixO0GDb_fgZqAHeyzFNkGN(SKL`fe-~{go;`XAT2}hCi|8 zm7G3_l~DuE#dOajpzmO9D#`6?C8Q(uI#x0=&QOdd;0nt|?PUh7L{OEccG(Cr(V zfL9;ZE4%T#^2hAg1{N-M&-~LW4;ShOc;fAD>pQe`otLG_4M#-A8E9#!l5K&%gs$ZN&A&Eiu!Mf z$H@M0@wiRTxK(%OvfR)=a-@`g+lgKodLL^^pI6gUeMUGx|-TZl-`X; zMQzjktevy*Er?QLz3)C6d3j{IGUJ_X?Hcn3HM)V zdl7dWhSMGT>pn4SZba40765BQ6T|kud3+$tIcmaQJ`o$~BVnol-(v=;#>iTcUVbw* zE6O#0z8HRSzWnZZ!`=fE-Ai6Wi7r?Ctp`d4pS@{KJgwlrV;_fx;S{5#Q>4*0DY!q` zq$!N_K-$;m7N^GF>^6*uj~@s;7G{90`{1|x%3%rTNu=W`xrpb=!jx%yvUH)_uS2|o zf;6r{tvBB=U2c4{J4BdN7MeB-IbgbsLbp;3MN&$rMT zel)Lgn$9vu&bedFVvp_GGVOV7E>=M`|3}(!Lp{*AG(6(C1^9u6b)EmZU;fGtl^8ve@CO^b!E(0QFX?7k+g4|&Uj=#oj2^e{R8Y& zaEIddhluL>v{UZVrRX;glYWt~oG>w0k&fg^(mx2Crlsnfa#nC({_E$cl-z0-f78Dp 
z#E5TYOnav4SFpaFuWj8kh3F%5UmiARag@S+>20I%Z;|by#c)l+$oKDTDSIvrRF=ss z&yRm~f%d#EQMYfuIUgDQBX+BNVc~f;CZpRHW{Ga;A3YZhI-uWu7X5?o;w9H(_p z+VoEfb!KVu%hsP~HR-Q%D+=WBlr8ZM`S_6~?D1oZyVmnu9<Vfd*h^;y9ix_|z|Bls?KHN}~MO!bU+6uY;F9|%76yCO0k7ThhMcU;1j79t5lxTqGdCD-dm&Cb|st&(1=8 zRC4!>yJG3i&RJov51HJq>6^3}qVeP?eIFeV3gWy|;xysxugaTNkq*1wgyACA=yhi% zb_A|QrW`G_T)6_8-+sWRjOFtqmHr^np~HTDvgr!Ug1U#HLq_Si+49dn_jf*iduL`y z=`8<&3&F(+YWu|hev7m9xPrYm;b32Q=Sp_%y_V{2-P9r>QsRd03zIri3nEoFm&XSc zJttfLmh@Z+=g%wen9<(n^~ZOdYXvNULR(_B;u^Hw6!`VMjb`*zU=4I(KAFX#Gl00> zwL50_K|j1g=!KTG<(}3#@x_lX-_qaNN*i%upQmv0BAaa$SL%6#A1>7?S|M}W(-teW zNOfvfpN0x%dCw%6R^20|sF5_b9;%WpEG%60&$q14KePqzK7Km%=0mwsh0Lm&1H=OM{TMCI8_ z87sDv)-fMlhTmre=&v8@YRkJd{rCvY?jyxhg)TGqq>RmfT7b~MU9lMN*nRF!=z9MX z*@w_dx-u~Oa-KdHySU>qC%6Rv(kNVToweKt{atTpRDM`2GC&$xOaAV}N*%R0X7T&S zMeUb_g;PkPohG`Q z!L171mV0J->_953LRx=&%Gr_6_x90Qotja(XST&V^7g#%YV*;yFlZCXDjoUiRkR0( z;^LKa&?3rnA=5V&fvY|N4dFG2qxvacKiMH!M~@2=>TTJOxg^37spapu>HOWxpYS#)KUw8ekc5E`g- zc@EBLW5%oZc?J@)LDhQl@()xEte|yzSk(2ui9hET>DX=h=kxY@ug<0y%oiDEi&*rY z|69{ejp-7}B^9~(9E>6YACcN@#-fOOvz<~B*7!t| zwQ2yY>wZva)a4b6Lj#tQcc%u0=YKBN?sx=I5P4NiI(xBrJ+wc53jRRp*(vZh!O!GX ziWNSY)!LbTxvldO|27K>u9=f2jr5O&8l+ZJeG{k{ZbZt5Pltlf(mFHeG$eiJeM&q$ z-}K6FJ)6oR^b2&vDa+kbPta zK4At@gd>t)l3HQkiL*xhERHrj5(W{-HmBFD(;Y@F(5rbAdh#&Q@|t5q{Y$d$8Q975 z|1sy@H*?bAo97dzXt=D|JV|6s?1xlUTwaP!(Kjj20q9ApF`vJ%!_xRlg1F1{u18r% z5htR=s`=HGklLc4^sCRb64x^l-LoPhs=h5kZvy??ywssb*3cR$TJ>>b|8(tNFSkFR z#q9e`qu&)<+R|GH%S`Y^8PM>>G>c1Y~O~!z}h&kyhw}r z8oG??xp(~bgHAnhM+5}~a;WRwuCyQ1S531N8d6hpODZ zI98nFkAgtko4VY3;`{w8?b(-)LO42XDX|1h-h5MWp$zxtrllY1=H!8cx z1=p*V1O(pZ{^D3U;i{<^0&hDQT=(To+XF4L*&8pVCTngj9!Xt(;G$P_JqWpMy@XWw z)`9?wlt+1+J`%y1mP8C-HVB^TIM48-QGC#yu9~5ozz1N+<8iZLua#WKvGxzn&YpX!FN74`zVcYh>cckd z?j!nR4neglA0QGqx39Oy>U7HyjkF!}R%d3M7YAJww7N%2C$Q9_U^k$9e4=QU+Sr|O z#Q7c>`FIHKs5j^mVK}513Vz2+v%p~XOX0RPOK+X6Y0WI;n)`FECY-+YPL_YswG6S= z;O<=3e2^;s5umYV@T@&6%tb<=mqwgd&DDB$F>o!(61yO5yK%Q%O-=3fxJPqF?QdVN 
z>IUIs*LozGG^-4;)O^q*y{G@i3)9;I1{K1eShFXN*^kIIwIqBld#H7=0y;*HzS$kk zrg{EbOPtkYSN|lRh-~w*kF*1RypqNi+G)kbovyVOkzBN)Is@NY^a43Aq{Y_$>|Gq* z$C&%}_wvNIHnxPzZF+6shg7oq9~aKY(R$5Zf{eiVYv(`4)H2smYG zYHHu9dZF3vJ8h|xPT7s>z&*#GSSsdvJ=M&=ZQ<_Sk|1#jK+vzhIHrfHD+Tnv-SW25 zWV4bLnt9u&+z$GyqH3B8-=S1kSZ zRAOF+a-EgUE^^g{7&XWzKr8d-4d?HlXI^^JUNpTPygk6>-VN#w`@H-^VORR`=%DD)RYn>brRiwj3;-ebETRk&Q4if)@ew3zIe{v zU~$@8X!>Pmr^bPi&x6IWaz)p_@yV`}9)NiGo+RW3N>_SIr%sDO>uk>}SZZEf-lvCh zlZ^^aRb|n-hdf>ULxyEr}wpx-wJj*hIemY)ZonC@xy`t7NrHWzC{KLfE<+6eLrJDKk zUncS@mF?{d$%4O*CnrT3zty+NnlpAOsJZle&C-T}M_KsQ{UN~v2hM(b@79|U5aQJ4 zu9UXGKJPz%|6j5~#ot~wtTK+ZmFrr6mKC0Vq2(=Vsz5IxC~L%y*UQbVkm1#dN!mTC z!$L?TW0H@MBl$#Qw^JfLqhXR6*+-)4;}Ks-h6=zib=H&C=Ka{Y(Pq<+<+CJ;I0e{c#c;>?lxYTa~OXpB{rQHD!*|AK%REsrbgLJ}q2* zBUe8~Q*L1@`zK3%MaU24EDne7QeUudbbmJ)M(&-V5xJjALXj)^q$TfKus?q+1V6Et z$gWQf#F6dm^22PUWj!2ndLd@2w7BB~bSv65-`DH(vBWA`*t)l_;mX;45PexvE4Yc9 znq7(jT{h$xBofM`RBv~3@@VobxXub89Kl&z6qA2k)(yINR`HP~k~9F!=CL&GBKShh zy#=+Gb35o0y*S)NtG^#9|3o^FC6UM(l2NNgf&F3Boo;OIYCZeqjnwl#3RV$J9~gqf z?)+f?%c~`rBr5m8)R5L_B(gXh2m4YNaNfNdhLxe=T7Ov1tY1f9%aNnrOAl2CmDtxb zd**iU3VdC2r=R@a$vVyyiX1~>C2gLRkhm6NisChrEIhn#>#!odsgeH$6hOSDn0!h| zy+b`VRtr*eZ|NRDJ`1By-K*IdBjJ)aGTO4;@(co|BAEN7-*n!)t@6KyDID2Jo86h_ z`Saz<9CWJUo#X=0Mc{jqw^!YSS!+D+f%AW!pD6bJ>jltS$Z(c4&d{u`rL6fd=!$Fa z>+gxa5d1#k<466Zl)MVdz?>`9;$yt_4rYywkG}CqI;SwrkbG}TGSd*Ay@A+2RoT~- zF9!E5<(#eOW7VZfr)4{-$l=jmwkt@>vf|V2&zfg@{qE&_eXjpP4LVBoXYEU}f7ev# zRlT%cv^BW6$l%m<05wjy{&bVlYgdWddSa6Au!Wg-4~21x=PE_Ay}iB3>=)A7`d&ZD z9J9Sj^+t=|$6Wx3V`aLUqeONd;)1NKy&49;+1pBXoSx5rA1lJ&$^B>QjMwTZmNQaz z_f@yG*TwnkOHLsWZrqKE4`$Z6chizmx=)(EGbl9w@FZ*BnWd5a;{#>SGvi_%C!Eh~ zx;BaX*OdJ}Jv}%%H0&#-aF4bUGw<-MBHj0#=MHj9F|o3~tq&*jifFnhScjJ@w0@3b z8z#SvGd%cT5i`U}2G~KR)~1U&N`-8jy%&uQEpZ*^|oMi?1+-S%yZQZ+^TQ(H-@Mz8JQbSeM6vy!|wQ51N zFMp>jzj`I}Vl;|MO7qramUge75~-m6S@p#siNTBkx9_G1vT1#^>{gv8;>DS_bEn`!9zuEY{q(cHQASkR?1B@SFs~Yb3*s{_F|ri ziSR>8C!`c|l=!db>i)Lu3QK zdIoH5DcuDj@)sQ}3XxiV7Jn;m>9q5wI+F3u%%b7rW^IYw_4odASYv~`i+YkGqi3QZ 
zlp6^C^&>mm^nJ{6lAdbg24}J5u~f1J?5}nQs`Mas39UoQRU$EYQl9f`tMB-q?_qLv zY;zXewL)igQ~&P2t;K=5-AX*~RSS0#`0sm)5mGIqXMe5xT{tVw+PyhWE=A&GMzi2! zHXTUfh4k<6;iTUElSuyl*<(z-Kdos`UV_ve^F1>(qQdOj#@96K{Re#Udz(QUH81KW0H*}QhIbMMvk zr1AP+Aika|%z%kM{pj}HB&mjmSs)Zb6wNhBl|2{hrCE2)-MGN+6fybnMZMW1;6>?4 z7Eh7n@5(%OOMBw!ndYRwt7b^|#oJjw8J}HKJ$BexGrv1mkLSR7lm07tU9RAD99ouI zypiNF_aNR$D+HwHy5W!~cwcT}zupTeMeFd3S~#)g+A}C(SN=f%upQ}EDxMGm?uYg@ z`X!!b?;9hRetln=@5B2zWUZ+)wLs|WKcMdrcBDh$=c;K4JWuB=|0Htn{L3YmA-4v{ zAcIe5$(X?*i8yJqZYlX`9ZDOTAz(21-kOl^^Pa^lo(oB~>|D^v|3vjz$SnByf`NXj!;`pKeRf3&Z!uh3&jLGyWu%|JJ$(E8|6@7Zw#^#M;uG{$TC`Kv(ec*LUvWcN&2 zMJx}_RPWY}6sA>HjGmS>$0$&*_jE}k38O}bm-oDsg}7xNY=h$BWtg6nbdpr>%O3X6YS!}(c#MC^2(<^ zlV=*%)~Gs3|FyDVZU0GEx>wv$4tn%TxIrshNcixMn`|)9KpSIt+1sB+J zl~%G-c-|+!%)T+G932qgnVuZ3)%D?rc(*fTR~0NQbh<)H+~vT;S@#%fm1-TSd@uFv zv$OYzL(tFw>z_GcTCTC`iK*T1J+t0AYo0&y!X%<<>08{(?Nw0PbL5aY=46P_VjT3Q#2uD7fvz5z#oPw4vl ze9u9{-;Y&#($^s6jQ|BRPw`HFf6dSf2Q#-NQ-_$=JYz^4vXB5iMN{XtJZ2W)mCoU2 z;#u2OH~&kTf58J5lAa<`o}Ps5jtX~(6Sz`!ic=jD6sCQJPo>t1142YxO{dZ{Yp!^& zIk~OQHkg7NkskfL#y#-k%?5Tb4jdJZBtiSypbu!5`94Ow(qsdC zZlNf(xpgV{^g?g!j)VL0B{)%FNgisQS%Snv5Pe9=lR)KeXiCq^$Hz!}T02fE=CpZx za(>Tzs|1@T-v#q6CcXJrrFM8)*?oa>m?#J_yB9vsf?A8=_;RJ-3)6USL26sSeU_@myhzbj)dC<(pef zcH>kLvBVx~>1;846NNDzzw>KtIpM%Qk576}-{$f8FS_bJxX?D*cpxNaxu7C5sdQzg z_7Zdil}X};kWWE4=8FZ}$((2?4e4{vbobUF+maaUJVTZ1p4w$wHGr!#2`5$6sLs2Z zwlb5%O~WiX?a(qmjvXy%n7hN#+^Ud^|{da=qBWxv)O z+@t%*vOU@1=|)wENK(?3#jeYSEF>XvCAoTKjlfwm3H@ZB=2(&JpignN3*#9{?;e}2 zb9DTgj*g7v1}OV-?|^o}H9!4Zxi8d$=+e8oCP^)z67g4NjH&30+N?9I)ci+|EU={) z1wZw^r|;z!wR3w{x_1K-`?N){%5`QLIxGFnYidnATz_xJA{D0&zgd^#@YQ|fyNMOv zc*()YP%1XBRWFy_Fu&@RIGKI;(*5cJDuQ1|LjFY>{Bp_oT~@>*7@RHUd`VFp_MYab z}&7w3CsK3=Dl8IlYgG)FWA4Q29*n%6Hx1tn{#C?tF1zq^NIA%OTTu1X{V;k)%#nL zWf?3*4nMpllh=C9z^8dM?EU*knrAp9oO2Z8U9B3ulrz+!w6WJQLF=t{VH)R%NDb6% zop7F{EwySlYMLIblwoxktoVK6+G|SVDoAK*w4Uw}ec$y&HY;=ghdA4st9M`JOd0-N zj)vgJ-*8_p`ID8>(rp~B7!KD@(%29ucGfzJ-hAKTS+Awh-Crv%aJWI&VyBtzbj^#x 
z?^VOot*%teQ`9axI2rFdWrt}!m0GE)^schY|7!lN*2czTe1;SQ#U~UHFo{n@b5X}- z?YD8zC%J?DgD%5@Iy%uS+f6-Z23nY&RkTj_6^*{!wr{Rd5l$!044)aX!B-GkKQE%m zy}QfqBEFu1rP8*DBASmC(H@pS@`Q=D^xa zW=5jr%20dlZYVG`)##lqj-8f>S!>_+Z?IV%Nq_%`BMt%g6x2XIc;fW1Q~1M~r`&$3 z@qQ`9RK*unA4`c@SIZnIp?<=ftf||bC-|_DvGHlwC|V1!T=#2u-8tQ3ed+t$hYKt9 z)Z52o5XTS)v}_ujk&+=HxTSpW8w~ z&(#(!W4}eHgYr1TT~#=EOvK8J{yP-+k{|1ja=7_D4{D7}Yom_c>fa-KtH^J!Y;C_& z;gCe<~4b3$v;i6%q_N7tF_MN=frBa?>^ZW-^nudC@qje46RdG$xzahWD}e-7AdKiJfAk`^rA zLW16OX{zvW+jHSVJH0za?CfFPdv$einp|;nQfWp%|%wDM?%kE zRVmv%d(riWD8vn|RD-z*8}}du5SQNa<_23zxlRAMaC72B{_pSlo*kZN?B#iFDJ&&y z{w{7b0ry0)t@rhv_rR;Fp>7M)ZC5)lf5)^y0M~T!&-G`wZscCRe8uh1JC|#g?ZhWV zv)$UUWw$RxMsy0*U)@+@qr+&@k!gCokTL?P;4<)J^8>p_22C53Z`~?^uuhk``VbVu-}nvNM-2-cs{;E?zr4AN$9df<)_Hzm z66yrCwcHwL1d4A=XUr^olg`_uBU1K-`uV^y1G`st@X%8lRs*1k5i0|n)Sb6B3hJF! zN|U}!^}Hb0qCxH$M6^#E$fYYJ^m4nYUw2ot*v>cRngMzIr@gzVV$Q-s^OwxUOUUKW7Wg=<mQuQn07n1f55w8=-3; zUao>sX}6J7Scz?qOvel3JB+O^kp0?7r#W4)sj$MIo%gU_w&0$!wyZWMh*xQi=9?E- zfB_nzO+>=kX`XbgR?}n)cbT&i^YYq%HiGTIefY<$ZSqZsCxpwm71TAqn#sD5JUb2r zH0E;?pvw-1o@cr#igBk0-!#bB|5(M($(1PjlX}XLH1p?p(ez(ap{0LLh+v**8GpsOox8@hWc;=CyZ?}HS_*=P_Lvhz0Xbk z@>{mTduPe;8y)NGS_Ddd$z@+XBYs-`=#5w?@vF5BOD(X^`4Z&c0QtDft%Pv!H?B>B zW_uTMSl_Grxz8X}yLP13X?!}>mhnnwq;Wd>`uFh3a@snb!&>r6P#m9QP|zKJLy`5o zpsK&*XkMyea7>feRvE6XQsW+j1&D(L8Qxya1)hMHiwlWH++)~hv!ajMuf>UuqZ@OU zZDFn$kJ(Vw;ZaCZxR}@YuLcvq%#laJkF}TeMmd|_-IP53IBw@!7m}|$eq%>TPna4^ z&D|< z#eN2}C1Yz`>B3A`#p|v)!9s(qG;Bi4!5g!q%yyrk5>9Al4nqADBWc4eo8L-0TG*cZ zh8Wx?uC?nMu?V*UPrxhhjR; z8!XPJ(`+rAc1+)#>F8HTi=ycAq1$EVZQw7v=yJX9!BHq+2UjdXX>YUu=r(@~@h@N$ zgdEg-c24qb$l*x0a!M$B;Q}jy*7;r`w&)0gL-q5B>hml?2OA<8&%bgD@_pVN<_$mUmF1p6{?XMei%5Zy$s5)hZl- z)Z1gISqz`swp3R&(%#LNCXcD0%$Do8oD{wP;K7592~`u5U;`^5<;jX~6Ctwq)^>+* z#ml#oBzjwUg`uY)XgN7n!KmonyLX==+1Sd;OE=X%c~_Avm}Aw*!c#C4X-C9A1qYtd z2r!g1m0Y_2BNpN&K1ka0!d_~tge0RPab_H3RkLkv>H94xA3;@W$7lkiO*MvB`qR3gIJ&IP)s?rXPAh#+SQN1X|#zXxr& z^Nt1ix&%Wzbb*LijJujfGwu^I{Z!!OL}YsA$Bk!~%zfXWwD*c$BT_W(@Lh@%YLoL) 
zu4)32n?k+b&AH*XP}m)^aWnm!##S7ufr}JHfmaje-A?H&L=)oM^h1ppow?dI=NnQ6r3}0; zKZR(<_c}OCnLx3TD4W;)RWewQxHub%3Fs8A&J$ku^hq^#B_x8$H8<gw8Ozh^G)@Mcj3y3|PLvzjhT=q^lvX71@>dfcQ=-r95G$Ti1n|oC3KEnqX(kd+(-~Kh2ebg^*SP1Ueg& z8(v%;c~Q9(Lk5ZYK(=&o&MGjKcm+k(B!R)C1TNVffiShGEK?rRlP3*&wyl&ZJu7Bz zSE<&=pC;XE?VzN)Q(+95h(1o$C+{U>CopiY{SL5)FBwArQr^(N4m~Ff9T#8R0oEI+ z)>!>ER*1G;tL@BIa>=TjI>c+}@1iiidJhEz??<3{Imm7Mw|ewZpkfpF`!k2j-^tek zGK}7)2UdRyQ$w;bS%8P0O!bDwQXLl}?v6YS{(RMHuXi6Cd|gp6}TlqadZP(JR2WrT|7y~d9_NC!l{ zYJPyo#foTM6y~Y4KsqH~5?lC9d4Sw^3ra<-i8h8Ta->0-TypM&D24N-nLJec>Dq@p zu;jFB_%$LQ3rj~`nIRTIft^k3o$bHiG0CXMFodkM^#z{~JvAAXL9F=9xeCHENR{1j zMNuG%77JL3w^Fh*ECAo11pKz6Y2V`^(%1cQ+rOTcxh#YHQ-`TWyj9;pd-M`IJ}Y?; z>0fU8r#f*GC<{H=wy={(zIrZTKhN#J3WRf_3q|hKojPauVYCCM;30X`F&H8|l2ZL= zwJ4nBgcFXg{Bk?A3H8>l5fqxp;%q4O5}w^%l#IUCcPepsMm3Bbb=@J5N(!GB>o0oG z{oHlQ@0|8oF1B`*H;U2dTJ3=~*CUtAabv1k7OhYQ&d{n-By6gLB80VLPH;nkRDG&> zv*Wk9D80)nMmMi`wxD1yMyQ4`V-&gf>^^27I)FjKR!uTP5}f(u!|j zYlhMTt0I;G3;H`%;^S7-q5m8u#zrahL)O6T zyNhg*ozKSq-fa~5t%*vm0FUYS2~Ji)4w+0AcdRK%zl?taoT%xQm2=CAtdy!{L(nOb za2ry9;;g2yHr>GxsNYj-ElXGdHN!lR(xtzCI1Ut@tfB!p2275L+Q;l93C4wp6v4Uz zkgo&sG~M6;hJ~Fc_2g>sAfT$U7f=0O>y%G5(p;q?xY8W>LI9|5rp|Z!Kd%)60-0%8 zs)KIr9N0}v&Q4he@P$eWKDBFLhik)6|=oo~D zO*mG7qcIDB8(Q3gBSW2N4RBYfk+ChERkE%F7Nrd?7u8i@K8MbCKHA_5JW z;=_6Kk;v>?6*C-gf(OW`MtJHL7H`W>#r)_!36Yo7%J_*vXNU?>HrHy>gDF?;yOb13 zk+w1;b%2rdjdu?EP=Um9@kM;eLr+nwbU5+v6ki~R@{vuzXBK8GajT?u^442buD`;ap= zkgYKx>Oy!TWpXOB7&|(140fuwlbjUW9#twvzfsb0sCnY>(bjk5L1@m6JSbBuXFs}Y znJLN`KKH!IV?XKoiNqHdE9;xqMdq+t?2# z+xQrhx}tYPny{{Xz!0doSCRDVs8^t8TZt z{ViY#yf6P##Gn|*yM~|*F}D*`fi>h*#MHJf`o*JGlXtFI@>Itjk1~Ba}}bH66Ky<$Hfs$-T+{K&pxdrXpQxdFHS*yDe}l zo_+ZV4E=DpFMl@Z%AmlyN=pbGU8Z)no5q?AfhDFEF#qN^S>{a?$XBTwAI1bCK$U&-eN%?L)~5p^+vSc9lVccgWf=wj{fAOVC}!d}FN5d>5Voz7Tzx?TDjk1fk+1W$N)adNnv=*T91oWf8O* zT1nr1{76srz!o9lo6w+3(Wt|4`FfO=sl^wCQ)kNOE+72=_U>{7Kn!;tHCIGY!_Y~D z0VfZiZ8Eud;yRU1J&6h7Oe4rJIIgbV0GMr8@qXDvGF&w%{E%9~1}`IB)S1>mWzW4d 
z8KZb*HlU8H;S2P~7@`34pQGN)vnJmDecjvH(=i_H#KV6AHoDgM*ilR?a+p71PV6d?eg*l)$T>*_7X#JutX z`PU)Le zXq1Ry`K!%}~;X|8hq34W+t!g(RSNP*Opgd#I6 z<~!xjo)15JT8g^BEo%_C{|s&PLNEt_ZC3$-mA|LU=t)=matbRiIe1!=tM1#=RJEJz z>}g9*nD0iiF?ZUj48b1)&@be^+d7yVCHzS2!=ObLBYFRj0D7_&!xVz3-H&FE?sk!l zW0>N3PfmIsgR&fAMgxOBi0(gTpmLxrWci=T;U1!oHMP&0KwTJXYdC}#M0&wsjyUY` zPZDHMW*{3bjlRB^e)xE9rPw-1cz_7P*fP@n(C`#d<|{zKL1Qzyo;^=xZ*s)3wr`w7 z&CUH~kC!kuaoCXV+klOThcRnwgS)^m(n;v^`)TAa8CPwjtsO;&OP6OjhM4qqrCBFz zHmzetJdPFkbC~DN+`1zjfN8Zskn>6scoH4=Yhc;s@!B3JB2*P9<^>0x_&mv(!?b{YXk^hAroN{6xsH|Vdc(qBUn+H||VZr(<0%!YxG@x0HkO*P`6v?bJq#@05_KnsE5*l2BpV&#-+=$cNZjPS zJk0is`9{LBQ;C;gqIy<9zjar0L$BcBe*p2HrlbZ^$duaq(#PpnNWDa{4m!wzXQQ^T zsQi|(Evf5=uo^Tq{iw4N;b2{nW*f+zx9)JUz3(n?%0u3a7KQya;%8(eF6y&gbYy1K z-h{R-Z2)h-j!6{S(tqpq$}6tZ6-OQS7G)9dn7C+-=HRnQ`q>q|aDlv4AbXNJt9q{4_JA^TXdPMg<>5nkL*zz}zOW`6+H#;%k_w2#m+)%K<{?MzladXkyFaN!hl4U@X8qo ztE)@8pz7GfrGIwxaWao0z-Df z;aw}S7SvD45fSQc25Ux=z`zfsj)+u8QZt~C^)JNxfUUpYQ7?nR+3d1H@WWrD_(s>s zMP*JhD~mhT5-N_rtk;>h=$j!}pu&LMv>~ASYx#D_k^!{H*UwscdiKMcx*TqDmJWd8bP)Zw4vUNhhbdMjm z8~TKpyA;VqK?-+1Xru7I2JE#@-Y?7kc;Z>?`0e_SoU6}5RMTh3d)%2{A3dRxFJo(+ z*z#!j>Y2+tThy1P!mj-gTBIV6f#6lD#KhIJ1ZEVz3c$|Pyf#xq8dnYwvFBUSEq_Km z>eY7dQULZZRMt)|Gv0()K07wX3fhM7&IuR(TVa1 zKR&K-Mo5#RC1n4Lz$d%T6*QbYD17%ogP(7@Lh6%pzd%81Q>81ynWKS)RK`&UTWq)& zS{5&*{q1vT39tR&WYb@BtiPakc;3aOUAo`Z%{HdTs@3TE?C)hU_F1Xyq7(b_tyP-xcMM@{6Y;#2;3LU;#0IcU5$LWVh z4)<_S3QzUjR9%`YvvzW4Ch-8_a&yV+rl#J`BaZJzP#Gl>v9G+c<_0OD&(0Hx(rs}S zpb1MvS* zR|{oJL&^qVrshRdpmm7Y>Jo3%I_&hWje6yUrWJoxx~WB;8t6;I+9ciX!uMnrR6(xd?0y`#(CphC7Ih zIl$R3eC>A6=Oi#sxdu|BdVtzXLr{ zr1eG)#N7)GGjtWm2n;^296n#nJM34>Zc!Ixu7mmlCZHZtSI6FVbOa76A8E}n+H=JW zALHG<`~JV;m20%YhbSG|-At#@q4?tnIkBr+>J?ZRDs`=p6os@z$T}XV+8;+2^JS#u zS=Wrt?aF(KJfdSJe1#XLXw&m9(nbk$lTSPuasJV@4~o~&QH8O}QNq{gE`*19WyE>SWAO?Fr%qL7bUYcCJY8_&L;v1l#Gb~_+JU&h;@dLw+ z3xc*m-gjzOyvpU1l+8gAO+!fJ#PC>mdb939-P^aROB?RT^u&%^ncz_;!|8g|`vljs$KWUO)&XL|Ofc4ylbR;AW) zE@{zq!g7Q{OnvC^=4%=LpS)|<86H$P9H5OwMk)kF$XH|m{Dj1oh4&%3#or|TF0PA* 
zVapDYXIM&xAEd7m;GLrWw$0r9)U$nW+x>m6S4Rcg(~kb49U9o^J%2A&r1jlvZ|(f4 zFE49@BCg%*`C8)#KizC~%ZNPPClRWs@Ikd-%sFn&)$F##8^+%MnU%B2d0hk-K>rE} z`hN7?gMuA~0Yd)%k&otIa$0C48JgX)JAPx0c$PlL!XTJd(x;bOdPqt%|8N%3Kg%U^ zL|-U>f08NM&9gon7n_w6u0>2J05XkPN2b`v2rjsleNZBL0~rUQ&zaf9owt18BYwSY zc_S{)V10#G4$p!xNB~}HE{vBRkV?C|ywLeElTV*!j z!)UYf-SJV)MVc)|5c#}Cp}H=W&jR5{|F`sh-%!LJ@EOSMBfT$7i|ud&JWuP<07xwGwYk$Ga z$f;e3h}yyyrTI4d<;^?WYHIo`)`6S?u=FarX4@STiLV7K(X)i-v)v>Eri-A z+H%5vC4jYrdJbv}WSmN0H$w7kAxWs145A8Lz+%25xvl{0DOk1rN6(Z?G?e7*Sa)Lr z*Jqk90}&s}#o31JpT9v27O*28500ssB}TqizL4R_NKooVF)Rl;$kVZCD} z2-NiY!8&Es0BPl^$mH3V`v`jZxRcua_i~T+tv_xEb3I1g-nqwPxYbhX$);;m!?;lv z!&f9;U;!ay{7T-qiu4vJGggRbzPTN(f>L6{V(>N&nIkmr<955M_UYb=4* z6vQa3nwigw?;rgxy3e*{E(zkF^D;*4@7bkkN$?}afxf5R!*+`F76{ug#DM+&6-~de zzk{Lm_mAYwbhVF~vwx;~?TwZcylg`*w%#uC=4CjLXRK|&)6^T_*K=r!Z`kGefR|T> zvt7Sshg8b7yi;*2@$O|1Qa~E#^^fIT$f{XIfn>Vv_N@~#SQB8BTMkWeYN0mLer2^Q z3ih2s{&KKr)do@)BAflJmXdWglZIBB;-}k~rIfx^EWf^GEdOnuYumuQc7Bbj{lJGI zX&P$bAhba9xD7(kxc9X&st}tBhXo+cmn7d68Zz6b;i z+9}&RUo54~|C%%fDE}LVs^alL;`;3)*be%20#2(;k|3|alUlT8T|9wP4tJ+(osI{M zzMovrh)4J>N4Wpv)ZaAfqX&c+r-Y57!=g5zz{3>;6lWrFWo8}Td%>sT*?o9!-Te{( zWt_o%XtE4Ek4;OocyE`WeCu*}ijR-*Rbe5|liz}aRl>8*o{UzDEs=Ow`{eYzh43(| zBa;M|A2xEH)pvNYXBDHuam;C|<0Dl_XSsDWii=+XDa%Be#N z`e74jCc*+^{c+x#@GS8Z&{4wEn@Nue=ihu<_w7>ZymBEu#uJgQTHCHA%>8DTuK8I2 zHXDberD3JyPiDRm%^{kxa}o=ieB>@{U^m_$XTamc zp_Cx%w`g9F7S5vJf&p!di=0d~+n6#gsp2l|P9%wfeVFI5#h<#qs}w(|4q0znV03}d zg9tozC1{ZXqKlnzVzGkaWMsvnq6sPYh;OH{a5vIb zs|on%=NA__#hOh$eWQ({+iJYz0`!LSI%qR>Q{Olw+J+~pedrJpLeVBaVD963KiVS+ z2@>rife?r?1c(SAJ9*2Pf9+0qaT*J(((7ZLLhJ`+cqix!DAN4m$#&_y7vN2MF@ArO z&H2N^-`WBmbt(TxB*5_1vInjjifIjbdMQ(FmQoWo z88pWW=(9w(o0~^&a;<5-lKa7Wx+>x-WgFI?6RKz1J_;fr_2U#ODaf_(p7z{sWY8ZNZL`wT@?cZl?|5Pe%! 
zS}d&fLqcX+raz~FQEpvJ>P0lS#oH)_l*gJuN>BEH0bxv&#So-xSfv%N{QOMXpT^9H z^OqX^U`*d-|A+@9x$y?6YTP{$zElpeO2OPpz;udP}Vj7;qw>Sp5B{a=5}7)6g44WMH`NH&!7KUHbOAdhPr zO0$Yu?2^%ft|$W%^8)dMZ#(^>)xFe1B&>CPD2tJmbKbT8@+8Wuv|91*3$HhNlt$5S zB(`dkS~LyS0Z=niI?n4{<|njZt?q-!oEGc&dE9_Ax&Uqieh4BVjEu9kXFdy=L01!#2kNmj*j=hR>YW)Vfm9D8KGFGs7%BAlJIvH6C{&cVQa)e-6g5XV%#D-9e zge%I-#T)@3`YMAvG;vD5s`|v;>t_ihW3p@#$VVdMLrYz(#c#Jvd=I53rE|8qkhUf# z;k7zC^eGBC)THmL)d?$_mq{HY*4=CS3Fx8yCfk+gi2k!L4}$&=Bg~S|lTMR-9B=sK z!CCXfuc5&zDNVXwX@&zX)D#l#(({D!a+kx0`KOp#@6t*Jkw}v(94s*|NxR&oMKr6D zSPt?b=sp()WiEWe)?vis)6!Df`pnR*1Y)X>l+E9uG>tXJ4rA^;$WJxc{@LzzyJu;= zTBg=Hh|N`;me*NBHyH&dQaQ)58(op@;bDsKM@PJIFA7=hXP2aoes{a|8c5+Fs(0k? zWzFBrwip${2?!%s z+1om?sCc^}eqY|wsQKtky;w+^#vo1VjM&512?kVw+2L(w? z0M(Ms!By_|j%|&9-_yEm<^!kBy9}g|DF3cym-1faM`zj*5H0W1->!B_z zzDArUnPmaz$g3Dql~XsKj{9&!$MWjM2zGMVrOGs5W_*xz=v=Uxjl1GaQ5#vQFyoOP zx?0gd6B8)a1fqaR4!K~ocOIkeT2;8AC|Tj1g9PV2&IYz|^L%MUW$;5z*+dilxgS0_ zK_WIBOq>31_{Ia<4q&8?^rKzF43G-O2Qc=rT;lQZ2ffji=T^@OZsonKEbgX9U$VTG z8C_QlYt*~%#dPBWrSs2Q?+@{1f9<9o#!a^4NaD=nEr?y*NEwcQjkJMQy&nd4Uv{$LB%RJkaHlO9tGF>injBVfieVU9k=}?3z`_^aZQUo zbr3g7b-3722UkT5Hr}YcRTVe84VT{nA|1Ctp-ZxtJ+>q8>wS#dhlZcRzXE@kkUq24 zFDrd_s}iBv!$aGxQCn~DF$fU0FpeAYAc$kQ_o3A0XLUq`tZP1OKLhgrKMd|64X10L zI^RK(Qdm*SY)}8(ElMZLH}6f&*W!PPN5VE=%}mby;CfErdotD~oUxPNpU|r{ES=|& zV;$4nwTb5yG7(|b#Z%r5Xu9jbLRc5Ae7et&rCE2`BHb2Ey#t+p0<-=pwsDF3PPkw0 z6R{2d;Wl+LC5I!L}LmJ{qI z6>m*Llzj_WJdL*pIq9=6nfbIq17;4cd;n;)BEKLpu%uC)v`jlyZo|` z)4-6+fClyt%%q zO65R6vr+VMFNf6Bp@EJm);z91)P`%Re-98I8cIF;39qkQq9o&`bVlgM|I!1}Bejui z>ol{;0_iQt*7HM>WOOrHRS+Y9WFb~2ypLd<(-k_sCVv~4G0Du*rxfo!$J#sR0$gvh zJkr8(C90Xh`=1o+S+r4q61_~i5Vm^5Pk$VNZP#G|Y zJVKxCw{(yjJ*zEhIW#}~slAEK)Ez3fj1Uvp}=TPSQ>YIM5f2poN`(LV?>(;p5Tb zyU8`t&&Dc{tFX7;RrW(wyjXK1rcB)}ChmGeUJEg1R!7Q4(M3z&qVZ`7zB`w`(B$`kv{l}vgox0g_CDw!|NO5 z;q-SNOGHy=*_BIBJ8a`4v13^U=mYDUdDduGgNtz~MwJ9wIMB;HlJuKY?Fyi1PSXco zAt_gd{c*uc*{09LxzdP&naA_MrYcA3Em&BiQ4T5u{fTM{*6RmjQ**A63572Edy>>Z zUU?#=qc#d#SBYdvq?9?6>s{e_{fHC1mLom8Ap933Olq>kGSP6I#PNt8HYji$=9nuK 
zE1{O8wpGkU=^;n~njL3fL`;Kk(F(4^saECM?0#Cuce*Mu>z?PE! zE~>uu+{H5Kr~n;pmQYL1mLAG|7zTFyml5kK$AoiqEW={cd7GWiM@46}YSf2|<6%`@aK;8nxsCih1pXARdJK{GU z+jlc$M;cb{X%|t3;>-4S4#MkV6!@?oXqz1oEs(MBLs?#;^Vj~72w4X+6pfY*fpzs_ zjym%e3Iq&xz_70uR{b4lx75eGP%>K6R##XbWOx|MTrQDY6x@#5X75Q2&oPI>6_wMc zwz{@ujQeYUwAcDX?aHuz`ga#}@Kic0Rr`=I$Dn)zgsF-5dZ$Z zqG4su>GW*PGbK&QdGamMmb+!p;SIwhOG;W>RipCK=66@u9SU06>5r1E8jl|o^k4nP z+clT7D_35si+Mw3??-tX(FI}fa3XjraVRksM*HRvAwDb8jW)Mrl%_{S#fl$-I7WMe zW#W|Vc&x?HurE7HZss1lBU-Zl?0?!$JUqb5Wgn#_5kMF9atb`r^5XOqPp8d8YbtyM zQ~e=HQLQtTIc5+JaJu1#a*4~v$X{$39w0!E@@S85I-Xs}BI(r2Jx1)I{_*Kh_Cw5w z7e?WUiVHD5ndWce*wODQAkAi1B{N7Ndu0u>bOxw&5Y!I6sIA@i%8SC(P?w5}04m>G znvI2&ZZX`4$K7#y%>TH|U-AWS5wp!Vc27yMf-@a7YY)JlMV#C9BMvZzu0VRZvX|DU z52?695J&4jfit{|x6`@Ci-wS%N8Ecx2L0zPENU(ApqV0fl)dS(Kv}am`uRe5(<1rW z;D;r0AAt*2j`GwjqKF@Ytd>;E&_1#3HxStNE!;Y26rC(6M74JM4di5$U$(b4oH)_< zq)^#Ql8uUnh2~2u+V#T!qV;>)Sr^k*y;b%PW~;1J#SVdpAK6W8?*|&;QM0L)c(Mh zjtKu9d%2oORy|iDR_e6`%_w_&O0CzmrOLpF1kt0nwzi58fSWWSgG|#JJd5TS3_ZCS z*Pft%GPhE9&;N_;6l!8&1 zOB<3^Ysr6S>t1?SK6h4_?5zwV8oMY%)7)fNb~D`Z1N?Z&v+lqVGzv?eb&^!>weqg2%HxDP{tUSgFok z#Cq9*+>+i@eTXmBSC`wu?PV zF)m-IA1<&<{4CD4?r6+)HCqk<%rJ~+%$->-b#%4%H^2m7$TweAbRlrD$(n;*{nbtv zCDGC;nYam>P4CQl-u)qMeLTXku&@J07j8{} zRVp&J?(;uhHa`1sEcdqSuil2M!|5;QM(TRSU!$)=�kWxu?IHpXpQ?p+L>ay%h<; z?#v>EV98u*peTfX^!(vSp6T3vvl9w?e>Jrx>OYKgP3Tn9;FLgFGaCC4%S>)sRPI9@ z8czWm$c=70!Cp6RSj!PrnF++jyH!S9=xe%&MLkx`=Hurk+0NioHRTd8T`ah}_&M$M zbVpTwJdC}5|8uY|!e;K>qkXX6LAc9PHFCz!#MJcn$U~z4m4{Ie5O&H1l%&Q-rpC%P zu(%pCQ*lLjbkylD>n_H_!T&W`u{iQ7NTpXk=~TnLq*-U*hL=I2B`_l7f@n|oB>P!G zVOg^&;dLc07=}{_&6#bthwXLy_9@A1n~ZRs9^%$aQiC-qhGiQL9@O1FDgH_#LW@if zc31u!bZ{e{x-P3i2?Ko-2!v+GS=a;|5F_o{p>4MK=l9j#+w5*LO1R2grP`&RKS!lz=r}cF=M$}vmxJt_HHaXGYKa~qTTc>Wk z!EKDLPajw(E@l{jxHQka&_NckJ&dl4SqMaSxpP>lMv0%9o0+Do$+qn|zPK>ven(1; z=x?Uw0n5sy^Cv1IJq@boRrtnX%;|d&F{2#g?*&TVwdj~v!BkXhURW|{dkWT!NYBi| zb}^lZqkk8=RILlIi!Es@LlwkklWIM!Q5jhI?&;zED+LD+wUqP>?jXUhSXAv24nn;C z{VxW+@TVIiTyNfg+3_j^3Tvo-;3h-N8%Xp;&A()_()=EY{=B(j-K5I6PK5Aap;>@l 
zj^%Y#31p4UpNXOX*2z}=Q{Q0Y*UZ3kBVXZK3#Q2(PaH+k$g@qVnDXaGOK)F2<`#&H zgXyJ#FxcK{D9VuHU5uM^0gTzJY~_S~eZjCArN#Imtb+;lXBWtxF6mgBQ|KXzWZ#=1 zr2pXq(UO91|L27%*2_*1&|r~t2_YE`hDbU1;=>OkyNZilr#{DV+q$BbZos5Nv)ETk z{q(B!Etz~I%LHD*C;*N-PPI}~s<5m=d#*;pyRYt%enAoUp|jnWO5fy-lTg zreF52E^MwlfUNQ`?J03PtE;;+C~>Ih+DH4sgruZhQ&wro$pJsQWJB^D`l~^m> z>f(Zt^x1!4LA7Yfm8_HJVD@$Bh2llMJ_q5II`Lklv*G-axqS=8-HL(7d@-Ga5hCnhs+yog+a2M2pybY&S24lmUHW(a49swDO};k;^XHM>rwbXp z3AYv6RmP?`)T-6Hh|_7T?RhMtPm^8e8720^lFm2z{{HEoKdY_1Uv}|VyhW@*q0Gci zP4`<$emfW^&#z>zF)T=^;;V_txzk$9QB9ochusjs+8?*w8EQ(;4mm1*mBaCHit(d@ zb{IZji6;)Xq1KY|6qr74hBwfm7!UA+BiEdI1n+@bajmd&V)lN=_i2trT7eO+i*D1w z6Eg`a>N||?+jW6p;sb>!#y`bMgDbgtd&?dv@sCdyW=dfXgXuth>Gh(&urcaX(EgMP zt5YF-EOaM+S;6x9p5$C+wWa`MO0*%xK9O?{Fp_--*(0zVs*zoo{aceRE62xuJ^>?Z z@2g`!kY9hiS_@EV9c9P6`*vmsJ-fs^ST*v%fi@BLW~eRBwROuFbmds_JTEQ5B5Ni3 zr0LROsfT8B2O-C@<*XTO^gDR4^p^h1Tv!TAM?2dJgSW3wR#hDjeKxb}sLkV7UbfE* zV!bY$`#_JFCfR|SFr8LPQR3=XUm$@R$Dd%_Jt7bYal%&b_DdFCEW0&5+!o4u!R12d zOWVEUiygUFBBiEkM8ahjbYmxc=n8dM_|NZfly*e%&vL8#y#{o(|2TvNy=|e3?4~f9 z)i41b9p={1WnG6-{vOy&s$X=&2g>|*)>U~^GjF;7vgZOLvEU?z2*Rie%*%w+kz$kI zBj3sx@SKj|f+^jO%{4ix{@Um5Z7;en8U=y(jarboi= zHUk)-E#73`bG@H^=h4w4DapxH63?CMEyYfSmK;oqcGgo~$O+QcA8hKM3z*+L>{7}8 z@;!{&-f>prof`ojUH$aLeQBRq(KIZ7adXzy#=ncRvV&bmyQqZoUltWrJ}3*)wfa=> z0NFO>GW?>lV&}qnvN*%}hFPat#(h^!UU^bJaXeblYE{%_aP1TKwNFk-XR0a(9oabI zV57X2lk1-e>c&!c_fiLomMCOv<7x&$*#{epEhE-~R6sW2DfYu;e-{@TU-aFs$k9tT{rc{aOrMF_1(S0J?rL{snZH#3Wz<`K8`(qVz5CJu0fErk z=)zwHf!d`F0ViO4;A_2H?L!UsxEEpb?#tWT4m1*dcm*dByUQl8R}q#j8{n*t3=>*R zkcD{2iqSzFOsoz9k)-0a$sO4m*D8nLlzRu&6XaAO#^NfE(_C?jZeJ#=V)o~bMf#brH+-jZI{`8 zM`VNJuW`nz4-3Vy9puGzJ$$(Dx1u+B{L$JfG@q|qICIgcC&+9VB-Ud;zZ%~M`J|9M z<~qeyAz`kg`(C%(El1TUKS>!y$0?v?71(vqt9%+Ky5ULmfUXeJTBHDbl`4$ViK>97 zvLovh3YQGO4B}dOU}wpDSoCKKyACsrE7dx(%pbyTns@53P=5X|bwkemni%P>=aJeh zzhMcR=~#EM;@f~anJ$6(S$8uEJ6M@{!6{lhr$O-EHLFH0Es<=-2j2y72^`c}g1)7K zZ=T5aoC0QA;7>{Ty0;Nn%{L?}az3sm@@U=9&Zkc~2b%}m?1Ef1`z|*(``N#T!jT1zO6>i4MFN^A>GBUk+rgRpD_h8;`C{AK1W4hr=2F~6Ch#|+k; 
zM6|{CHrKy?!nNbcd41i-H_DBb{w?|Q!&d%V8Gd7h{$jaRC#;*M3=9ggIyZc|Ir$Mm zcM4xu;Ik!P|K9!kY0nzw7033vpRSx6EgbW+SA!KZ9XVsxRz@J_7$>m&!N9I7{uA8UF9zBL(M6TX*%i;OV~o{iE7y( z-aeVm{WOTpcTZtvYXUBJ|e6 zTx5yP`67kdWBt1y&eHFd;RB9Ip&9Vb&Zm+{dF47*Jj_hf@MIHR!3o&kFnDAr{-MmF zjhjA>Ohot`uazmx85~*kR9*~8ovny}=pW?iN&b*u@3$iFXP2Wr<0pBp!)~>&d42xE z(VRz*>U{seOSth|D@{Aox|33rfFJgTPU9{AFL^%Q^C2bb>Eu5a(&>*nWsZHLoJ?nO2w}_zd)vxkGunIV}gpIQ^XP7y*WcJh1k5tono#3M&Q@Uk_5%XX`cd#za zwQynPm5_D2V2Wm?uuFm)%V9k}h>Xx(e&|Rtz+m9t7;(9<-TiOWm1hue%^%z8OqyZA zunP<{(SFs$=!h&+6B2U`gU9-j*6jOxaG6_scB6Z^z)CA;pgKqh3Z2dSIowufjH4^a zw&*RL^y^?lmUtrTex$n_F|fkU+~>vDGK0OY)=?iFn@%6EY+`u3;#I!6C>q|}Eb@>> zUcv%AuWuiY-aozZFtQ}Y#C$dgpj1UPntg|C4=JGk(e~_tIi|8@;>n#oOmt^lUc$tF zJ&1;I!-j_lV9EUhi=2bwuw(5Vko`@5QCil^?=F9@>Ay+Nwd?4u18o;v(|ZO*rJBIE ziF@C)Ff-~FeP*z#@YrtpgNNOE`kltc?>Sq*N*q3exo1(Z5mIKsX@uY4*4CHjuU2tS zR`bT-S&H8kX4ZlYsvIlh<~e%Y>(mX|0qO6bU}XC~ShzBI-C!&lL~82LbiBVrHC4WW z>y@psC2$Mo^c~+U!Mb*pc_QqlJ~s6s@@Qm6+r@EE*BcTtH^)XmwqiDGDyA!MiUylsK z+L?pw5>b0wPtwp_5INM5cXTY3+lbDh@p^Yreqea`PRFn##ue2B-O`Zr#S;~~O#l3< zyqPiBd{ACy;iu-qwl>0VEZb3=ktUs4`-T&&_kC2egM))XKHckavZzcZo4O}SKR;OWzVCem>VZy6+~LxJYC}JF5Bk;KYu#LW+3~NM!JiYkOi1}Zr|sI z=au#JA_fD%=?aAf2qK>y9ljTFvR=ITqDKaRC;RPtZ4<&qqu%_Huj;$DEl1AOy+ut&1->rbFSHi@=;P~_YzXKC{`;$`y{$u zSD2Zk&n`h&dwyD%Zx(WchL;2)T1fF@J? 
z2JX*Ip0ZjA_C`5$`iQE<)h?3rjJA!mzmVrI{c|rhG+WPK`{IO++x$#$OPXx*jo&rG zb&uWu7$>UB(tU#UQs(jG_j6$#FJEanH-4C2r3{|NObxG$d`t3c~1k zMrq0O9z*5iyh3L;PPWg!*;m?$XKu`l?EE=-mzn3@Xv0M$WB|5{ug~Q_qDc<1b^6G@ z@}0sl!o^?1&0|8WGV&|>i}#;CoAw=v#HvOQ>7O@GZ+nI`jBOJyXz)7$rtF6X!tiUjX>>76eaJvjKdp5vT5SP z)rEwFq_hd!O*(TVj}i=u{!E@~gebs6D=_YNUGB;?!1YlK6~WT0W8WTY{&8-I^V(!3 z!tlis+2mr~mD|!Ef$Zt~Ugw@OO|)t|wcrd!!vnwF z$IePP7yUoNzB;Vxv|D>Xq@)A|X%GYi1f@$lMUie0MWm#=L%O7uR=OJj=@5`^kWOik z77+N>c4pq0bH3}G^Ph8V2ibc+zxCYfj$3;MN)flt}pt;V} z%z20H!3JT{qT`LRr+a%8$7^XZ-sLaC^NjDr2L;u+kEzPbQ|Y%0E*zf^Jr&YXEn>MQ zsdrkhsti=od#Ql#5i8h`y9eCSw?)2(Z_CRw1@%XAl%%*UE;_O%!t)k^ExQNwLy%GE zzsv(Lmup^mj54c<8?LGUWi(2;%J$EQLpc6a zA5y%~u#a3{`S;av{?QZuudkR&@2*!kFtyYPG*V&GLzz9z6PqE#L!m)b+gtq~cQRm& z`SVbM?ENru$}SB0#JS2Y=n)N87G9hIp+8=Y;BJ~u+=eY7ZT8N?+~zvJQY2)1{XuhCs=W%LgDUM{*Es1ho=QsNdruRXtTfG{f`idxNB(NBQ6st4_|w?H_8sqwn-`O z3(UDSiQzt^t(+AcUo^nJ&0!o#!bw=*nED*E47P7SH~v-{v$`__brdrPcHXvkLFYGE zG`oX@TOg2j(`YKePX!f+j24!{aWEzu>9yCaQS^-7$i@#5!Ye>de@S?06tZhrm^qY@IM_31M{Xrq#zR^E{%l!e~uq3$g747O{7BUw8j zwcXRST7zvbM@~g{8(b3Yn&ca%S{Y{?V2 z+*XIAj&nsQ3c~hb(~=gnW{d!8V0G?kZf=oZrDUjc2fC#(=bZwG>{MgB zB)KFu@w`2w1dcTf*lzZ;kyBEUmnoI>?SJtm;h7-F=1+X~T>p51fldZtYEch;i?)ro z81p|@zgIlL{j+Xam8AInSO3Cu;L!>bR#BIFgR7=(RsDMQqQpoA`#2VO@S)6yi)BpM zNFgMgkDy+u{N9WXE9_qy=-i8mNeMz_pL#>)yo=2}MMXdta!f46x;1c19sN_0EgiLng|e%ApTD+!JUIebd=noI@9jBnkcp;1EM z3E;|Nv_0G?jb{XOVp((;0ormVAvl_^Pq?R-_Gh45T8@+z#cgrK)g!cZhBNT;=Z~{1 z+tvE*VQuGDu(kWvIS1BA{qR`O{LlvNVr{tsRUJF`z=+s++qQm%uwjXF7)z&&DDI|+M&#{?yoaf@Qo|9!Tm@bp~&UjpQ?7@rHD-#y{rosrj9N#S{+}-@o#;nlVU(IG9eAb$SzvbH zg(}iNTqqSPe7`R|$=YssiSi3{+(M<*`3G6x%js#)>^$?#3trqeYbL+`@3a|ATnEY= zK#%)Cb;S$+yeIGPi@fS~EL?9e<$MQ_6jeLb4{3@CR8e**$uQTB#dyh3lCKRH6>YX; z*cYoL<*DsXD;38WhCT~Xk&(9z3^Lr?Ee29Ee*Ij0o4He@S@9^I>&d=T-$U02KrW?u z%keyUbAu#Xz3lrH})b*nQ^4ehXjFv+d6ta=aknyM7-wk=cG; zkRdx8JzC&_D_G6a&&Z(}D)~k?n6PeMXLd2K(`tiHn@=!l@8pbtX0P~_PdxqSy`+#F)kGn{5DPTA8{0y(HMR4jY>Hke^$ z{5vkQi4NeIVi54s&}iq8poD~P>dmm!f%4R+XGi3>Pc=f?euey#1J!%tmDr7?KaZHZ 
zz{t!ojq;q_H2GfMKi6RKZyN@G(QrA`n*#sUomZj$Bm_I%>i_7G!g@$$@~1Gmp!mO{ zLi7OX0^rknVaz}!ytSw1wF70=!41W}LSGP^LusiiQwo$o`y5a$g{27-(BIO)16*ixijtq4u-7e}2pY$TtpaJjB(zE!<8m@6`Yid)~4E&6g4Fy38I zGo!aAD`ntnp{ajBV`uZ^XD1VD*PVCT3+xSk(i~MEMRMW6A-)-Y@xx!}J*w%!i=F;Q z!G!l-ehFF@Tr~MZ^3DCi?|ss5v<;J;VioGXj+j?36B4t3pARUo?ZzSR7^wdupL+ zDmGlN)hYKL<`S(RW+ic*$h45?t-14qv0gLzdcpG zH}{6d;LnDj@2%0SMg2)8(6ZqHg?2i_a5LV$dm`0X=gZb~1y_J-f2Bclq@v0(Fwv%$ zm*U-eoVM-2mjzqChq}+Ie%Jxzsq5o_ZM*LV2WRu>4maN2b8<=^u@}`yI2te!QG1qj z2R6&^Rl#D$yOKAlk25y}7DMO+#rTuXTy;sgHgz?sET2nv@tm%v84P9OhR!La_tYTA znkI&k^OLB=i(C>$#I>A^@YA80P&;EG8I+XuOi=2!{cs}vk_ z)wJ9mS=Jsx>?5CvgIZu^Xi07aN|N$gr^9~Rt?SqsF7A6@@4~XqK4qPvm4cDz+R611 zN@-AK-E`IZVBcI?P=w3Sx}@fE$B6U6Dm4(9%HtcqjwUTKv?{xn5R`mhMV-rWEPYDi z=I-Ysi=j4lqjYj02cwK`k{*wyO32b=-!vIITMgwVj7P0P4*e)Z!^_J{@%;YI1xcX` zK+1HHH>mFZj7&`Ij7DqK#s=hd4yGfy%L78($0Bju7P{G5ssGsYA-n;cIYwZh+&@nX zy0R>ohAV!)uz#Rp|3b1y{GXQ;!0k!K4}I+oL%shjANzf+!Pp&&=Sfh)gyHmR#V#3&@`RY}sLPTrbn%zCEPmy5l`g&lkc8FhQ8TRd}goMOk z&X6yKg^tyiXG>rblctAon>^H}s`!q~}ZD~3k zbi@>^!`jOq{)=w8ovRxbbrs5hkHkSiTzWUZRjDTJ?Cd;p25>0tmQ%3n$D7TijB&enP(*T<9T)i!}E?;^-lMP8$5&%7ozwZB@#xZ zzS>$>SfL!;?uy_`u8R(t2!V|)0?5a&u+)_B^|e|A+1Dx|eEDl@s=5r;rOF@(nW}s* znF1!5tRorXD6Cm&eh=2ZM!rF7U>B?r1Pwb?WL2H(d@7CFzKp8Gl$_*6^8M$v3T~D= zGnlYETnkw>BxLziENKIwAk=Js9{O>QF@ERzHjW+EsfHRAO!2^y3IgxEikUaK75buL zXdsVvYN!u;SH&-}88|BfK|NOoHQn@ATy6v64!Lz1mgZ643(KSzyM*wcpT9Eg$E-a) zwmLs%F7V#inf-`wcb@-y9P94@Chhm?_XUjddQxXZe@;EfFQZodR|v@zTA3nX#SSCN zc>jaAAgB-^|6w$QnB5&=e=`6}%N{&d{sNCdiBA{vKS~`DDQ^xKT8oBLgo?H1h+Bv& zZNf>6y#+gvC1RQHhIL+vBze?--(L``>ctmKnh$s;8#8q^A!H{Xeyu(Ah}_$=rWyXx z=W)S4-1X*Y$R9Laxim>^i7VU)js@s-+LVpq!}YuxQm>nC{h-Ppkx~aExK0o;6c=&L zR^Q~|3RpJ$T4q&yl*C8>6UCw2IZ}}9yvXBRHuG^DBUEO#=^70l86AFyZT#U!J4_G7 zj&mgogMs<6cAEEHack?x6uU;m!a-4MHda`Gz*{AsEE#83$4`s<_;>O-wvjvvK(I}n zuQ=S`Rz7Q@EXW+rQKAPzqkzM7bl9;tJ2kcWmR2Y%wmxrRT2#=U{fpppili=pcDx9Y*$naJKV1m7K7>ZOQl^b?4lpb3>$9j zH~RV2W$J?L}ZF|l^0^YxXGJp8O)?tWgn41TtR>)9ijDJo!NU}mjfC6#1hWaJQ} 
zuZq_Q=Or`^v&C`y;RyTUDc)@uZbI+JHAr3EEBbp}SVF|}{Hj04ceQ!YVc%zu0BzIDlXgf$5`Xf@e;q^Cb zV)j>$^D86n@2@r390I~+smV`JxH)CFv)VAbI_pp2W}D~=RT`sA7csNsuk`cn;TUMx z|4U%0RSCKP=C^B{>V-%)|HV=R0h=u00zhb&$d5K*?JyRTn{2KiRT{45`kMiuZISYZ zs!YxQWH?>Jzr#boO;MUHH{>r!;d+0}5WN7a!f0S&9jm9}cVWk1kyq#Ga~vP2lh|vx zJUKZTtlT+lYl$Gi84=RTJzG)o9{OVEL}Hl);#E5d$V-#iH*N(Vnd+TxQ@mywvd&^JaIU>NV_Xs>i9IqX7fBo8Lg?-Y7Z4FW4 z`@u%u_c~^Y@=$vF_v-Q+(KjN-$&D3?#Fz>=HTDY=;!`naT zJ0d$dy_JQIR)A%qYFDiKXu`CGl)-z3giE)(4wJVz4R z0Bu3*vt3PhR(!gj2HoRZtVBAt5en0<06`mcM&G5r_l)Uk=&c8$9JB{uDt`;@$j`p#f8gNp&9YiQx zWr(z&aatj{MCJ0h_*KkAm*Th(xVID)b*}2eq4m~pjuY!CA`&+RW)Jl<_Ya+6760Ci zYk#x9FK7G7n$NII zJ+lQEG7|7h$;2W%-0^U%4p0vIyuiWEu(r15j+Vq}`Ux&Ox^$U$_grMbEV(=Yz8Gyj zxH|?9C*Z<5jdK0@ai!9FV`^@0kN;wjNQL&Ko`LpwUOT%LPrxJ_Muc=Jy{UjX)c) zTB4k%f>!EnlUn!yaKXaszWWB}zXFv!8DO6V=S>mE?~KT0P)llNJ&2YIiKq ztcV7^>Kl5Mk+cL;9~OzNY$NZ^=M5HOz5@$I*g5~L(jvun$Gt)dUF_LxwT;y9Xu)Ou z$IIUNB4ZWeX|XIhB0r9mscM_$CNwGMgPyO%APQscI^K)!`>wgWTz5Qkh~5Q4lk#@$ zKFit|tJdD$o~Wi$c9J$B+!_&pj(wdq8r?Omq?v3#S0Wr&EH#^@xw(FtfrdTt9)Zbq z?nqCv3$nAb*R^f2>{0+SOTYV#8m*V!JV%Q-=rTQ!eT_)2d)7b>Wn&f>#XjG|Z}T5^ z=fC~w_qGYzop0?gSKet`2ZoRwhAZeuH9S9rz1zW^wKs3NJj|tD{opF*qHIKrhzJJc zdb;QR0bUF;A~XSy7!MJnOvKlRVT%|&2bXsY#~P3-isIGm&^0x3=fY!Ot#5Ni+7*x zJT3<#rE%5ske(z9XaJo#VjBQrQYRSqGQcT7{d5=jS*c$BqB^NdfXFgKnaR)mQe$4m zb01Tdk0Et8w>zVnP5jbCQev7*bTO|9A0y08^UI-dUZuXv7|#mu{sH5u3;<;ux#0f30m zGOy+&;Xsf^T#$d$PiZ+In)oYn{W2xAl;GcR^&b}!;!m$(@ld;@e;l)xF{st~I%ljY zTkB%HW|NXDBH=GfULsaB2tUCjb2}I)HJ{c@@uxcP0m2jq1bHTJ7MX@7eooGjv{`Uf znJ9n3sHlLy6U*W@ly&y(azYDMeiNBl+zoV>6%a5*(TNQ+fYZjI9dJT57Lj zcS^+K|LuE^mraVXK0XeGu?6clG?m4<`94!x26!kTTqfm~nkWn!A1y^3*;Wqrm1ApZyP3ba{Y3w3IKg*6xt`CIRO zrFW^_Ax9nj5$qlq9QLe8_aV(oFpJuO&2hBt-RCOq>3z6@bPpp@3!#5rPJ7GM1=^Vz zN^yDJ;iw|Q<<>G$sj*Ty<1D*fwJ7(tlEUpkf%yL1s|J04lJN(5>)D30<;G>68x))- z(O49&TK#o7zN#vF<}LoSlohOBF+QV1_`=fwF@E5Cwy37j2S$FFWNl#Kbinw_x15Ua zzmbCfk~%$w5i>@nsA53?@Kc;_{mT~^JBj;ya^oiLhgom;<7u%aH~_KWZ@=$WH8|^j zINt%}Iq_^5MYi!DtHVl94_Ehg+bO$ls{Ok2va>gdzYFxJrJ>7sQ|AuwP>WLdhkd;E 
z{^645(8nk+J3N8_$nri_p&)b4>?y^j?lm`uh{NGS;~0kXYA|%(ueX}6iBln%`cq-y zES2%7c|`dGFvq7=;$d6%GgMc%pK6rInRopEN<%*6nmk%e0WH0K7tCI{cl1<~wg6Fu zAK1=X%2W|-O{zU7vDc`!&OqD>BbFE5d=t;X5j0qlXLw8F+0LKE+%?t~axz_@47KRw zN_k*+1?MTDX{0Yt59;2l}fV5(Jzgjqn)d&rUar zLf6Ilk{dp~_+o85(7aE9eN=tC+fFYHo`UnRoDDblhXHFCOIqfbjRYP(TFh1<-57dRG)$=wF3-r0!J5@ z-}tZnU?@DiY^b}pb?+*p`~>pV$PZ2A-hK;m==cobR87-Kb8?o~M1oOBrl=jg(PV$j z^%G)BxJ>H0UtaD%oYIl(-Ry1apxZeU5U6wbzPi%A{n&nH=j3)1X(8WuXzUw|`$%++ zC^M8^HnGi9M}XCWwEff?ozu9u z<*uqmag5JX@#W6k#2fPJGJ`HUZQpRJfTfoGmA;|DjIR6|@AeE_!+fxiiF77EtT9>T zwwOr|y2*5B+_tusL{U%gL*cnh6x|RYUxNJEpC-3z8=sIuSDbpZbxytxEhNWqhOA+P zXAtcyf)gLEq%FK%{mHOT*Q)-);7ue(7<3y_?(Sw}@mGlK&5<=7l2};T2d0~!Dv$q55#COMwlqHtqkZ0ucg^O3B-PX(`o9INGQslhcn5G1#OCaEg48Rd1@CL1Zx z>ND55;WPB5G8l^sxA-9ZqMpsvj zlFIN$Gl3u<M5-u2Wvx0KwDzS2>~u1vI||(qs1_7Q7E}? z--U~#n=czQ@(GAXFwhLtI_+=1_z6LfukG#rn&n1vVEwq4@gzZY*y&*Px=x*Qs9X}? z9ebB{L&BO%Q?Xd)*Of{vbHga*+k8YqJ)&>e8wgh50~`fBv+; z+{KzKvq0kIHT1QsOLhXQbr~<$c2$o#Z2FTKDMW`f)F_Vy1&(QbI?z)|1ll`a0B8pd z)np_`_Fby)8%pFpVQ_NM@U$?rbPQbG7PTaf`~r~-srI*&LrL)u^y!`?@r`^R6gGO( zKwN&E^g-#T&@`#H{l~K&MSQPEOVDCM3VF?h9*MkrUCK5_C$Mq2Hl*fUr(I<^(|^}! zzFC6fRrmWU-EHj@jQd_U9(#U))p5I_DE!>|EJ{C`1HZcL3Gn~lXH8BR{ztiDX#sIw zFV~Yjg^^qp2DUwBa)InwyX5Kzh${77cmcm7j-7sMlK$yxi!@cH@kDu!oqXq#NSBBU zzloSYsr53ZKfOJ8KXO$IK3B~22^D4G-+9C{99AGMn1??@?mTBaj@%1H zH6VQM$a-hyp;+cUtqX>#SDn!^=)o5kXBO0qdxwYY+SfGn@fNz{Ifo{Md_-fIE`=R- zW-17GhO$5cAJ9>&Q=qZ%?*IB2(kO3$1Tr|CK=D64B*ja#4@tG(n&8lv{P0{cCxb#< znwpw=I91}Bj*_Gt%B%0K28mIgs`+K7d4oP4v3ez8Hib-Y<1uJyk`H*kp2SovM}tq;OotTYth5SKG28Z)zLcjG zCXnTxQ$(q}jr02uXrmXa)3Y2TrWHo$p{4o5<_hxPFZ}Pd?D5<=o?c1s*?eyu+wa2! 
zRnZ;=Zf`L#9)8;>_^y3Y-e#yV?~doS&_iALXxi7CvynCcdMqnxp9Zy)VU(K^`3PxS?+DU8KkJY24o4 zR{kK2MEwC_#bglkJ6QOJmP%MP0TF1*=BB6Lbq$I0B8uBawVPXLvcX{ECjm-{H2j)s zNx-V`F84hXCTqF1+kF-7{o}ZL0)78RlrRC-XZ7iT;McXO75b#5y;o=jSif*l`S?@K zM66(kkh5@2td5tYS*DlY#LE=^kQxdMz+lkLu;9IzE$Atq9OwJ(>S@11mXos_dq7%+ zWBJfXjBeEH79X497ITo{5eRJ35+ff!l4ct|oouGo-cY}$Ee0hz6mN2oQjijFJ%!Is zvx?jr@KAUu+tEkXg^U2 zko{fudLQY5DHo+3TcbOPUk&|fbcxeG9iw(t@%nhzyCU(03*Ukwf>z_lx9%Tv3Ey-P+*focfW`y$&FGzdO{uPR8T25z^? zwY?X41J{Upuc47kA$yl1U&YMLMM0ILXJuto>Om-#!__uK^Plu@NU|tvqGf}PMlp_U zCsC@OLR=MZWBl3=)Z=l#;9#aYr+t_O+f0ofus~8Jx%Zv(BC)-~t#F47rF3LNOk?Ay z1Sj?jUvG)Iz$3bFb(Hcx@lL}u2y}KA)+A&IkS$^ov4}cmvIRkF8Xg?18E&S0wD2WZ zIWP8?)0pB)Tj)kKqY9%A0UjuB>|2F7frwBdGG?*RMNAU?!rrG&jGtpN4S?>$A^xkZ({Hx>Q=s4q`kj ze_^Ec2D;BQPtSAWPn$5t)*v})%!nvz_e-8bUzr7`YdQ^R;W?t$7L~d8$YQ; z-(?{6v}812BWjK3%Mr_rkA+Aed9v0Jv6aGZ_rYOws=BQxC_1hzz2~xDm@)O4D?Tv z(3HvV69y;XS+19L2*ooWVpxP{zad09_5p zhTOctqGv*yHC(OAN+ADbT&MBN>s+uv3JBSK9m8j5qQkH0K+Z#}Q*EuGhO_T?q2g28%Mcep-JWKl%5B7v~ zu?_oowyUhl7U=|8VO4DZD|Y|$km!KU0lJR!fLiHexbu@ zQn=H*U^0?lYzwt(2SxYQcoMVBfZ{FW=-&vD-(eL}sz zN-syoeWO-yI{cUO_io}QQVd^ii?-~aaq;2l^0v0+J*|^Mm%pOiFV7vUrso>qi%h+G zjZl3|fcf>ad;%J?trvpUOXwzG&NgqIFcq~Aovv|sDsxj`}A1c1llTi5qK*~7T+X(hM zK1&|whidsN-YdfoUF?YPs4v zOA>pF$MO%m?Rh^^t>VgT^G=Gc z-WD+Z`v&`;YL>J!ih6$3!KjZ^>tEeu;~Ki7i_1{Cv5A+T0X~?xuH)Rni*~(qBb=Y| z*2yUeuI@g$au|LC%^?!#r|VOacz7QZ`5jc-QEMDnHE)Qc;ZksXDS)W!aGn~gipGJz zxSR{d``AZ#n!=VJvCEb6)vcUQ4|3LrFV3HfHh#d$coV$*6RbTsK#XR7|HwR?`9y18 zaTt{1Av>t+B5*${$4KEbM2f_Gd@|Y^PmNKbc&CCXG6UzkZnpe$6;d{-SlM?h3c|vF z`1(t|rzlIiihv@MNavj~OdJ+7u3#}D0+G10_$$(tQ>Aw-I#_-Bh6f{gV|a!18dM55 zG4e{^-+wN^X>l=HCq)d=lMi4)(!!m09t$c;O=HDq3RM`lc8$E#PZCq4lvRjVt?{D8 zPktURBNXz~F>vVaw#s!jn(qb^p(#Y%b{?aM)ACF+?OIMsfoC_-1KSDpF%>n&IKfo> z8mady9-7{ihDLKMb~!;ubI$}QbGX=(!CPgCrA-fAj^rmlnPf@8wT+3DShzuS`+9)H z0=}h}7ikFFX7yYQi%z_#=)rC3g07I)+ECuoBJaqq%YDP&Oo)31R4N05QAD9pz3a&u z6!%y0P!~}Zv*ePL4d(L~zaNc`v~|doySaGmz7Fho|9}SIgW}k;qfHjZECm`7lElcl 
z_b&s>3~9d1Dwbfs#`!~p2MsFJT|@;^+@7eZ`cB=)c!+U1f=XsTSKiX}e~}Mx;%J$X z2sdb%pP489bq@*$Q58EMGBRq7ewvheEiByOl`iC?pr{8;s84bGYpQ|k7;ifrA=7e` zLB9Vr!ONwp+ctR*9!(?)IAQk0bIOjaMBk0d0m8{|(X*dct#AO#s`76yfHogm8r)0@ zNqiL<3-p^+g1`#skOzDac*vA@I`PSj34W%q9oFrT+Y&+RZL|arH871G?l;(NWcC^u zKw76<&&%DxdMH>g^~2-*H$Kxq2z|@PMn%j$;*&TBWW2OEl$e^Dnr+xeF$|LfjI|il ziuI`1sv8@H_x4P(`HArHMIq!ytnsppefeTVVN6JBv+qdry~H=37W1NRYz2XLmT!Lz zXe6YhWPEq%qIs@jyu0%)#*$Cy3qNsBcvdq!_uA-xt8_7qOK&viFZ-#>kI<|Q4P|-s z?=%Qyf_S53`2#;sUPq%(njx9rfx;Wi4We{7^)AOOG5553i#3FVgfP)>f-44%18h<_oTM%ult%o`iPYPEb7;3Yc%=UY?Tdt7SuB;d zc$3$2{r{}je=l{k0*F&6pug?)_bDvBBM+M8`cyT+a$m|b#@7KGtvkRMEqBHc^)zy# zilUhDe7<%J9t?_LQCDKO&VE8^{^;VZ6g{zG|AOxT7rA#=upi;fApncms8ZSWy_AaWhyXzfm zj_V2Ls2%%+&a~nK3tfjCm%d*o`#Guw@t=?2ACN%IN~36S+(ibAEvF*)X|lf5bAV+F z^wv88b%W_Ak7+x&jQTyNrl&JfwBne;H{(4HRwM8>Iu^iHKs(%YYMQ2$)msiQ$Vc1R zoIjA@mV6;Iwlcba|I4FH8i#Fwwt+>YxNk9N>5$^)2t`o?RQr3ayu6AMk_t6X3BuLBt6Ict1M-i%v6K2Q0PZVihMZa;RYl-X2m}Z`eCX?m z?NO;+W1m#|s^^hJ@cLNC0B^f5@0bDd<3?V)vAji)r|uN!D-z zy%L;f#Q!;S!ujaHQ3e8zC(Qf&iV5IDG6*vFf`AVSJ%tfTFsa!5TXrqb&biteTMYV3 z6f~q+d3j9aJjJt{H`1417V3Fz) zlX_Q?5K{ISKuSu`|CIk+Rr|E07Xiq?)~tZ-272>wg(=+`OAgP2vMNK1?{kCp_#y#_R>6!d*}&A(#Pzos4k*5E+kyEWbM z;s35R_q|Z40D_@-c0TzJOND{*$zqm{%ntu{xH`8%r!poE&L=JlT1Bg24MP|_p`0dZ zH&Q(B7bWC@q@)CxlJsyRix4|5SDMz|gOcB#`JN6V5ij-g;f_EwZ{p#;L|y_3aq;o3 zQkA?>ADyY|kS_Sx7CL)}Sd&q|-8R6P8R@|dafQyGFsewXGkN+;vCy~P~?nu)y{jqyaDmSRL~~Y<(M1_FCk#E*6h1~ z0J*LSvVlL_OG;Y<=;Ubdc`JZ1zvi76dM7n4&6gmI4F>;7TwGiXx;LC3X~D-{^(K1e z=G(A(TKjY=sgW_(XEAk&RtPa2`I`_&UpmZ$SAHrXArZ}0L!cQxj1p+Zwtn%*YX4a!JM1t zu_%c(0>QNvT_9y%nhd4~7XCUv!_9t7e7+z0Nl#py$DlV+!)`pkznzf#+rElxm7)1~ zaRdlGAJ->3zW!M5Qu8sbgk~~#uQU`1p_918maiE3NHVGBOE)zI`*wvf|2P)yO{^xX zYpoIhH1+j8psC2+g|-(a%;SMqISS$S{vTL`zttTo96;y8bPsxlnE zMti+EW(nX!24L=fh=;7rw>(<~MA_^l#w20PYWQG4|2*Rso&u* z=zwhl-`&zr7I4B?xZ?7_{TGkPK&o%ui`;!_mgy=h0`Nel6i-$t052CBuAGBi@KfTu zb!z_p&s}{+&GPq$^6qx!+hJ>Q z)8zVf6shFoqa>62gft?xkE0EiT8%hP5bX~_9Lsu|#CO*q=cdwKrl#r~!@lIfjV?tN 
zLG7STmfJ*dhc)IPF1VanCI!?k?IcMy8yQ3upFe&a@;sQ7%SW-~#g6!$SZ}axrVnNH zWv_#z7Mq&NvOX~Z{bGgj(CAWj;IH$^q5h9zcFQob>M?}sKTY<+*pO^7w2Cr}NKUYL zNf>lF_p3c~ljuun_y7bb=Jgi%MAm?S8nqYt^+OCnRzxzuJ5ICyRPA;a8=b+GGbqjT zeC;I+Vk)-J9!V!T4(#?%l*Rwc{OpvBw(BOM0R(FiqgEiI48(@KjLr(x4}-sV*$ zg{5U4pu{ZmuH_|=S2Hv6n0yorz8Nq zr|GcdZv>oAug{Wd*pkfLn&(Aspo*(fWRhLvisF9$d%z*l3<}=v7aA7K*Y_Hf+xAR=1yS3d& zuu1s7ii z-bn`L#?bvbH=rx;i+;6kNeqlfB&*l!!&CXu&u;kyu3#fbsfpULPDm!cX8$;C!5eXM zy~OD~9dIp89jv~}{nXl+YZBvtc5%eTN1XD8LkDzJsRWHK4}TGQ?jYAvKD7Cc^(2n% zu&$Qfc&3)uZgX7ZH9EnK^7k&|GdPQma9_cZiJwlb3BAJ z9FN^M#~;9?j@qB(A%FQb{`16n9U} z7wB|Cp&ustTN8NeV^4QbXl$%RbZE_h{E_2%ug7hTE&=4VemuJ&OP%Ac=tynE2PD?| zIaGX-&mz(O*_qIsEqLrqR`#ujAx0*U@W|h*7xsRhqnNP;*QnX8d_mfLLCDdq`>YJKQarCsrqLykk~c8EOf(u|q}+8GsB__7A4v08 zQTOFd;;|A{W`b!ZzCQUeTvoda5+zafAohWRSMcN`4oM@~91n^VrEEvyXG=kk3uH#5 zs5;&sPAk;KU)udU4bEry^awY2`X+XzPY@76>`7Ib!br{&86?C!r%=8e*@Xd=o>v9^7G(i33a?V2YR zy?e@q@AzIPYOLs4uuHKbS;%L+CDHe!lkd6ANUoyx-dAx04rFP5S8mlb$7e-N3n!Xk zWo1PQ35n*(p?(n_K4CnEvB^C;0wp1j3vGem4&*)^yy}GO{Xb7nG5l{E%vI-s0qDq& zIBy@?l8=x|Y0YI(dx89z>g2j!iQJyn(9ihm*X$R9X)9Y0s3d?A8U8)s!X>3I+rrJ< z(>FOSW@-;0bk`50)uQ43DNH~#JE4D&B@?k|Q@Lfm+BsJh#nSvPo*3=n&~J|a=E+7e z_47vTgsJ?bH6>bL2}p}>rVL0~IJH(7-2v&=m#TvS`N|bv zysr`J#d;qt4>ve)|RdD~fiLkpCg!F#0h4kzu2=(5#!-n>Co> z(Ko6pU_---yXue1H~}9}HU z5F^oI<1lE&?p->)9d%WS51XoP;dkZ#MzUuaCoD?f8Gsl_X0N&`tzyc-|-!8DYuUgFT_SQ3YV(y+aKqP(CQFi3rSOwc94n6l3Zk7k~N zIT)c+`^bc$PTg;6di^+#O@oKhSd2;TU4_4)r9g1I6hq3+K=S5J2$13O``_FM(~&~;Yc`_&esmH{N-%?CJ zd8$e0DZC8!q{z2V*@w11=y@SuEkM2zCyZf0b#QVxHoLmqMbvDDUelEID5kTRatrrO zsXeRCXIkCjyI1%frKF?=AT{y9v<8|*+qp)hX{)#{EKG2Ag~R(XyGJ>eF7j=n%YLR5 z%>$A-;D%#!`SLos9cOp^t}gLgiih~-9Fj;)=;GW*_Ma<<)##2+&nmNE(&wWDRl5C7 zaaKI;c@ia5L870o?=jTWxUJ63rm7->2=ArlAv@6WoFaIZ`d@q3tgWm*!h2HyAb_

dpmvoFiPWg+b2uQ_i62;03g))r(plE<43c-ztyw0e&bS ze$cW_rpC2u#~NQr|1=|NSfp{%seTYkZ2wA|+h7O1|I=%B6rP!ulVk*bSh+0^k#BIm zq|Kk*=tfOATmBlQR(tJHet2EW7fCj7Q{tPV;~dfuY^ZZh*<@4G-(#MvvieJkqfd%h zY>CB)WiHUumwP=Ib*t%kwe>@{+rJutMkn-i^^&l?tF1lnyHHAS5Guf5xP-^+Z(~_* z_LIHJ5fiW}7`Fz@f4+@(MinSWj3+NHp~JED>%~&4H?7oWvYD)O8advS*$*+ER7g33 zFJfx5bXVVbxl^bgPJg}*@OHGoBdd&ocA0jve^}Fbc`)>d@mYOsZvH)LOkFYxA@|3} zVh#2MAyd!0wTVIV$eO7;lMXJ_JS2!=qMudKR;3oSch;=3q=rsjXXKYf!@1;_;CIEm z{(Mjg4blz5PqPz*iC$Fp-qNz9oi9~scfR)~X{od_%o8D#LbsN%sPdWB z$u z_(q`0rv->V6NcSa$^9h^3DWfNa62vsCP^n+jaEr*|5kPgIS2e6PB-toXfZ$QAS*0BV6tZ!_P#1(DX++nhivY6S4UisZg6 zN7(g#F6uu1=8u{(6sbOPI9hWrQTCQ?vH*5G^0@q>{Nwlz{`hW9R$L>U8dy;)s-HK5 zjmGCb`u5gD<7}ctP&T)DP)JBRSmfTXC3nFO2=_r3mrVW$ZcfrLbdL=*6qJ4de*|{J zY`a~6T)$-AmwE7VaT;7MCue8$U*_A+-v3A-pMorP|M{m-kpIDwo zS>}OzJf6+Gi>;{qGS{Ca`FFtv;0@hwd+AF@3@;*_Kc85Hf-1a;Zo54d2jh|S3Vxj) znwFGB(O9hy%S`t!{%v*Zp`x~e&~7IF#E(T9cb{kpxK@o9+pseyMA)>`Bu%`U0j{I7 zuDl=sb&}TO5{GZn>X&CwQrkSEk9Zp#Py%0=B&i=pmM^3)#(C4^If87bLLbONoI65Yr#bNn%$}YtOJceYC?GTLmJ?!*LP=Dj08DCbqGRG~S`5h&bUIGiv zXSdFA6%8#fajV-FbL@+Dwe>(&%7s=pL=Ieb8SQ=b(E5G?OntykR$~AmqU$oqgkGIM zGG%N3Kfc}r9Lx5NA4g>K*ef!#x6G2g$;`+eMIn1+?=34OWN!(Pm5hvxk}Z)@*;^tR zzw_y>zV-Y4kNA(+FN zJgNaiiXCIcxE}Rk*EH*aPsnt*)&n5#o8-UPuGA}LdgCG2v<|C1o)47E1tZp z7LokP@6RJzH%(7Y)(bM{Jn8Juia;YVddE15UqDph)A18wu1ybOE4=t`3?!@eGcPm6 zJw_xPBqpB=raX9ah5f0%4B!0*+?@0FQs_4`J8)hqvXa|vMEq53zm=x%1jgDb%`4@1 z{gNac1ki;R>}7J=vr`SbOOUNXfKS<^_4GQ0%tQ}O5QUc02;U?=wM;a?1_=8(n;_ak z5K|UbS#?jI!<=m6nV6hjOu0`#0$SfRjfbc~{q146`901GnU==j=_Jlg-ESCnPvtUP zf>77 zT*#OvP!W``$o!h))$6vdYi6kwl?GP?8C+2U4?3%@0=})i$IjqhpEm-siO!_nNqW*3 zGIZ>Kf@nmy8Ic`YK#GU<=wpM^)gfyB4c@rX+0_@2p1ty+E~{SB9%HiWrsisC7(eWL zk%0RugAubV>Yn$jk5#G}+`cS6 zsOhTsdzC@6>4XCg2j^&;dFemA2^D&RTa=udO;XE~G$1ykK<^Ws*|FVn{_Jtm{@7r|o?1Q8zywR4iEn ztBTBLXP72qP|IE>^owg+Atkn~8pEct8_H)8UH09duCpu>TpPRMy+f+^U_zRf@!Z(R<0OtPigI?m(mBay8nrq+RJWk=-l*Tw36h+onTVFa zT4A?5WTWi$dHafxXhxmGhimo^Lw$pwd?F<$CqJcqZ=1L|7!{o^0UPq~PE0C^6)Ed& zk-H@IM-+wau 
zB~1;03~Pg+Xnid19vY$=d?ObmA4TiXIK@NVdJb4s^%pO~L^0ci>j`1fK>}n?5Ep&> zWMue!m@30F!F(lhiKpVz>|vB@VDed#2O|4j|4~3a4^V^&vGDQrSn`m^w8{ewgP^-R zEWLtOXgx6~S3Fst?y2>^uQX@nw{tavzlvEN4S=F#$G0|Aoc>mwT^3&Fw;;D zxz*0G|}-gM4*b;frW-|xto172Lctd8-iu>v@;5|#TLrOvP9#m!zG zZTjuD0BxlO&y>5GiLr5VM-%UQ>(tbg?Wun={VPz+u#9Z&++yoUn!qByjYRe!@LiBy z$W6Aq;=E14{&&;L-~D$4^s~c2WY1r!OPN9$tA)Bix5k!eE|8Xv&T;=ZK;~nf&W_R6 z)ZfKkIl6X@L3iiHdj@ss{0}nSkG-b;td^gKvucx~q;>++sx)TPB+EtF$RaNO+hbgr zed_j;3*y_dD2@_9EskG=bV>1*;i5Z7ZtYK)m66RV^Mf$V&i`(!hDz&)59#!}0}uK0 z5E8Jlunv0Sxof*!vcHh9hP~zLS#36to=Fd$E>d2RxA1%tU(jrHv%|qQye%rjJXGhn z&Y6U4bmie;9J}u@l+O-ST7P96{1dV8sdg9at|c%hr?BVsWb0XVCtK+uadZNw^Fe(b zkl_xY+i5kB!JocfcY8b@S-$ENhe|ejZfXG4QP|DF32>Do9Ci1c9J>uf9H-;}rYZs@ zrl7|9WGP-wguHK{dO7_!rP!^Vom|MqT^TbT6G4{%Tt6iqKLfO7+`{t$H1*`bg;_kW zXN$P~OfyHul7z1wiO?p)SUXiMtwB=iL%}Czr_W?9@l|~$4A$W3l{v5xMpmDQ8n&6; z*DIFXtQr||AX^X|{R*xn-ilAu7fXkc4#v`u0WY_&EZgrDv|nm0 z)!tt#{HHOLlzWWovPIN=U!d-|CcOj38TR}Ckj~7gXa{Zk$X!?=_yH@o(a}S6g{1=o z_!N}Et`;_mj2H;j#N=%fO*$I{I|kSW3iLuO^fU9Tgt0H9!8r%DEIK7*B+&C9 z=0(77-4|z#cIP`)U7UM(rtD`Yf&!GTg>SL)Ii1bC5@0$acxnib;kG(Uow(_|rIsA7@Fc#v&BvC>W!zg1y9fxSGEgw}s* zTH5%$_fsRfgy=e@d^jFe=qwf^h7YQv)06vAdGohTKzsw*ya8R=@6g6AgCI6^|GHOQ z7)0Kd-di?1J(TiYpzlHki`8oUbau6Prf0P|umMpKJ`C|Zn@b1Vo|ZEG&%ih+t~Cyb zlKWc=etv%4lBA-`ZqDL_ZPrq)O2DS1Z;1N>pAO7xxJK4kz@|oqXriz0qmsiDfRfp6 zzBQJy4;Pa~w z!LvQ7ntEqZKN&*)`-VwQIW(T9>QMM12!^+b>i6fGi1#M}IV$VO1^rV21j`uif7ksc z@yn&*$hPfwFaBPG3(a2}H=w@{q@u}Ibt{GfFO`kd4J|&v-Q1V?{PE9|5ww5R{OG{> z=g}qw(d zdqA{dzco1X^UL;2SD>n}Mn`I6$daFjBm`WT9E>!~bqLYp8xS%?DpTjTRstF2mJ=#bCaGT{oPlPxp zI`-7;sY@xu6?Mn2GcuTvgDD3568Pt+Bc8N7d1PJo5pJWECP&0czYg1 zcYz!IlmN&jmMZx^;iSIr7w|C64FZa6fxgrNH4knIkBPWmEjTssJB<-i()RhWAP&%m zjZC-O+6osJm&wfRilD4pJzl5~Qn~|t;6(Y4r9q~-gZwxsI_h6Y2owyf zt*az1_}*-LGI336tJbFPM#M`&2W(MpKj~1wAIzJ0v=QCbeT5Y1kbC`R<=hRsUBHWE zoNh$He{?_vir+$Fv$^QhOh_&CdSmhZ5T<0v$qDTegs^#dTahUHj>TS)T0T9_iY3czH#TMq(Dp*x#?Kn z%&Y_jWAbe7R>B|p82SE5v`Ea&L-o+~f;IC5wy9{MW0$c%uhqQ9&-IS2JUdd7oWgIV 
zV21=B#~VCePr+#`Dvcyq9cmn>qZRJa(U!3r82dr=sVppdw|e}vt=2K$b$2+-<>z)R zqd;)2_3Xy2Ze-XtcK4594BR0=;e|;C^FR&_p6_ps7v24#QG)1G5*?eYOWrbY@a^%@fXi6f@-~`JTzj2lC%>IRX3AQuqz315J;|;1T z8|*M?YOt#ay0Y;iqrrqvgl>ML{q-`oYiEWP4G&L+xRy3NWy@Q0vIK94j_*h5K{BV2hZl*`m*UO35=f%1J0fr5 zUWt-?5byG(cMvenbsW8SycvPUCev=RfqiM;COBsfGhzeAad2br72 zt&CoRbw5++J5(@mD4Q7UFbQ+=$F`ThF6)Rdrf+RwwJ+jjsr#PFq}=3$Usd(^$9^4o z6^m&trtSq4ov}7Qq@GuHE}{7w!yGjj_A>u?JkhPFR|kSAi)!+Mhvw22;BpMr{d z&NzaRvE3a_G;e#>HDutP!2B+q!@V`VW^)GFHP>Kq; z$&VY)-`dXegjU3z*F>9x>9OY#acdvj3(fYoVW1t0o#|?*WmZQnudh2>v;QtHY~=E~ zfSZP1sMVlyW~W@+OoW(xjX9`ujsdnr#8dQ>y{7?MAh#_EX$5^v@!dV@#*-nVch>3`hJ`e2%yq^x~{S9sh zOn2ac)CRJyknoT@Ivz4_f{I}nR0HLEfCVVyQx|ViI6Y<8%+5xL6(}=#j#LQ+GCMQJ zbYG;Wlm8`moM%W3QqcM6E==Y227Ed;BJ%g zs(S|!8=1*Chkz<01$JeP1lWeEa2=jE)hwNvIfE6+Fk2I}Z=D)B;C_*~V}h2MC@eHo zbLQo-?Z%XtZjlD930Yd@J8fGSP4#(m##B#n^;$B-SAl7zsA9S;Bm$)SoNw?3-KZja zXPtsCUL-lt=*g70SF!Zux`_MCG>9$=!BL)OcP*ugwG^B1%B|rX%<<^Nn2$_hbrKcAlLIn{09W0M7 z?&fT5Iq8wWO`#XXVn36s-4Ak<{q(w-o>Qgiy_6aJUJs+^_60sIRA&22^_6pt{~W-| zF+XX0I-u>1C@$O!{{u2A`J(;DncZ4FfKy%*LK#Q`_@A|&s75Va*uieowD~|L^@T%UW*gBs zf*JFAhCXnODo}h=SS5?G&5DMKK`n%|UVt){+2n;+BGPNN>`TulBT?P40~os2k8dX& ziNRb%zQC-JQi{Hs^#6HsLgGLGQazimL?rtSD!DwsKQwKZ!bQz;NLV$P&u87Jy45{@ zjkv7$k%|*#@Nj}KGTK5Z+mjjM91C1SgD5d_N%QKR<}N|p%wp*<*n``rKN5r4Jjdzz zJDeb{Md!=lD-7HC*sSWk^En5Zzh!LYq%zd_T(p<=HUZHC;n^)FS84C>J7h?$D$7RO z5#Ln@sgAtnR8Zbx$;R2}9RN=t@+ul~%nFkO@GV^t1l+)zP!&>*P2Wz6eu4mbiVURR2Z)mn+2rS&?)eU?YBeyeE!7}>`_1Z$g+2v`KCZGYf! 
z{T2SQd;^le)SyQw<{<2*jO?rl+PoTb`~J=pcoM=YuQ|*Utn>sl5g_V~6L5!hRq&6nptx?1E`N zfRrj9P1nXRvUc|1x$~zBdQH`ve#5wXEy3Vn7;Q+!Ku1r=%6j^ykUlj;)dVVOkPamc ziDl!BsZt5liI!`D-d1;B>3uWhzT?*-eBYV7K0YKb6>C;GNYc8xFHdT{HU7!F{t?*P zfrcL_`iM?(od` zt4V1DW;K29jl2VxD?y7{%$?pTI#}GB$x=>RmZjPS(T-S3vk`gdN%JPg)g)6&Lj|ge zNcrT!Q(D56YXJczv4DiM!Q6N_;LAr?z3kqXhrS9y4N8hIE(UnjHP@x=>^%}_=r0KhDW z&^=)i1iw^&b{t?VM8=%4G8Z%kquuF=0}Z6e5qB>-OvakN@UpmUk}HSZ=)GI$xKHcc}<`W-MG~n#Csa%4j96BNs=U+16zTd|}7eKhm z*q^N{h2lvQs6l*MFTN9*un`R>g?X_)%GrViSz0qI@uh7OqBFGH5Z-zl+^BXfoA@O>Oe$eov zdH(A2%xC7T?|Hbd#=>UK2nj{Oh?x#gwlFfK`i;T7rc&HU7cl-oyi_s%|GMq$I;wMi{n;FRra>^6S6zsd~QiS54&n`hOtbZHoJWd`UgGxa`L5;)T zy4Y8*FFRQ2 zFa1kDA^6**IgnjfV-wzv$!4DiPMAYyh0CD!BYXm>Nl0w#*4yo>iY2z&W94vu zT9{;258NMYwYD;>vc#}>5{Kr>ty5UD0={_u&&$hpm&Y32Ip=C}@t@4Lk@C@m0kz5P zE+FBj{JlP|B}{in=%BF^K=BokCz%yyrW zB#YD6lt!rd>1~R)@+yO)+0tjQH+UvnDbt1QIONiABB&}(a5ua z0Lq4jhWFOtwIKCMa-xuZoe@rN9BgonYTNmqaPvdsgg?hPQcKPbTJpt!z7BA=*PD7L z{IzC&%0-yZAc0Uk^2DOw`|Qs}mGBvfc_x`L7SRIcnc=bQH48tfgeCQP5Q4&XzJ0rZ zg+k0YD|v>3I+SaU61%OU;(2m7_osHRJ0D{Q@t;A`sdhKUd*9e8Y4GN1L6Tb-X9D=A z^q!i=fv1DRUGR74OXC8su_!NDKO1g5jY#~$fEnUxaa<@J;*OC0l02N~xh+sl4n-2( z8J8gkmvc@S?jH706;*knPV#f^$SC@$G*Ray9`BE~FQDgWUzR3!T8oTsESC84W%M-% zO5j6tIC2{WoIFdfsq?<5x_J8fwP3rAEEOVKS~K5Ws%kVoG5c(LGH|(A76j*nLC_}! 
zu`AkbWK*8A?RofVN*d1zK+s3to(XP-AwWI2X&rsbIjK(|2tBk_jmBv{7^_3=b%!{p zEpnjmqPlkF?pG{7NY^agb-ETL)jFHoluxXI?j2RSv0wYQpGou29W12`$4v@F0f>x5<2=;&Sfe=jppe%Q9$<~u*- z{NvRV#fPrN<57W2Z%{9~#E*T)+{$K}Oc%*Zw#coHCx~6=0&d;+P#k7#;HwhgX=G`% zovP=DAT9Y(+75nDAyeS=Bqh#ipLswo_&fO`Eq6$O@(!F21g628EjAEtN z$6uZa&kKo(kI#jzJjV4>&;>cy_`KS2+6WAZc#n)& zYm4e3b}GffHY_WvP)F#KLtvONOr+G;$J|gy4AR4itjKM&B+vazWYSNd&xh&bVE^$& zTBC>i8ns~Zr8~FH5tqSP@DMWY*tvxvgXq>oWl%|j^ygap_X5=XGQ0-TXPiy+`{=E+ zzCh&2Aiy5#aXJfU3rR71siCJ4@PpX~r8>ymsk%E?R{5C{4Sb{BK{=6wO>$u=oXwS^ z3X_SEF$3~f?4M1567ltirmE6dwY3^(^r-oy*0o6<+}>T^XF)f(Wsy|A30)I1m}FgD zA0*uS6$QaVU*4!8vug_=m8LkloxcneDnly=@fhIH(k^++`TnUvRR+g3U5Q;O$_Gq} z8cniEVq>v@dqAZ;)>u72x&g3M% zU8|dI@j5&#gpaWEZIPr~-n}a)qSBPDK`8NbK%|U)i`xp(``CX(|@pA|3N` zdekqhs-JZ9euMw9c&+2QDtM9ZkC`>PVe3^G8{XYOil1Q~p4jK-mNydM!OEkRc)l79 z`hc)mQEP0baZDTeNFe|x9F|HgZ(N!bfE%3+H&8WG%z7P$N5mW*A9u1xawO1K#KA5? ziKA?d{cY0PtG+vhUB_sYYhhy{_0_v7MPWFRR);dgp1BvTjNdVL7ZiAfN5JGn9p%s( zR{$AxNP%p+daG8k&;G{G2onygWx z$MUz$p2Pa8ZsA@rdoy&KK1b z;2THz>uXw{bP~_vzOBL*O|ZCpe;=jB>hsH;-)^xDl?#(e7qtu3sNiG_TK;CkqkTX( z0HHY+GyX5OY+F&Uiu$D+W9;vLBTHJ?CP2amJ+cggSTfM%>2;; zuxGl**53*fQJA^jd*bWo_eADupR=g6U1qZNd}IB`Jat|K#XlvF6EzY{2_e&dDL4B7 zQ^WPmj?^FI_}A)kazP8beW*1@eaFmeS}8N+u?cq3uFhxP%bC8Vmwf-!TR)fM)<@(r zY|go=Mcaoi(T2-PEZpxHH{&9~wGKobRwMUUznO>Kdvt zh*qP6Kn^Uu4rAAU2_uaPfkJ}*mmuQsqRzxj%Vm68ab11V_H00KV?NQ*(Lvzg)pnC^ zQj1qi9}CSa84#Sl!>|=|H6&R zsi`aQBiXrb=U-xXz1Zemuu6Oq0K25!Sb16_d!9+;#d#)c4aI}UZ3eO6XQfV&^gCPi z5!&}>j7%cnd!|Q4n zmMZJMv8bdVjsT5%N<9F;i@IN3PQ|^et^i@Clzf-6hM_r_dWP3EoDNJsAc328AoR4t zaPCaIIi26$-5srkAx%Wc)RVb-db*HEH5yRRmpd8(qgqg09MdQ;`GY^>%PikbQs7|e zuU>6l5(2@^V*gl)&P|9>NG~lxu2XITivPqNzZW3~8oN<>$o*2|f*>!8R$ly!mBue^ zzvVN3G`soeTOto{-ai~0yrR5Ew(fI42>7$xW$%f9_CsZ4%tzA3By&+>n;~W}tpf-R z#GVKYgkdw^L93{!fZS8j9cV7omw+CYoo?FChKB6A{CNRt0nf8c5s_e7(~|3uD56#M zxpJi0$zfcnXq?Jt-<8%nwDB`0iq2i;2*fQ*AW8VVv7w>lb+TTkh+Y@{`t>t(Pa+%Q zJz$PBKoguBaPpm3IwOx1gX?DnKOW;=^Oon^!qRK+$buKmqco_^zTcA)I^qJipaP>3 
zN5xHgTboOs(rt`Orj1K~j_O`owNv2MH%B?>3iBYObh#9QXtLfuzRVdfYYmDXF_SdV zC}_gb_atfs5qD0~S84L`*b^2uTOz%(Lh|GwML=5I(CFPVAO;cGm>7ictgBY3uHAMT zA90p+3Wjs4@GqpCG-B|4hVRE{RhJ>iWOx749ZSX3l?Js11H_qaQOpUEQc)=r5p}y+ zg;k=(UsHPS$;<~MG`ombZd>hqW`_YUTsm*Wsw->}u0cLY0>GiEg6mXh)U3td55|@{ zHdcd4hp|#ep~_O8T}VjR^UW!^V0Clll=Ter+<$8UMkdzCsKs2+fzeaYFTY;5ft-3e z>g>xp=Za!MHb0oBh+cFKy#hB2rg`%21E%H3x$nM@iV?H8KpUMfD2c zao2Ekte8*Op>A6|nQEw;A-i#AW*?9>rbq+(um=w)FwSa>lc5d+n}^e#4S$#>nBPOM z#}^e9X&6NI{>t5n@ra!a4Sd~Kmgpwx^f4Gqi}U%&$Nkuir@tDz+V%gOu4JEi;F5pA3k)>p3N!rTXWQVirKAi3n_wPGzc9hN$n*Pt5(dnd73S z^|gTmro`xCgKDgcsS|NvUZ5~Q9?}q=uSI(T8R$z$Id-)Yvw}=j8gk13BS8gc^!C`( z87by{M$o+!du>~ilDO(%IdWB5$g+`fUQ^RM&qD_6Y0dk~!~?+F(1-GqC*sQCAp{L9 zfeeP;T6z3!nqcAxw1tAhuJvj_^6u>Vtj;@NV@X$-Hmg8qaT%G#J9)#vfD(Ae=|?C} zxzKOY)*Cassr`;SE`&s88H1oe%Gu97B-$243sIpzD0oe=7+!(ftNb?Z{3FoQl*#Eq zpmH{d>Q;MYied7%&+1;V^|r9XQ8Caxp?h^5V50=|@c#S+z<>D_bd(g7(5_rTSER$5 zuW8Q*x;`p2fT>OT7Z=#nhf)*^k|TpKv|ekU$UeRZnAf8un%VkAmImZ>M7!)pHIW-r zFm6o_D9+~+XKhdgSEF~TKc8N_J*EwvpZZ?JUbul~gK%mz2-C=90qM;L)nV8RLSdB-&g4|`V8%XVihFBeD=jo1HY#d;+0 zW!f{V8b4A6$_u0~zH~DS^shqz^~h^QGB{7$oyolq92)NS1yTKCO;gccwjW@$ECi4* z8iqybBAy@#--XFppYyjHZ3h`;(kJZzDYV@7Nl`KQ3DKuG)gK14IRR*k`rVN*y|9)bE zLK%V23}x(F@5Qn;(bWpaknnv1_4h0ndP4ubd~XMO@AJ9nK7&P9&w~ZCsbb>f7_-=Y zwayW(Hvu~YDMF$VDV=7HJ-kS4WYF7kcTXjfH6@;Q;`@05lt^gFv>Z-K``xVO$C8KKVsDl=TU`#=)y)8CX5OtBr z+XQ3P?}PntaD^`4o`gpjbZsOAl1^8&!nKuh$U4l`83clcm4T@Yy-g^mEYOBK8R}&Y z*e!N-MbXu)T<8RhE_!?olYG+!M`?+4h^NN48>>0zLXE%gQdHs`rMNSUwbhp6KVce$ z?VXL!DV%BAsIPZyN=+8QyzuteLx&G5QATD=rL1ibSLR|`8370S*|SLx^kq#^6`8`sUg3@g?y zz`?-e9lHoJL8?gzLAQCG(56d)a61G~>0B1eS_65S9i_cWDpvuAI9TVP=1i>-ub1Hq z8DM=`WF34#x8sqFuFZGTPJU6^C&mpj;}zEoY*q!CTzjj8BA?f8T&Ki5_m*qMijC%h zj~h~%4NpR&SVu>PCX|>r5?qmX?mv8F5K!O%(mQE+DgC=zq0kQcEG3m%UXVc%DlVeE zn0-^f-LO3Q9C^m#Uq25%l6)w}S(UjJqbXz8?zH~WCb3(dTvCP)%6Nd1bb`d0)U$2+ zuDL-ZN)t&4+u5jfBa1WW4Bk6Sw4Ogn9c6H4vpq4$xhjzAPT-06^(He%iCyiNPm;a(~U*5`BeHA!>4Iyr0tSXMX*y#eBH+IQt0EccR4Aa^9-P+}DM1 
zMIKL}hoX=(kW6tQV`kh>sz#rWy#eP*oc2u`<79}UC<6R`7;d&SfcrA$$?!u>Xf#eS z(T$Yp?9t+gXP@7!Y9RcWI~o<_y1^FDGb;Q;|eWc7LF3=$mQvUug z;|X;1=om=}2?pIC-XHgJVbvS7t*b9Z^H6;@n{y|82!&O+3=ma0mb~UU6IPa%yixoMr(-XBVqHJ<(^8s0{Qk}8 z^gir&MaOT`+8XwK7gxtOecFg*sb8S_!ie%s!IvCQ`z7AE>#z?^jNJYq)2txr3${^6!E&E`>6fmohI9uG z=5;^p*HOj7#x8`-=oZf@;0~pR&r3$o)X^>)l z%de8{3FMpQmgWc0)W!$Ec0N+~^vFj~|FQfvpZ#!7aYWiJtAWQ(ZCa|-8yk)_FxTY0 zd&$;VcXtFD+Ct9`)KXX9JZh$xRw9|OiI-0v(j@-QynjciX{B;Wi5ki0p3mA;%W3mN zprHQw;$$%&x1@bV^3w5wN#1DRO@n2joa^)`X#aefAET>9xgB^?+!;D7GPdHFvQF@q zLaZ8`PUS}Y{X(jHC>`Qdci!3;bgKl@`eSytU=tz#zxfmN;1JJ*XS+XiR-tWH1XOZ- z%EY5=E^c3Yk(5Od02s%<6TyT5}rP=1_|krCZO;TC2S9UhIanzO7pt_Z<7Q7KU4 zF_60|g~)S)-mhV42kG9r_T*0gh6f}Tg~lECL_wd3<0nK8>%0JDWU&h!U0pc2%;dt9 zxa6Fb2Eo&YHRw;ZOv_Z}AgKiC5&nknkEex^j#*Di{_@)pBMT7?3xeaLZI)13QfpgU z=DsxYXrHaM(5P&+0$&|tUD?P({Q`GSI09CN3d+Q$$y^MNS}l1z4h6`doX`2Tt{!vT z-Yyf@SdC->ow6-9*s;llYwpZ^%$Fbu2>01r;UF%0s2#4Jp~*qnFn%b*;Mw;DvPhgl|Z4`C2Pp zw0r#Y4o!4?yzu0G*$Bp-!9lImS2EP(Ba{j*Ckm>kVC{zC^YZXJs&PL|IP=%4GY>9g|cm9Tp{Bc(#z~tC{N) zLbimQXjbU|doU16chC+zJ?{<#9zP$z6st7v=5w-rd|ZJUdk$Ry2i4=;L(oUUjDS!t z*F6Z;UIN7IhIH5|=uV3$SC0az8pcViOhZDE}tfOIx85~Vn3~ZcywSABV-V{4Y_I{QqPN=Hi*TD z8J$SX2me|MubF?_VEA;&!LtrO@w;Cs!@{D(XvVtzzHfKG>CoU`K)*A8la-KM9GPrd zoqMzqer2XBL%yEC|NL{vmo$3eRi(6)UG{Eh7nJm3X_~j7-x>i%)HVF+z-Tk4)dR+Y zs_NEYT2mR>(Z$wEiwY}{S?Lz2*cb-X^LQ8tkSeg04WE+D4{Yqu?B_wbiK8~L9fd)U zoF4?7*gHrUw6?@D@qzJ=IbeWX?(S!7hq#B^E(%sRA%9Y%SF#LB&K!6eV!}HZ3H>*_ zXZX3awQx5?BwXKQh{YHPS#umT9YmZzs|i9sjD5Q4sqKB%7&UM!s>A!|!+UaG>&$up zJH^r_NX8$(XueNt>4gV-OX23rT?R*eR=zKL1B{g9uP7BPEradL3(wiA#p&N|0TIAR z(bYTBPKBiX$t@B{quBQLwnEur%QWa$#um5s;~(fV8B&!*Ugf@OlyGXgGb1W%!4oqE z`H006ysoDTW=$S?+nZtotOJ~AKc9eK&w&+jMc8J^BI&2(iAHfn_r`T^E068v9u|7~zyq5Yo`Ww$1NtggcLi>$9ibc# z6~yFX9%|oqcU_U?!LlYrE1YthP-Yci)D=2sPuCoVvyIk#*(qeSSww&DMjEqRhh}3R+23>ps4EeyaM6+t!T;nl)p)?aPlLf!k?dt0l z6+EBk=A=PTBj&dyeF2mxwmk!&}xHqoCK7mp7 z1qM{1Cz{E~1YelDUqN>gd#&u6I^r9p>w$&HWe@gGEy>VAj=fj|0-Q0D)ziDS&R!s+D5SvxKKo3$r 
z(jZia{#Lvh*4HJA%>JQh+{r2vv_d&?-&+EAI_6JR&ZM7p@H8>E{<{nrlSMh*c>cJd za{RGWrPWixd(I&!Sd>hy>4_x7pWmXq5X%x-F7jLwR()pvWp7pg>uejZhijzat?I>n z3a&@9A#+2rM@f&YRNpkh!>=~utE1}l0wz&qSCR+(Sof56Yk#X(nBlMe?2fUg%A(%# zG`R#iD64pDo8k}2u(oo1*ZNaZF;R_n<%*+J4JUS{J-N#pR;jA*z)l2b{fEB8x9x+V zgTMKjKWF&w-~05xN16py<7<2OpNCyaQA)X-gocJSL&cg98Lzkkz@KfgZ!k&IsDTw* z=32@auh2maQ;EEf+gEHDAXKYhy+neQ9~n3l&iM*Qoi;R3J%qjeUi;r4MV}poax(n{ znb&&DfJ5%S4UMeB-ydqHf?`Qa zc;~WHr2CNDdJg}uBDC@rnu=v6%wr2H)jypq{ozpZ76#Zn0-lh;hu-#yFr-J%aG5?XE4|*?AsYsrdlI3fB!4*VDzNTaXLh-Lf~-stsj|05@2JH6n9;yDPKiw+H|~^ z%lfq)!=@~y0e^L*`&PrBs#}xZ34}ioep$Tq7KjgsPV4n(yA8Q+a3*@bHvlx-(*4sw zA`@n3mucc2CwHbhaw^r@22qLSp=NKDq;3r5HN!(z2~lY z>F}&vld)uhACB&L)#A0piGH?TjY$lcym4Q7<@K)Z1J;R}f7(Dpi@Gt>thO>uVtBVU=q#KeT1ZAsDa zx+p5Q`c__|?uG0c2x!f3V5Bt>v!_=3qAa`j_rjx>k~{DeNe#M4qWD?{Z7Q*A z<~Bed5e2q3cAA{B+v)&5KE}UyX+7fFuio*c#Yvs?r>+^9;^c2SLFE5i3iNtdQsu+0 z59zo%#7nBXG}(V0r}>;g5M%&2vBu{m17pO@5m@|@R0$3)ZXWcFCEwP+E>reqU1?c5 z0oezPOObAjXMI9#_QHz@IaVci0jBtJI@LV49eEv-%45Q@dTqR7nhlixYa8IJGBKc8 z$k6R5qTTKzwFA*Z7>wX}vmvX3>kIQxN3w%hJG;8He0^W^tU!33I;iCGq1Q_5mk&Nh z@O>#vPSQ!JvU}%0Mx=oASACw~kG{3J@zv^H>V0gtb+OokxR=E-?)@&(0#?85uVFgn5H& zh)utu{2jp+QCt{w0jG-1f#IBUfL94&3w6udIAsY#+LhP3@9c7Awz zJcE`!bHTPHaBudUH|j*$ZrDG5l}1=3}nx-n|d*rlia2b`EN6-bVY-EoeOqnyWCcCAXoRS z8(`!(e`ohhg2{O?7g9*z(n7dGW&h|EgM;}}jXS5nC~v_Jpz;xN5Z$^*=J(Ce z6!3LLQW6rT433nP6lPMx7Jpipqp7c(5q1$CWWo3OEiG5R|7f8wFCjB0LL%#I;)kJ@ zy>qX$AoQ#LGWict(Id`aei6uSk>xO~W`?pf0|xLoqvia`0_*VuL& z4uRd=RW9=~y;U05rdVGZ2J>F^n|nJ{9jxbzgevX$3edIa{#pNOB&{t?>Vh%~lO&qr z=i0SfE)M$(+5WxSQPAy^rC0q34{h?U!-0al}VlreDR=e^osSdJRdXPZdGl zJjIz8bN$5WZ)@@tFZ^C4fh^dSfeS#cF;hICepIGMqEp}4sHv%$P)at6_8R-9;DAdd98C44DlFNQd|-uu0p*ensmaj)DF7l(_GMQTR*+RSH}eD?s*a=h z3-4a-AoY+`#;CjhTs)~vD0SjSEuNs`5e|Kl-1#A^#IJDKD3XzLFI%pR zlpsU%=1y()AA`$FD@P`b^aI4&o+!#kA8~rIUmuHtyrnlQ4|u-6PGlN+=@(&rf0PW+ zN*WNXUY2*S#MOi$4`i$6*PwkSGY;#ILt0V53Te}sltc_R@_tyosSwyuSc2{0I)Bl) zwzRP|(ga{hT174aofyz^zk`4Vn>tz9SGKKzsMV_~S?MXFXm-?GN#*fUMh(l91n1Jv 
zK5?YVmi_zE2`eG%?Kb7wM2786O{lNBaw7SnW3g%zi7YAqe&PSe_fn7vcMbK=2DEI@XVRgz1 zwycr-6%_~PX*#++c1cOofDhBtnPU|uOe%|2)s#`)K5G+NmWk0uPMMw2Vu+Zeqa*)p zupiu_D&&gzq5Jgl!%Q(UVEG@xN8HpP>YRcNWDxQ%$u==CH|cNQT(qvuS(rc$q%`!q za{QIV@6<0Kc^#B-;Q&D18j)Ah#~v5}cC8pLd55K-UGW>*CJ{(Fx}JKTf(0 z0FS{Sc}1gAgbVP@?65C#%?uAGgMT@vfvx~3GiDe+IXnYJDjV?C2B|BM4GYj|Ap~wv zIcy$me?m@HVqk?T%*FlU22Q>%)h?a5TXFxM;;K~h3+kb2n^SSr0-=fN2owK&EpVny zR$4S$?r|*u7{o6xPg@8!iNQ#sSIf@^BuTzhqx23|)h`(VB)bHgY_&Kt0vOgSzc{H! zJ2sv%y^GwCz&C(fw4wX9DG|K+mf6g#ArG88SAeRG*)e?KzCDj&NP8<@$LKIDBUYCF z@{}9ivXTh?PIKPnm;ItBgF3&i&wmHsYZQPNdr*?C7fq}^X;1u0amiOI%`s8`IaL4O z<@#I-YI(-v7?g}L8(dXtt6!(6A{toFm_+Itb90{Yb&uEK!+|zr&y0<|Eucjpv0W(} zCo~cE83I-Xv@jPTd`cD(KzX+5VP)m3hf5JZ{6X}4xHoQA0!J($ZIYe*Usv}pgCbKW z;A+WC3nX45hYj6tgM(QbnVADn>tO~7Yi@~oTX3|ELBcM`EI{2TX@V3_!sHo3x=Yzj zo}1XgY%d>7nO70T#E%5aGjkR9q;k_h#^0!|AXGXG9&L$dV{}r^gq{jQCDMu-<5IT7@!7ugCjr-F?{r=_ytNA_<7;(2mtNOlC zTvEXLc?-?c(P5*NhhUu){CIgbSV{%JYpL$G8mf^{P?WJ}g^9yHfsym}+q#QZ7c}K| zv#rv(jg21bD$tSXJUNA$BFwkS;~V}C(rfMDWT znN48Z-_YOR`_F&nB{;Xb^QaoUy;fN%cv0+KGOaZKdsAVJ*7xtzfc#;LPYR=1LwR;* z#vf#GWlCE)y2y!QH862cz2k8Iyp)B7h0F$&4Qzna=DKKUX^m7_u@2UPh8fpUOr=dj z9E#H=Xv(WUsGvOpCx3F?z7Np=sOFJIs|!CMbI%R}L^EH@cU3|fsECat&oAmNtz4o3 z8xk_J0lj@44LuXma8+vXBqZbv>_bV@RuuWb!u3|T@;pfF0!11-o6?jABuIfQA}IC@ zZa5?=vy`@X7d6oPCWR+SDLk-JtEq*tT`OSXN@%q1vAi`^Up0RKDT=a^k~2{o?mvk> zTBg4JogGFG!XbG8yX`>e5WUyc)dd=gyw4gey@u?lx1EOh<<3;a-{&E9!~LsjY%N6g zKRUy9!yy))w36mB6r#ohg+(+WXa4G^f3N;O&qfmi@-bM(MTvA@yRmMS{p<28%>F85 zlki2V9RAiEY=0_Ui<-K75 z3k{504~0G{d&TJ01_#(J$?i7=oZfVC_TJ>OQVY!C^!mD_1-)bQ%7Ns`fmQUq7KWx7 z7-}~8jCG3n!RHIbQEIWZWecJmuErI{-iG?bBqYeO@42gMjU5%2k=qqSN+l=jP#q$Z zMuo_gx?WFw-g``LT3Fe~Za?zf6n#@y$`ZXvFl{|G(RSsFXqT;02(A zFD~NrTD{63%db21PKqAVu-&%ILrY)F{2#u)0;;M%*jf+}M7l(}LFo`lNkt?iL_oSr z1St`bmPSfIy1PRurBg|1MN&XOLQxvOIVS)2t@qY)xmci=d(W9OznMLI@6m2Uc(0ca zrgL1{2PQ{nm0IWnftXJKKI%-PF$mrF^Muou5XgGzChMOuQYac2&_XToZi$tQT@z5@ zmo6vg=b?R!j8fBb7ZUI%(`|+sX`WG=e14d4;x<4Q{b2iT>JIjE6tLdMA}(+~%W-)q 
z0t!6F3-HL82Zcn}EplHcEk_`La$eD1SdJP{Ova%d4bsWP`rgA^=XrP{fJ12NgqS)& zPGPFk1J-fd_aPl8AX~$(S)46X*jFYEOW6b`%-8U?uaiN_B=fyAXuyn3iUt653K8ES zpeE`oHz~}6rqk?d5j3Ht!F{K4b_){_Kp)|W<3~WLASwyQ-JHZMoo_9VoWU@4jF_jfZfTNlS+PE`&q17Eq!=Y#`=6byQt5FQa)Nd+T2uBFG11~|SoeXQmBNK}UejIII21#q|bYqK> z=%B^B=QvO?=sN2v7FNI^e_pq?k~PIoe-)z@l^xglk`Yhj5SHA8oShveOoanEfqWhT zq4cz{>|PphVIEWywovx2gh&O=iREdfomSG zzn~DYk0B`N|Mf@W3L{) zuUB7f_2}V4(cywcy1G7ka?;ml+X30_8c@->rK(Dg>_&q}QRC?8Y+OHp5plr^WLYf`T%*IddPLUDFQQq1J?uNH4?wQTvc992T3F2U(+Y%e3+ ztUy6QHuN7O*OUdRTEry_qc~rm3_X5sj*&yHzG~Ba$Dmri!Ln$k5sN}1nC}0&%7Dh6 zM5!ZNk)t_~E{wu0z*qeJdJ^Y}panby9xTDI2#$fMyEL_>sYzwZ2Tn#6z)fzt%`Yq{ zv2R4Q!<+?_$ydr(OuH`)Kx-Q^(l<+}+=r3x9S#qy8t~dpociJg5@4CYx6#l*C+$j$ zYv67WFIU0M3c9rP_lO4TWA{z|r^>df4-asfRIZ#$OEcs*Xgo1f4rgfD>wcD$henm; zc|`qS#+D0aR_VMR2b-sg^j>P?YXOeq4^{A+i)w zima7(-h>u?(yq<{Ue6KkX506giTJs}CitDQZW3F|glX$@Wt&&-;S zfV7iYSy>s7;!MQG#>S>__!|+`pi(MOshZuMg4*D!8=VPqk0j0!vTANz2U0jqJdhEIqBv!^;;g^F2_@|g zQ@_{A{XB4wD@P&a&{p_CFR94g?DvcKBcO%`3#5NynX4auAD??GVDy&_aAfBf1WaSN z5|Rle*2;QWfDyQ9dW(efYnrIkI+7b1`f=`{j{@*U?0|!QE!zzI`&pq1jbr}|2KC6O z{=vqy7cCmt9Wwzy2W+#Vbk8nzk->dQNy+);<#f#ueQxoGJK-6zn zB`|LXA^c?8G!L3l8Kot|R9~{E8mg%KU?h8MJ|3@#limIjC@Tt`QhB=DcV9I?!k{&^ z*#iB>@}yVv`=^>`+Js;^gktw@$zI zb~J!{dXh>59UAc>qG=ZtkY>{BPmOp_FF@7L52S}#^Dph*R3G7Z<`4-Fe7)hY59$jo zG^)$y$$aL|H$)F%VH$v&n_Ux?qK3_AK=rA3rF;-F(Ur&UOHKMLtFnO6W$k?>RE!`> z%C|c~-q0|Ov!R&qAQ-n~j#|s=bLQj{`&(7uts%0(>IyhX|9&bonNY;h!4l7F`mqBT|7zFVV**35+xIow>4RY06IoHg zhiq~~8XSriFj(*G>bmA}2(tTpBYdusEBQI~HFb4wC#q&c(R4lm-jV0(K;wQ0j^UXw zBF}x^7c-k zgAHnuR-a{nwhF@&8K*ClJ%D<{T<{v;Ok^X34gCAiqX%|~-J56KL)IbIf^w*k%MY83 zr@%u_=`yNn&W)C2&4h$7!zmbEGzj>_9ET29{XNwS9n2RgYY*R2?xT1{m}#QQH#p4{ zM;Yc2QJq4;@0XZv4YWYU%&buH{qpL{qxt~0I_@RMpXt=$X`gQDUy!Y3&te3O2SV;)%J;e{V)aa1I zv6;@rPEwv@c0`veSa~OjuHnLB0~b678FbQ*1BCAcTLY~{)DhZ?fjV=J^{S^wdN^mf zJ*A{u3fj<$$tDQG=^tX_ib+I|Jmw-q;aB;5^1LQ`r#R4plIkeaOi4scv@Ly@RiP zrm~s1m#n17lJ5KM_i@RLy9fSw&2siUmVdS4)s9*3mM~6eVlmUXjmtm})g*Rp?_T$i 
zrgGvXw#2Yb9vFC7P3duR&WeNDeQ+sIiQQtZ>MPVB?osU2KAIgvXEfK{U7ncDFQh%( zi>eDBIKw{HjjF#Z=tlGLH#b73g`=>v~O#o21Xgs zAz15JoUm@3hqbS~@xo5|vb2bg=}sv}OPql9^J%%eO00Jtg@uMPz~XO*d`VkjB5vm; zT^KS4;$^+{%NkJ<#?x~63VU(T0i}}KoFr^W$-9x3zEGp5Th#j{X52O-70JN0U$Vr1P z2|djMsME4r@F~bsFXqE8uRQ*lBRCDg=9wWsOw`tP?STeFRyuHhxh@I_n1mxG2m&1B zYC`@%b}U>e07P8`w%W+qeS!v@iG|t{-EDg<15VLpE+}j|q{d;noy>?vinzNa%#_S< z76HRVj5E5s;~vP}(O{5MB0(Kgyk2{Sw|jvpz)96qD^>OQQ-F_8;Hh~Eo1THlvrg$s zve*_CnnWWdDzwDe5XKisRql0Ij+Zk5x!|1zQT=XwN+<>OdlRLU%o@jjwD*jaGY2gi$m$O87+UN7KGrMf^5CXaWt?(T437avk3dtPl8jV33mXS^;s z{2?j3N+gDagU0drbe9%kXzt?CrpWhDyxZ~kd;7gYq*I}TLRR2ah`b3C1esSR%w$!@ zL7=Ky=WImR4zrD}wl?sw1$ZtKYSXSSJ8ftUso__E1!OVb8LP|11(ASvh~QD1X6ZQ+PbxscaOmF-m zphMfhx{?egYna;UJ0%GX$mvs3Y)Bnt`6jLmW}L<laKIHI4 zCoM!l7cG&W?cYv^m1z1@2-tWrI6oaATty{Ev>hK-Q8ML#WAjYQv4Ge^X zK@}K&zYwBV&eVVK7?UjGH092PB=`4MPN+-lwd9P9D6H{EwCXj7|a4E;Q zuKQ_T#eP}YH**q0GdF{{Ahxgb#wk3B!l>^nr#SvzHE2J^XppYtZttBtt#+ejRpf2= zVU8J%xl!>M;jrTN8nNsH87A-p)&`0J)b%kpTlz{U1gsRmL_(RQQ+{16a;`9Az8uv& zN&Q*5dQmi9*t>EjImff)G@4-bdigZo)CH?|s@5zwPnpbgg9iYen|aszi&uxwv2ps`8cRlaR@L?WxbEC_VJ(*RbkoE9zTV$4~7+3{H8vF0%;OT{fPo9{PQsH5wo;(Az z7KmOPYT`_K*`R(<1YCpGqY^evq&*oSeVCt`7JhZ&vK|dohiQx@ ztdI@(ahIdXR>0QQR=xhdy}Z19X3C?9Yf$?PsH!{Q_F;*9-FP76yc$P~FXi$eU^{ci4vXxv?EQtrRY!{ z`gfdJH^P=~Ig5<5C6qn!QUP-obU*wyy481ieF=_9esEYeY6>$1ekDB&&dF=Q=5i3O zzgj43-R{PW-eddU$gI|#WN|8Kq>_DF^J@^mT|muv`;-^#{(+^sZN9ywm>*#_Q62_; zh-ODX)wcK4TLEYzAmGm1tO06{1=XWfNh9hr79|-q(D=y;FflTQ*SWnU9`0RNdJV!W zOs}1N;M*iZId*5_#od$*YEnaYv?0or`ucN7tNf}ZugYvN7_(u$!IkgYh^U9YfbBt% z)eD$GZLIOXG$Z)@?_uz~gAdvA;sQa&-(6ire zJ!=(-5dW@KTQ%5awg(bjbh^R24PwNtH|v}~m+z8c`{T++f+aTQ(67d&i(k|PXl~-N zag&Za4TA;|N+vQE@mno}p@8{Xrsv`v3*8fF^N``1@{v2TCQQD&UCQwWs~x)aaB;Mq z6FCL9=1fA@ry5d#1s0K?1l)w|X^XpOK~S2G?6w%@q(N<8S3JXT>5}ik!3P>vSWE2+ zWI1lASTyi4H{FuK88aTdp|tL>F|D+3K+ejn=2TA&^~@Rxdp=?#7%Y1V$am?W+I&F~8pjbNk;51&DaIn=SSvNR~4F?L`>QM3zLUTty1(Im{qMtGQc!? 
zJKu}ieH=OAh7@&X1m*HZp9PDmG5K+(lBZ|11T?EllXV_4-@gq~K4LpZ<01WB)lb>Q z!NK8_`UTqQwE+eEW!ARkk9@pacgX^r3Jtu{u#b1-7nNbanvh~tN1i&kJ{NLSH)hL> zcM=_VnrBhK5dZY)JrxZakuR&`M7CwlnIte6^)xb4>RQG-b0!mTBFtG7am))YYdX

L{&r z6G)tIq`tylrZmvHCjqTf5`5aq&enm6+zlvz(Je(}4QK`v!fpbwx(wpk zg}YuMv(aP=(<#EZ@*AsQ)Zg07`N;gJ!G*v5mT%OCp< zKZthjO##zO$=?1}*VI%Blx5-R#b0V+|EM@fhl0?;MH^rO*7eF`m#=$j_;DJec-c_= zwSj3yKQ-PyaAO3_Y+kAl($m|8o-YkKwb@TEX*NIYj?}q9cC1j6Xs16>*6Z+-+-~Z3_;dxp%!~ zC{WcP=_1;Yukgd_6DhQb19;?c4BZ(a&_+2n#3{XtioY_peeiB#31@0j!5Y(-`0W}+bHltwu%a?3`os72u z{-n_2|D-?G*zhXCz)FTfi{N5R#o5$*rkiH>EWhk*5?KIj8^70_P|2bYN9%wVNxIPU z`P+&cvVE9;xFDu=?Xv0XS1yH%doX&G1+Px^qFBLi75LID^s_RgdO$TF&D=16V4Ppj zxTWv%i}WoZj6te#I9W>0aQ)I{;Uz=iCnjHG~d$kM}ont3m}3fKFaR z0}Y(HQJRGeIHXb==U`@6LfaU5x=jHm#LooaNfzLfWvlwRaDq}P1GvXgHQI!lg~Nr< zUToChVDDeM@1GG238x=Gc2+m%-8d(q1M<1Pc>bNqK0oZmDo4ra7VCh~NY1eyWS3>S zfK~n=%&!N)twJ%2X+<2{46!}|dk2ezwSN~A1ePaikUvd~ZxP{^d$Kb! z#(}OQ#M#SF119Z=e!%JjqDzIEXlrBUZu9mSn0S1vVLv^;E?MQ&G%G+lK>jGF?8aa? z*{yce9H8fXTB#a}FGKUV-t!eU2Ec+7rE<JRd#)Y**_efsvgVh~u*KV}2&r$!*l3c}9==zzn z34YTof1P2a_aM$RchS<^%m}8Zx^&%eiJmsixS7$(@FG-W+QwO|M^_5w@d_(^Gq?`syCK-0ie>}#0Zf=y z;h-xE{X_=fOx?>xs#*E774D?orPJP%&!W8rx1rSq^Kg2FAm3`y;1K9QSo7tDryjl~ zwDw20fSW;r#-R-+NMDDFZo4S13u#uj+CX6xAHC=v=rZDSC}^N z(goN`DhYuh5PlJJY52wO0Byp?lk`G7@H)$e1a2*x-N#2fURHg9e%5pLoPfqANY*W3 zSEbpDJNXi@4WiO+M(XAaK6aAk{T$iCDp>L>xeBeE$ZQR`-TF7C_*~|C$eNsXwx{%N ziGD*iGUTj%Z6rh5e+`|-%6N9&^{BVT@lzzb-{R3w-u5h|QtqujpV_%NKc79#F$>lW zHLG9e_QBSR=tLth2bO{Ya(GeGs3$I7nq!+os83ae;YHtSFfm*H1c~z)GFVzYf>dPL z`|1+7>6oT`(@S%6VSOwNbooGu7$A);HjV~+ICk9z)_tg%v z6)F4__v_12+Rg#7p6Hu1V7slHWL<%P=`y_&dORtgnAl43wrb0(V`h3fR~hurB(S$M zAm~e)3e1vnV9a5er#fb$TW-Sk0Q|R%#DkMS1~L*%!+pHM`h^sUT2?{^;;zpxYY zo*`wh_ZSbAJqP1Y!uNNEycbj2_y%*~44z(&gvWaPB_KA~y`qm*K#4S4)sjx8*5m+2 z1LCmosAOOWdO9_bPMQeeoa4`gQPpzEZKKbvZEaS|b(MT?9&Ii0RNBv787?dBge59O z%zk)QB)Kn@4QKFP+|KH5P*Q<&DG$0##DGra=IUc7VEhdTkSdo_V3y1QEp=!p8fzOC zLoFR0rpVT;o_-5!78F0?L;-rqUQJ|gVP7{Z09#_PyCeOmUh=BJ%m%DsbiVOfIr|@7 z;0AjoE3ik)o(0N!Ho)4jw19&6>#ke$WunIf6cpF>^g_`FGysD|c=}ei4_rY^=6m!A zn3#dC7u=acopxZkl6svpl~z#76dri2N(_7hI?LPk&;;LCWpSNgnp6N<$-PQY-VO-g z+T*NhjprHWQbVHb{fP2qKLU?a=P-h6<)22<#N@o*+_dDpL~{+=z~ts_hQDH=pLF3! 
z>7R;|$O_vV7{@BvMtLmC%a2094$Z`Q9TW`L;HVf)p*C|yXAP%EwUVOxaKBZG>ZpVR zy;b-*f5KVpW6|qbg`gMO<`{?aoG8^RjIoqRRjA2_Li#}G)x&4VRA4!~q=ZZMYD&NU zJFxbssUG5XV{J!c&^}-HK%m!Vl)%>Ea1V#yc~#S$NWQOVd?4LsygdG5%F6l3dpwja z;3t>|`poet{b>v!@YReieop)HC_aqhn{a46EsaN1N3vq5Uo^YYMop>_@3vD$)0ytKP0|hT8l*gn( ziMfIN(Mx3pK7!tt-S30*K=AA8SS1ji|_P1RDfaWs<1Ll?({jdKFmaAWAc)KdK=jPGwI?R=Wc3mmzCLjd-^0j&UC_~ z5{~xvejc!Fh!W@+>VS&vu*28ABF4PHk68Re`@lHYI9KhecJwO1@7k(Vo6cK$#M=AL z#13;o9;_2k1yZW1>9cj{N=!$!EHrAqG}y|?H%(m!ZdX=URL)XI3FDjDa`qTQuQ$-Q z`sI(gn=cQsm0KY`ado7`#EeAN$jB%oL({PtUF$uRZnozxKT6D{PELV}Bd~ZlPt6|) zRkQ&T01mQQ3uaj}YejA8rwzp|dh8~~U?s+7!MMrF1#+??Z1W4BG*7myT2pH#G(C=Z z6e2^|A3d)#KUvH0@tfTzTS?^E`ZE^#MdVk&?@SQn+Qg2Bz7`hrKFW{p$;t8qFDDj@ z89Ijvt_$J>V?8Yi(8`yZrr|yT=1-;m-T%le8jlcCori-h9>e3hFtInh?U<zmD$jXbr)C6VTwFCixK=lK9=-yRA@jz zDwJ84pVF;xv12m*EjI7NWBSeXm$c!f3nlMJs=CcIsPbX|G1~>irF{9r;o-s;$eCES zG*+h`*r&kAli9|I>-NaCGHl448DO!-`k@Z+hjhCvO`cP*cQffmI}~RfO>3fq(>13t z|B=WUe2S-S39ge!6R{e;%->J;+ZY7WWBh7vfti3QvM$h4Xmx?NQO zJ;nUoThTVrs1{VyU*Y!aZyRKS^`nOa4#T=m9^QWgoKkaCm@kv)X_K(;Fu8BGk%0w| z{?>e#ajo49{id{~%pU%Y37uB4nrDuopxF9R^%0KF7;`e?;{?u9QluGpt&mC+!wgCB zI+3h9WXw<348XE#2(|}lk!31zL1Ih?{zdSYF|6iy@3zc-%!ic#t+~>vV64n%1<%Mlh z0~sP6E~nLUhV~#b4c#UJbi_TW;`OJuJczBM%IUh|_jN8Q`RW994@yP+GmP!sizDYS zu(32XjW4wWKTZoak%7X3<XZg~RX^ntG=M!TL!CmP z%cWDz?dOYjp|;Cz3UmZT=B@Nh&hKwvS5@`bxp0B&WM*MpS_Xbytj*X)NkWhztI$fR zHGbVGa_r(vX+M4@|9nV&gxL0Hq%Fh1hCajP!9aO`MmlbUkI(xazt`fhfgbwN zD~Lz~dL?~u|4=Q7p>+Okn3k$;(PX;oFi@ICR%T5i;!vjcbm)%UC`NE%+)Ms2n#)&U zSv~LgQ%H|0uVadqfW%O)L)uw-NX;1LHx%S!BZ1Dy318^Sv24bylX?eCuvA0aGj{{lB z;m+Fqzmrkq#58C?hH7`$vhcp&;z}|6N-@9cGHyKopTiWSm+!^mvBHE04ZLuF;FkGA zcu|{?|4;K05aDm1Nj)5*KjD%zL?8SBa_*o{;~zR$c9ypc2gTU$a9 zhY*GU3z;+v0mtXv{r!lBhK8=8JMW$k09E^&D@+VxggSl`sL$tH!z&I$kiTxcK_r4mld9HNuT?kpDgkU6;q=)DPdpX$H>!a7n{4lW+fR00JBYpeYr?j@q7eIL zYUDXbsbRYY8l}z3g9CKMP z%^jOi0}=$ILU{Yjd&ecsyQ909)y8LnFUPJrXU-XvhXQve=qJ{ zJ}|#{IrKSQ8pi8giJ;PcZNc>ut;MbhSG^_I#x8-v)++H{>b?i)US>fS@_$ zRC!!mYH$}Kvg`%owc{4A+q 
z=~?L>oc7Vt*z^qLrIj;yQ_^r4J`MHL_0ipx6X}L>9O~gOSB-i#aLHXk;`k_rEQZKtUR-loFPP zm_@7sVICp1`dPz4mx;JktgXl}%Fm)Qd37QB5Kd}UFQU5c(z=j5@ORF3;U4&C_BtlC zST8h+1VlHS!-cya@>*4`7yl@M-Ks@ulJ>%nNz>djx zhG_;}L=jfQ^bS?NUaNSAjV(9E^cbBtn?7smI{QWAfoJ?<Q-mPzFz!xGxHej;Va(?WF#8*I zBiiFpqoDwu;e}*M#~ymL>0)DokX$sxT5T|XTitIh||odp;f8-E*htq`r_Jo}hu zo9OzP*Bv=Ru(_)<-)bP8sjaou!j_)*3oXdEbp$TM_;`8vYxv|XLS8nZD~wD`svsH> z_~t(;1Y}f2Ny*EDpxW#qq_VV>P{eGZ*t&3wG&m$v3N=e{`#m!v(mFIr=>4tc`BZlk zCW>I7nOm6fSSlBu>$lNtQKsp1S!JJp&HU(UvlccONN9sjrT^gq^lMsuUVZl>wmX_2 z*kTPj30V0O^frw-eP^?S77?J_F#FvWs<`Trv7;KF!hr;3Y`EEq>^?JE9cgwQ3>T)r zIoqECqWHoI4I?A^nwr2#u}eanoJtFf3?$#}lOs>|LjwV9>qpu#Wk$Fj?V56+xx@y^ z!fAKtar2$ZD2VH%m{`C?MH1l(keC|MV|!hKp_C;cfbp%;tE_DYI4!VE9Q7m_tFtn8 z4gl)-9x;^|!RDI$i;Ig$|D(!5N=T@1vmhUv8+_9MaSa?Vl_nK2CCBi1K@9w3>uulq zAx*w^;c(KgKi41v6G?XR()|rur%&`px2@tz9X}Bwq>fw#s~<%I*6HDY%^Z*cDji5{ z7@l1u{X8~jR&%E2!GPu8&_TWcyj-5;aJkl|M2d>~G6=6<*$s#t{}&0gAVE-6V8Af+ zg_Kh&3U#}UmxQKfu0m_m+qm z+2sxFz4@arDqbkNCKr(NM0`(9{UMADk)j^<^cn7fA=)RPu0y>-38bn(sKv9Op^fIa zt#OXmB==34%vahU*V)1NmrC?(THY8W<2=QL=jrbu?Z9rX*`AM9Pp>7Z!VDbl&uf!U z`X0JIfW6AY{3Zjql@~NR685u-+1R;r0s7VpXht7ycLA$Dr{%2=B%P|(<^bgfxtleC zP*G$Fjtyj)17vR&<-R@jjK?}V?T5e$ymaToK?b+8bFNX&c}uxA<;UCD#HW?5#=U*GKlFHhb_M3roG;b3*c5!C!&BQ*`!XT11+gm;H z_EgDL4s#^}9oMA#|SNiYfz2Ldsi%t_!#%Yo^jnQOkI+v_bz`UYl>x|$7e zjJ*|XGZG=4WJ7-fi(bDVt9=LV=;76KX3nhYgu;i+b;TVt>N)Go%(1WwN49#tIvc^s zyti@0{Xj5>mDtJYue%5?TVgzsIUXZ*NQ~$z@L?RUX^`L>`8gK6bOPIHRR{{4#qo}@ z{Ne1^=Ty=q6M@&iG@v!3 zu`rHEf8(?O?7X2|731L=Fhj>J6S)FT09!(iRzX&w*C3*>+4xBUR1KL7P>c|3WMw$Fglb;=h906-vh$# zQfXX~kF0(Z+e9MNWp#1?8Zn%lKBk6vXwkUcXZdvMo8D4iG6;cFkvz5$(ua&U0d*Jn$>ajyn=YAm zCkXmxxqRHC-yT3#6sv13>8Fx1&_(1*FN7PuuXR#ekV~&y|8-E3teC%uuCku0i&%=x zob`nprUo=i6Q@Kmw=kp}!Wr{B?amZ%dkj5O@}x50%rDqe1joOc?MJr*2-?{uJ3b?O z%jG8-zckHAEjuxw8b6p*RP3V>o`6@#M4V>g^z5uWG&C6yDy-Q8ca{~&ICb#Xfc-xJ z_t?}G)PIAJsANYu%AALE?A|=f0RAN5-AchT0;9~mhW2>aX3662=P`|8Xiz*x8T@gq 
ztj%r?nVRL^yijAT1k%Rd5b6vc+d%TvZE7h$6jU-KC0T`xgX_+AmKZH~^qX*$_G20qgUcaT=xe9W>5)v=F~gKst25>gjF;w#h#Z2(+ODx_nzV6>$*0W+ zwV=|HqFRPV7n2u#Uiva&&oV0LFdhNp> zh%N|=n&Tfg7K=g=4iu@9)4TE%N4vE%z&3VMGnhuzI?T8dnhi@R1Bzk9ms{kfc#<(4 zAmSVAAMTka!xw0V+fQAJMq?SgsH1Bg z)GDpL-{=avKA4X5On4$DZSQVSbar?QI zWeo5T5uB;*s@lwtNQ4Um`jJsGNIl#n9Os>H&LymBXaE&E|E9m6pDO&!bQ>XOLIE}~ zloee_u81QS2>}p_bvM)9EQiWjWNx}o3Eb>LaD#Qn#ybv z$?*biT(uDwZ~Q6ib(tdipri+o0XN>i{cFe9PE=yjbVkAG#E>^duYu(R{eS|gaheez zf?K`HItIWJ8v-(|*7z@Vt-y?M-MO;52>v$&mxYCeRYB^iILnmfbv7u(ar;)0_BEil z#Adq(XV=4;Pj4nMEMf&lv6JOta-lT-q<-G|RWSr|LC|~{R!Q(Ts z7NM@ek{jZ-Ao(!U-<*7c{2wsEMBAp8J~|NVp2`7VsbaA3GmY^f z80}M%1IvJ`z|tfX$k`I>OL09q*o+(Y)-~YNxujfQe5OUG3^!JBEuP0e!O+&Km+ysI zniM)>44t8*c;MpcS5mufy5I^t{^15!a6`EOi(*h^Y+;cD z8H3LMM3(_G3hDC3T`bEDloP8k+mH=_9?pm4zx1hN@#VQiMT7gXBDqFV8Dcnus56#} zhDw}Lwq7XCN3iDwEHMNzJTZwnA&Q;UGhQWFGhU$Hp;I~?3(RK9d-nD!j*-)kDmtMI z?k!jR$ew99gZ1R8k6=>8v`npv?j8JkEZy_kfq{W(z(?v1LOfJJ)Muk^(fIkY+jqnp zUI}2;ihxpyy!HBPu$pAI)tlslT_1f2NH>W@SS+#hh)!Z`{qXp{c2YFRYJF~JN>73S z4O`xVUhzRhxZEw2Kw=flAo+;%r_bxkv!07j*FlM(AM~bgyCL8kb7Ii#6c6SDbXk?+ zQSVoM9&2^4dwXrJUo;flO@`sa0i_WtspI zYY&|;wBuRr>w1e)kiJNj12Q$eGZ8gbhkFQ)8zM;`ko*eSfkax5^eIMM;i*g8jGbcs-^hR^Wfj# zg&#>^pz9Pk)GDc}j5B8qeBD4lcOF1gvZA=z z3DB)F0UQY_n7HAtwsK#U8tkgmf^F5ZB4dVB9Vo?Mg3VB>Rz81t<0LO0A?;0A1;Thiqc9ry$$RcMZ|13L@yBUAx6u;1zY%_hM*O|G zr|gAQWJBm$U5W2z#llb?|2UBU?QQC^ZwB+nWi2*Vb%$Ym<To9|mVS2FR0vpZ5%P6%!SDlZ3wRpkzqxIyTLp`jxXcuBr%?u!@7j;}J zwJ5a%F|T-mWlgU>&>M}xx^Mw)t3p*;{@lh}yD6x{P4>3P(a z3)>yefwhzAmxCjtqL>7nG6+nzMz;1!bnBiIObi66rMSl|-P+(`a5G~6d*da(6VVpV z-BW^qXuwXSqWcv_68AA>uFA+w<+5HAddi$Q1tb<$)h zy?N@zHjdzX+H={Z*nH^Qa&H!LCuWrelHUvso+h2^Xt*Qf+c12E*Wr$kr7h0*$G#N( z%KQL~DT{wG{`0FNZ2S@XvU?5VVF{}T4dF|He?r|vCiSjk^j2^XZ?@bu5hk9m3#KN* z?eyWFP|=)tuPA^#=TXR&fA6@F2Blf}h;@KP1nY(_i%62)c4jMaJc@d&v%Y>;AZk=d$qlW??xC8vYHh+qN~b!2VWs%VAxXkk4cUT}V5KaJ>i+sS?ruy{Q%~A5965VQBeBKIiwt1Dn57Y#`5ZWmMSe!IQaGx+x@tE-@PB> z$02q6V$jWBS9*8?G>&rmqmz$QlU>!C$rSFYrQ 
zVbWGN59fRIAkqs=dsAPEL;b({?Q5AH{`}QKij8WfN#NY~8d&HK=T7a*{)zZs4*;eb z#Dy(Ov7V2a>I+G=xnY58*BON@?cy zGx#z-Y()+pLy^IMH}Hg)0fawR4C7bd49|U!K5J8|qEcz;U%%FCgCI^|g#qN+&mEe8 z!5<5hD++N6gRK8?orLt@aqMAA^`bh&b~_pr@9+>m@%ux2v6I@B6wf{K@gU{$Z~174 z;w^lndm=giQ*{_~;am#S%VSSAW;R?M11sKJ)mYWHMh85;zHP+C`F-!3ucyic5`Gyo z4*l5Zt0w1uf6;lcJA}Typ6dMd6_o_*_;%5tZ_vp7cTn$xvFrsYd|vW zva$~&lSf@Mq|&7rc9QyZri}Dh#QpiT$eX6!NB;TIVjLCypR&P_Y5Kv}%TJw}mxoM) z+a@O8WKi6Ws+5V0A~g>d!!RZ~Qh#fI85gcWC*P@KpA1RDn^Qr_<7$mBjX3dUI+GWl z=^o1#e_sCQT}eAHp~<4&cV516$CkG-4Y_Z~kP9#zpq!9(nahtR3Cd~G%x zKK64odWjny9z|c>_s5*Wp2ZrR9;?|#o(Z1g&jjXUp7F&jlvo9OLF1Z0Xf}$8Xn52%fj?hgvPfMFfKv7^+leg zxG+HAm&jH3*!A?~qkNgxdaHBgGk)GjT)xT2-ZkyC+H@LTZ`@ucQ&?2VWr{@yN-F$L zCDhL%-vp?87cCCz^qqz4R{v(De~1sJuI(Hf#T4G|+`kVo#A4?I6dVj0vLNpBj@23qoqJF1T34B;wN>6a-GwYK zcW-cZ_4%a2Eq1D-sJHMhi?So zV55ZTVM$|ji|w`K?<_*eVfX4T&D$e*#F8?^#vaTUPuRa%!W<*;X+$aCzKsy%n5@v7+l?^yG*gs*hHeX=seE;W7dAgu% zQyk5yshO(FpSQuxyzQB)RQt*1RPCu{$`qAt{3if!|9RwhFV^4SuEsXLc<_}oO=u{V3{jUd-7Tno7dCX;tR%wgSr=CsSh_2*(u-)=m7 z>_Ih?`+mlaUkslc8-ECEjl^(!*HOJ8C@dhnb!?>f{JuZm z!_0@{l0vCjSn_3i+?hY$?JV_l^TSOFUlt}e>ro+sPkr>s8E@tO>48s+f=nDel_wzK zT&8&wa4h8N;Dw5@j(xu3kjCL#`Wyfhh9g}YV&>rb@#!s7sNipz@r-Ze!oK$x^}7oc zs(?}mJvyk~TH1_!2L#_rzFY43BUasvtOq#M2&|LRfz!RE#_VCydj~Uhoiy&FS zeB4>-J8YZ0))krM&FAV_F$7F~SOmZ*8$=uf)tWp|z~_M}iPB4`JqZjHHn54X=UfJ{ zA$a?+Tnwa%h`%3w@Jh-Lz>jr)BXv7%>Cmo|K$+=!`MY8MF&&zX#&LPF> zpT`i&R)`*IPR!ASz3PZ3$~b?1f`1NGeH1J7D?VodZElp1d3SL>QdgZ6K5LldS-lkU z?~Uf7u(YkO!d5W#{=AZiox3XL>6+k{-aLIY?Rp;WN0!F444<(%qlhUN;p!58s^(w}IawF!p zZ3I}HXk{b%m96#5v}gSdaF4wT6dD$galEf|$^3iET6C$BUbz8Mgu1L!SF*C@<+Zgn zC}WI$pEYjmkjIdy@$lVe8Boz>Vfq~1`X+&FqZQ5`X5)WX`vfWq*4WJo#zG=|x}@WQ z68pD1EBm%07@l#ph{R>kDf|Qm_y}C9ShrIYiC zM}xtWZ?BXV)}OooGeT(TeVwY$9KRK`x3MbMhi+Athgmb^5|;ut@CpH>rYnHj`1p5eGvVO!9s0CU8wWQXzo>L> zqS}@FVK2>ve`PwRE=t|{MC}9)7x9@%*$Uoccd9(F5hy>Iur4%v^5AHHT{XS+Q0r9- z|jK|#G68YTaauCI=(D*e8e5HSb?l@gGWRzjpfq!Eyk zkWfKDN>VyRu_#Gtq#NlDMH*?il%RxkNL`wDAHbOJ@AJ+djx)-==RCRh+H0*{{4>n$ 
z^HnZmacSreHeSS{DTvtdHMBWc&xh#DGk_zDAE|lSoy?MH#)pDU#2zl3|XOG8#`!& z(y|1{cJ*S*pfbQKFQF2Dya&R$Bp7;+x9~0aK_yA*!H8lg-_uN;EpVDGyCosfME>-g z-%M7ecE!#bACLV~JXAPt;Zq3CqR9HE62r~>!LnZ+L8Mfppxa#KgoU-&>(}q(BDMXt z7Al8{Uv0p@yiZ&QdUPO=*czZO)r`dujI`v!P?q|TdFM}~6xEC{;3SEgRqohAChB`S z=MG0k(K6tYb`eTgqTp(98gFL(@#Ee+BMu(d-LkAqMes1O2PYM8n<)pgvR^pGcE4f` z+l&^5oO3`(t6pdp$*hqlnW~&(61D*K;=qxbg=PCwynhhY{k5dd0!YZ)qOE{pu5`%m z+y9;lHzkBUupwTKIC+>{zi7c!mr8^Xs)2tbecef5kiOr$+WpB{p!;}!slf|^mPt+Ex?rMF;mHVoBWweO z3yUQgKya6Ni9bin&aOCLwk${O_2EMpP_gPkf~*HMyc|cgS-bA*!nJ0OR>?SVJYcnq ziyyr>CuBCLW)CPbOZ9oydsm$7kqVUnzD;qz< z%_A0!Ro+8k(GG9m)Wxd`Pzf^)Rl*Xf_NkmHG0D(^oFk#jV@s z$>M1%VDk6L`h8kfg@&S%(pZ$^ycEj*3HeGbsrf|_wtwFWmlC?iJ+~=~co$nE{aO{~ z@;7GO&!Z>scW5vBcYnfc%SuPUcNZPh!d#x$@fB|$f0sLV5~Xrd9|U>Ym@yGg@fYCO z8QzuqK0zIxIrxc(9TWF?pzuyrg*9sS$ZJVamEO457Di%2GvUOWJxNQ6B5qv_|XV;<@OAf-q z+vdir??KgpJ+=eaha`Z{-q+v|jQ~z$^MoDV5rIWv9wja^@}$wufvzizj2gfPGzh!t zaUCQR`Ld*^@AOW5fJmMR#IFIyS>-(ipzPXX*{qx2fq!dB2Nn%;vbyeW=RB;ljjpp< zh_N!FY+QA!sjFl#?a#}OoB^Mor6I?&`oJhq1jBPkytIS49(Tt>Cf3DS0a$%UrE8@d z%78mYD&XQk<4uv!2&unk_x|oj3SZA8)abG9o)`Ud`ZAw?b&&Ta#wEr_`oFhd&atB< zW6eE^JFSwD$TWp zVK%sY&t)XWsl4KRHP%00j@FLD^wG+X27OCY+XsupU67ib&KMi(=j>80wj}B_2J5MF zc>aC)P}{p>JKIZz)cwvetF;x`o;Y{qy9FDGa);mP-(cy%;NWs0xJ^qvg#3w@5GX3& zs@~!6xb5n=`H8Gmo`bQT=b>8(F0N=sK%eKikPAK$bWGnL_um;O%s|o!Fi(&wwQU_R z+k^4T(fx!eCHbBW&ePzba-9G;Go>qCc5bT}nw3n^@mNpBn5tD@%T`tCt8gw2WL8(a z0)j}(^p z;D!UR6yiZ26ztNSCS-_x3kge7fTPQ4vYyPpy;yw-wce$ap$Erq3mVFSv>-!cUqvgY z)aM8ACckv-TvPz(CP06`Zm2>+0$q&8gDXfLSmKF-jkY>c?wn4#pY)?zq8`-1jL;(v zJ@YUd`p-}^j}C#5j|wvL{IUwD6EJ{X{*(>ZNdR$SAa`olniKqZr z$QR`;QS{DmT*~2tV)cA!dpHSj1nv(Krpe!NJ%mdHQ6M(7N79MGD_i3sI?^z>V&<>n zkFv&iI>fQKxDQ;z+~0CLNH5b1c;ZEcU0uqry-i0^UrhVINJDQ&Q=JTw&bT_69Fsow zi7kj<+TJ*BftF=FALYQOqn)Fd4SebbTkQ-wr3?)l2>vo)!PIpb0Nrm1Nd20r!zRNO zqDWC1xbN>ELjJ@_$HSu@<+{66Z4W2|E#M;sWPH4vR7zLH`8W;Rgoj*Rb4>He@ff&F z`>Z+_-aQSZEGDpBbO%z>daxE!PuV{aXoi3T!5Js7k`X3d}#@LG#rzR$C~18(rL zqobq4f!9r&CweoZv)&@vO7IEsO=t0p3c!py7rBR>MB&gdY6{fomP{k%(Ey@{{^#dU 
zS$5z?qXkNKj+XztdfKhx!j;(uRFpl%e@-CWU<5nxAACiaF5_+gdyBcJpOy6T4w`&* zvjyu+PXq5(Gw&IZ$Yy`8)|uVD*YdZ6?l7dNTRriCN+?`W9hlqX>XOly&hIY_zt@7d0+tGDyq8lKfdSz{!y z37EhOArNdkFLL9}bk43eEcprzDT9HCp+>*f({Hq3V;BU2*(UVxo+h}^FwXR-n@g7a zZ#8&5obhgOP%?|;%^!#M1_%{Hzuu23)Z$9C)ng*JXqQbX$T&2kou`n@{kexQL``F$ z3kpuSbmNHvXi+B7U_KSj1X|(X-ObW;An50~R<2%bXh<8c*qZ0?Za=}P@w&EMQSrLu zty|CMcoStegvGrsQsq8!)>GcGFx5CM?pjxJ9nV)eS)m=5HYqVt^25!LJ#Wb^asdbB zX_WbQvRIK>0-=ceJOyj*74vh@;UyS&fa<9yjgph?el=}H2)S0=67RW+C-iha0Q{4r2D(zb?w?6Gy5soiIx7&xgkCO4B_VXBRc@9YZo7 z)pWJ{>ZPgz9nxacT0rkZG9u_>1`%8_v-9d!t0+M?`->PHe6lN%e43&>-si^o zKhc=5+kRhWs3Sk;v~`?9|8tYJnK% z^Tuqp?S8nua$?f^1IHuTLVl@1n+k{>8ekkc9bYk%7J?kD>2?Om#b2sg66Ph_E1gFZ zxi+nGje<=XPvNf|QhkDh5UW!d5n~7EkuPYZK|u>DPtkoSCXGAh5h^h#Dp78yDyJOC z)KW7&J>eD{O(GtOQc^ng8HGcS$wr`g!_s6NUdppT_3zC*o?gPo{XN2Dd+cwfXH+eqW@AaAi~ZTEFp9 zhm1f+`E_z=3c&#x#iv9b{;yj=rfYz>-P9LYgxV7ek5Tv)P8@l5IRC{uiJ@#Wiq*Dy ze2Ao@{o_B76(v75XI_AR@cBz`^@A=Q9W=-9@c}Rq{p?3>!0+2P4!M2#jnfgFy0oL2H-5lW0iY5S$6~;LWi{i z_aLB8Asz`V^72nrc0k7g?5&PqQfkrL+Gv;A_p8T$L2J%;<4qaz(ofS}C)=nxDx4iV zZC4Wez<|xO|GcQJ0KL46{?>}4))T<5Z!wYagnh%SI%JDIpWS6IxmrpX`Vi^#qn1jb zzhW0v3%8DVl*2Sb=Rn^#!CdCh3mDipWptOuq&kW$9(KlZw%tir`{j1N1e~-P6k^U37dUcizqL z>lw;up!SqFzbw82X@*6GZRc(7h9A{E$5tQfmlAWN6A*N|gPUwe=C|eGe8Z2EKyA9F ztpgeJoja}h5Wb0DrWSZ<9g{B$v^3`B3ianq4RWM40gYsvUZ(M+? zMn|gje~Xut=a3C=ugaMxDn>4pJ^4>1OV|&4kV$Jg58LM1^Cr%NqyU=|D!50OOZLmg zdas2cWDzs}h?XJfuryK4HjQ0QV<`!?{XyqS8>n9G2 z=1afRnD5K28@-XCouo2*FWiUa&WH0ErbDE@1b5( z55Nes68XUeXXv2iu?8Iw0>aHJ`qtW}j>l}|9^3pci1mpOYC@;Pe*P_<;gVuxTs%W` zM^Zc?-t%ji)%4Bd(|Wt1NzV{+3O176lp<&|tAW2W@=<-F!qq&Gdt7B4*UQ@h!Js1a z%If3<@Uz5vdga+%m&3zp5R8m{pZj$iOt1sl^`$k}tN?IKx!1SdwZo!WaV~>!ydOjZmsvE!n6UIn$xb7&`~*4(NV?uUv56=cF(tGOpn6x&V?6#9ax z+c!?}w3?NLVI8=m89}EU4CCA18|NvtijI%_W>H68dkv%}tdKH?fLQXz@bXHe?e{8_ zeh;&;-$08UKqHC;=^Ia%pu5jll{*%+D6=0$uY(HVF6TO=klK9(rlls|O1f7B)S0vq zuW<+@$E`b#`1oX>94ytQFQF?P?wmIU{a#{0H+#AEjk4eLqJvXF&mC?8X5pWNzmWn! 
zZK#ks`ov8rC*X+%+4$JqEpk|p(vp_bF#C!Bna||=(NxIO3Na`b* zR5a^?DVwdzeX!P}f?%ulnTmn0=%jc3@&vtPied=U#mUa$RnI{{^TVze18u(nz^W_-I5(sxLzS?cC^2O1d4fB+TzF?D1GHDG) zDm>^%cWFTY7FY0tf_w{*l8Zzx_riB&QPFerbKjoQUlh44(fLL|qqr_}2M9!#;Mzk? z<}#B}=)g~kc)bBZ_3MVqOMG&@dcRM>yNAi0B1lT7+T&i{`E*TCP#Zja?<;q=PxH=y z)ePdvzjQUG@=I+ki94Ny(n+p%RR}CMCgVa2YRetUM^2f1b4Gt#`f^sj9%4f(NGUD| z9P=sA{X{QLSf9U;e4Wne>9M{SKb{PPoBfg|1<$Q@b2#T0N6cT(_dusd_ zS#hS(kI!Ooa?1NoyypI$VeBU%?o718Y7^28R`>oyneNO)PYCEIKOLXdn_^hp(iM9f zbf%oDw;XR8)v~}dIjQc}ig|aRVI=H;lvO3X56YdPv@zT&(_e##O*Lqf?pKRjZm(NR zD1$mLKwd~oH?)7}=Vz}=d`xxF`r$Uf4!XSY^X0c*Zwau*Nxr+B1M2>!twG$-3XUZC zIZ*f{UsH?bb)D*Mc_T;bX%(iBJ|LRZK+WyPmDH&<&Pm5Z;6FZc@W0t<>` zcLQH<3yA80NYcYeb1~Y!{Js{`+T`s~*lqy>W=+%(m6$3AEvj}i`sH^*gh9AS`<-QS z#B>&s|Io^4pe;`q%4u-OjmBE?ztEf|Zhaq)>I{hK&k`AvT9^v9ij-a))Mtl?SL`jJ_h zY1&YqT%#6y_8BiPb&w7F&qhOJ@6jWd&vAoL0pzUsLHp6}*MpRM#s2#3^V}l5ytBiV z!$l3hqQQlGZh-0Jc_&811$`Giv0~H#Wz^0YUW2|If_gYTNwg*RD*MmU`-DdYX0ZWy zBOM#dKm24SWu)Qb{c3&aM}B{baQbaQnjRv@m>PB!PG%E`?|F(~y4(RXtuH}Q)c~Gs z-mMjd1<;{<^Zae5uR#2)S;+tkms32^>|NWv%*K!PeSEuIY$(0nZziOH3-`!bom4WLw>;3Z~GRM6gbQ7Fk z^qYBBrKE}iEOJbaWP&En>ot%9jrSQuFBc^p!X`_2B|bj8yqljp=s%-Ulr(#?hIOAa zY#TW!WeJLDqrqpFE*j$$if5Hh!MSA1hkQVPZx?Hqj5XQ$agP^*RJ`$Q=DadK#4qr| z70fJ_Mtvu|zf0|IAEhxL8t_UQP8)!apaJ}Dlov^NMm1XCiZQf#4&oM)AmeB4jUjAo zpbyc1|Cq}SlwYLs6DUR%Fb)E2pfV7Op~prQTv|VDIZs6X>WG&*t0XLLJ_&xC&ryD% zS_;!p4Gn=Z>uc4e4uNT#q)*|98d2p4AOJ?{;i_%WqdGg`rhn3H!*R?V*-n@wpjW9u zN&$kHnuTgZVRg}Rzzn&)+ZUTacDx-W1HwdNZgkz`VIIN@8;YJ6c*JNJ?IIku>5%6FFx>is^j zKlI$VZ@Ni7_m)jC1i{)+=e-dfskt&c_%D{3x@M525IOyq_%ED{;Ps=jhGOL9RG->c zUJsGOb?Jwo{ibur^xwIhMgCS zbjV_U+e1pV>M5x+2l0|WU*QJBtkCwN2NLeDh&wqe77fYTeX~I|LDa90eI`*Zk`AO; z6^Z!k&Um8MbN$^*fvcTKt&)0r@jA;;2W-G4C@Oq9WPcZqj0p=LIKxRrPN7x7LxDQ* z-MfMZH79^<;AM59{Ir@m?wV?`BvX(fp(qxyPon(L0)iu~LMs>q^BE;8v>2r``c{&U zVj24yeiux}^E0bM%c{}p@#DwOP*C(%o}R`_2Jd?7OXkqZ2H*jVyktzI`DB5S+W{R0}2`U5RRu+E4p z9F$Qc++8?vMrvv5xrvrwrP$<88%>Ns0})+G9u&=zf?G3ZEV`x22g9R=wMw1#OrB6A zq5QOEd=iBm1vd>76W#xMa%!;ntzq{C 
z`P;7MK=_*0(R5Zb>vU$3ru^YFOyz3@q^o- z*7iM)&v`a&Jx6UP6sL_D9AY;IZ7f0xEU24-Eaf!j(h9ai27b)0Mtj5EPQP}? zg`K9ZxdZyd*MeWa%%ve%a=L9`KkS?dFoqOlfnpvkyPRB;T`9THiTkSo8Vaez_s?ad z5=wTGzRG{o2|qk*^M0r|9fLyNv?-WPjET^wv+52&qqPwTg)&{IvSs4zhU?}OJ+Mul z>2KCua+HN?%*Fa(uZm0&hH@%?hN-u1uC|l;@!azJr(yb;B1rta{Je4Wkirc0k(6Ot z#N8JWZqd-OuFgDt_ON`D?*lZ-5|7!)ES-mvA)LOqpp5n7?oocc+{colX|+b3F$xfR zoAMy8Z*YiP?fP^|+rvIwl1>`N0Gt{cR?J@NC5JW<*0Y8J+JMR9#Q|yK?QUlIR3hm( z@L_`EhM@s{;$` z&$y^{B>}~>ZA32^Ojz^~!+{)7r(3@F%~lm$QQz}~2uaKwR=e-g^4NQ~E?@v%pg~Dq zK%b`;l!o3PSD;_*NQ7t_=VRev8v%fs{ec^@nalh?sDL^hEKd39AHRMora!mYejfKH zU?YshjKVy1s86_cH&P%(|3cS?F#W|Ao8ieTQx}Nz!>$T)sQ`9>vq0I5RPrhP<^WLA z+fr)Afl{iw33VGJyd7~81WhiNFQ3>Nj2rss03ctYR5X^DUT5Ohj#L#U*Aj=hIHH?^ z8)FZPS9>&_TN31nhKsF2eg!(=vGwL3l;g{p&5iUgWwec^^a*5aLr@ej-A%Dl%xX@a zS^Sd(SheqIWn-t{lw25nTS1T>%?wa{?>T=K1Rj%zkvMQhf~dJdSFhWbEc>bCyb zD(|P5;B?G?#b_PTs{Z_{X_o;#4=9BIPYBKz+B6ge;a|-1Dim>~L)jCs3c5*+-x4vI zd3Cyl4801EDllw+fUt@L@VouGp|pTmA%eR0!P8wM&+fDB?rCaIQW4tnKGm#i8E&JO zXh8Ar7Z`|__>05`w%L0VKo+xF-{KdP&gXaB0{dsIN2J#tJeLh!a=#`Atrxq~jT9G%Z1*+Fv}n(kew*t2iC!2E$@E6r8ZPx4 zmv?gYT$4=uDEodfvo)OJ8$k{@t{b6RGz7%A>`z=?Kfwl@xk^z)|B#r(o+m-R-De;Z z|Dfhgu)Qa~cK}qw!?hRsmh};;ciAN+Q3;aIPs~qVaAAZZLVj5$%0m^`GDA9^Dg=t` z^~qNWC4o1!4BPxm!H5hj{98;dH^L$dvuq5jrs8}bHGKc%;hhPGOdtZ^p|1Kal&Ze5 zScn%PF7LZRonbSh)>mW^XY__ui?<2P4P~>SrR6>o#o=q_rZd~k0oBNh zZF6(-%g1xbh1ZO|UYdQC?>;k4(Q0yPX8*Y&`2c&P_;&2Fiu#UkjKC#{|*Xq2(QIF@H4;Wle|sxFj^%= z=dYhgTrWj~HA5o=+U8Txc;QZpPNn;@4W1_O7JIXf+=h|VAFaLJlCAMc!h?O2(-{qX zI#Yd!r&>a|;uC_NuQcpV84X2MxQ=|W7~m?3&dRD3G}6S7M8yT&#V5N#38DTqOhgSI z1T}cIR4ls_Uf4NF?pQh@;4U8u^uGNl+A;DcJOOvqN;Cu#DbUQ*R6%?T!F)J++`z(g4l4%`JsEhHj!445qcv1oQ-LJez;QHN+gQih<}hQKg(uu!hZ1lJh3W_t06Y%5uj3o ztcvl5hOF-o#fbY+3i8YINT?+X$*Ui#n_7l81I?ZLx&Kl-C>?PP4e5c+;Alc8Yx`rD zTr=c0u1R;peuz$YzySJ3vO_!S zCPyf|`GZg~RC^DaNO?7~4~1#j(Om5IAgjwu!yLVIdAjX`$0_2a%G7HW{XlC>fF0X{ zw58f7)_FPucd;XT^kgEcyw6wy&y@_iAAlj8S*aKJ88kp0qp#xW(%9XCPeBMh2kJ9k z*EW{M1GGw6@Ag^9#t*?fvtv^4a^fY8wL}hdsbI0Qm9;hC#zOg36tY2j;5?$$+`(;g 
zIraxN>JfL=%oaRwFa{P5uMX1o9dh~R>)T`6?F~xrQ)EMPVx##a9D!#U56nwJV#N8B znlQe=R>k{}!pHvAYJHKa{RBp)DZpXScZgeQmDc=N;mJay_sOseYF7_5leo7rN~4E2 z%hRAiHi37- zc(|hc6glq~TbGRK_5_)`qXMOjJU$|D0|uODiTc=l2v!(5XfYi@=o2$89UT<~WkS$n z^|dcSi^uGcgK@;2oiF9^s+w|9{6=MEHZ$joq3Cm4i_>Z8D|b?YrM^`HO@^g%*}8K} z1!PEyxsT$FW`UeUrQal|tDI_)cgQF1)0pkgmjQ=FN|F+_ePW27P9)@7HXQ|Xxdm3e zFQNvcd$Xv*e1)#xb!=}aQ<=@?d#@EllCP|UvW1-Jt!L3nk?kD74FP}Tp7DGot@_NU9i}*{4pw%I!P{)eJCf5GyW4T-bm5) zQmZi*H+_R_fNdC{7d=ZSG}{J0Pcgx*Sq}C5X)6QCrkP`Y+^}UpVW=fs{f%VF)*Qqm z!S#~MPm^SIP96IL&l~_X^#%KrnGae}^6SYud_oeP|Ljt--$c#qmG}0?m4}XZNadZJ zCiHB*VRjJK(J~k%=_x;EL>bpg9jF(o#6Z#$f`zOY;Ue5#6%6&FCm&X3`C5QF^8|kQ5dbE?c3PQQ zY6g>SH$d;;;80*hY_(91ZiGgb!6@<{1bDTE!nNfBdro}R{JGXfvEu@GJuoK3Srj*( z2wP6H=ua=#T+oq(D>E_+{HJ7qxn`ib@dzx_&H(bzj`k!q^8>T&-Y9Bkoit{ZbVG`< zY^$!13a`(g`A|PnPWU+@ z&y1XOW%L@=f3~HYa0?=}tg?Sv$>9D`?uecB9~H!B*{ah{Pqb{E9;bRqXAeo9d8Any z+mwHDdh*%2S!8zS2nlm%C@1i@%(ZLj^f?bcmK7F85!wDK`79=kg*pEz1Zo%Za0kwz z&d+|v(%=$HOVSfxwvL-b%17@cF4CXtXUXECS-;$$-K{6(CFrzhvJ!H zoM-X0*VY$Xe7F|_img1*!(qQe8EZp{Ad*s>y{+NBU`Y5#cYvQjcl5-$tKQv&o}SnE zRXh-M!(_0ifkzxb;EOfo`}It2GiD78iGgDjvhO4)%AHte-jiKr;M*aVYrk2BBd6?)#)wpW2($=|JfGs%JGt4=KFR5bi>g(K7bDoR@3 zg&+~3zqko|9z9^=Jk4ni2p!&HFEgb3pAC!42iZPG1UqUPF*lI3^k03gjUg+6emluDy2U&$`y6?ZDUX^;`}+RZhf~Dt zU%GBHp$)|@p=}SeI<^&qz`SIC)O=B<^6d@|9{K(`@q-f498hxeQIN3|ns%v^0hoRlYWfqb0FSHPC% zNN22=6`~5!*T^u7Q!(2%g(8U`q%`3rdxueI~JJ@OGZ?u?8|Y;Xy-$=Qlv)Op8}(5M6PA@Itb&+HkVR|nB4kw6S#gAB=L zjRHKM^GpYNIJG_4Kwr38(glw4>WmvSG|eG^>SC9O`R>^00Ajkq6>aR+a7`B~zm}~6 zM|Vif?=sZ1>_WgCOe-T8U=U@m0a7D#O>(gv&RgR{QTB-$fWHlG()J4=-q z@OiEWSeAKt>K<=WWd?MYV0YOb2%aYy(SUdg4s|KcE6Ujqj>c)o!MK~Iae@6iBK&fl zS>{aAT8ECbrSl~+fI#bAv&^cGn~-b$;5cw_6V5oPr{No|QptZ+N^Zz}+=jGN1y;#z zrM?#q+bNgE7VO(yplcl`<9yNG_6{_-qA{~oZd+1zw(nP~C|_YByXT0%Aj;6ST>i>Us-n2!`Yqg(X~j1VvTrGujqwW`u=QA| z$Vy4KJ;)U;w4$|WqbXe0MRwtWl!k88rdT{AT~lB;DI3M#mvc9RAN;KBH;_CA&1*eL z345?$h(Z$Zt}PA904DVA?~#O{)KS&~sx zPxP~%)8md-m1{r2n8`v%Cl1Og9YKODZ4hh}q@yx(e6kv>Tj3};hbL0Zoxur)dSVkz 
zP|65~NmbvRvOxoAayx6_=*zt7mlQ3WsRNT-X+$pIqGN;|xUveO>lRK4${+ORTTo|8 zg6N>q#`ua2R-nHtkFP2#@EwcwhalvY zh7xx~mMaJn+DTL9!b*#cqalAG5hA@e0J$r|!cRnTUP*9gK-j5XlbQD#Eu!|i}62Ge)yk+o3MLt?vZ8R{Buyd6Q2H<_Q?N` z(E5(ZbBG48wYrkT>x z`5JZy(vaaBFMl%;!ptC75x+vkq2aJLlDuLy*(Nfe@O;8=Cvay04-js z!W#FgAYpI+_}Xy`AY-qOK0f3ekJ7hibSkRZhX#MPMzg(p$5qyG$Y?ekFaY>Nyscid zv#o~OSFQSzyYkQW4VzZz0O@7;eH7Tw1>H9H(ykg2P6Od2j~U^a)FUlvv&_Ghh#{Im z-+?y5V4O!}-GM;CzLFpljzve34yw(xQ(B26M>>Rqjl)6W#%VU#JIWRB9t!k`2HaXC zt1XE39OtZGfkT^bI7=lh7`g*DCAGp;!SppOG>r!5Qtodq02d!lpGu=eS(4SvwR_>2 z)Igl?G^hZ<1YkAXzGjaz3P^%2IWt<~>Uyr*&R(>i&91w(Wu$Uhpwehh=e4GX3PSOK ze#iGnI1P0jy}Hv5NH4+7f%o!{q(Q;&7Tg}SfD4-G`Do2v-_&N6t%5Xcbv zFS^jZ1m=PDMZ~}=YL}_P49zG{a-K9OjFDl_^=v(n4&}-H8+<>N%9%^ie+b_s+vbdjNz|Djfh zr!?r9bU43%xa~m;w5}!lQ+OjFmmHi-3UUNYXTqi#JV$9Em;A+(Rftbw`JU4#7R7_a zn|0E3pl=`{axcrD|E3f8<;yLZ3$9GdlfroRWxJb{p#qKt*L<}-99T-Y?^eSXH$FeU zps{(1Y`vs8Odmfb52#NgBc3>VDfh|oUKL%JCvw;hU59{492g|SMFogpH&6O2TuZ#_Y`)ep4ddb6LWM81rulBQhrVJ2?^H>sfP}6CES}Dt@?fjt8Y-fAk zdjvJOH<^M79j=re&z!A_4Q`piw$%u(*PU*L(ctEN2;+MtKvR1e z2PSZS#h3^w2L=**oYEn8&x4?cFoSNyP~(MoT6@v;jNo6umOeWEem=mE@MhY~pwseK z!3iCZIa^O#7k~EkJY}73TtiSt14Qruh-QfQ%lfM@>H94m63#y3C9Z5+)o7eT-Kixyzzz_4kj*pN2LAoO{)tdhQqnJs zi<*P8n?Pur5(xnje%?Man{2=l$7Hr%gde70TZ>=Y-E8g(ja5(81q zsh9IMTz49;aEp(pF&eJ$(ANSF(L(I>E)gsv;07d}E@*$urhzdDJi8{Lgm z$5?-M$?(Hl_zfI-N4PY^M!&2+od0ICBm{^JSvWi+InN z)fQhPN=0@QBE4w529mG*GH%Ek5eMKVAU+2`$!3NnxMW-w7l0GK(%*LVZC2uavJ8_klCC!647~=$ zmKozi)xm`Ff$Ob?eZ*#zYRiLGtpnQCFh#k_mA_QnYf*yV zhRp%t0J4c#UcZl)QYjv(qTZKRrwdSlGZ)dR^1%EK5%*?wdh<%ZDI#BO7#uO9nWI`M%ew4QK#9iBCh-1Q?xyL~0 zbnoc=uafLfKKmIQX64-Iz2FyJ>p#wBmVUyAHQ_!Bxs!86!_8@wi0)hD?#<1@u#2QK zr>1T^k~**-OmI~c-)H|)5@N$z5E0fpfm{pj9|?QQ9YYLj2mp%GmHtS@nH(Erk6a}1 z&zTF{&B^f(P|%q@-eqS~vA~2lF3XcKuhFkCm|p%;3Pd4YnOZW4C{m-lhPC(Xtn-U% zCpdropFg)3*PVs9l-+Jow|%B2KSgV*BjMD-&KALXK1;@f-rykw@6f}cGAb>cv zd7tqO)D+}Mb=p`84L-bb9Ivp3aJ_g{m5hy!7`q2^KXn5=7~zM{>G=m5;ki|%IeGJVj@9C`7`g8k zigC&|#l_T9Vf8e&pk%&lc!Dg$ZF^uuli1?0@o24c$YX 
zS|S%TvlZW6&2oLQQld_VDd1SaoK?2;=IF?Qk47!VK8fPb5d9exsGQZnN!)>H#*6(9{^}_`#%S_FKcT+3$5gWqKAK*?- ztR_MP5(w)E5};q{AAe7K(Wnm}4HuV~Sy2JeD`rpMU}X{KvEy z|EFD(Ce(_*5fUNg=oM#&vj-j$xzyd(PT}qQemdV9q0m%pxkk31rc=%wov+w3v!6=1 zWVzanx=4hu0Csr3zj8l0dk`=^B!;eafe_VkyUW)fdZL+N&+ZBcm;PF0WxepfA!s`C zi1sQ)Lorp}ucmJ4{c+6os|Hov*#o1he%0=FX?_MHqx%Ur73$DaOV--tK_O*YXDbYn z4jnPosbTRLNAR5ZotgM)y94~vfZ)$0y9=&+1Ieev4qzn8i{RDu>_a`-?-4nSvv{Q7 zY~^h&*rmPo?g6cLJ4w&R^p_9TEnlww^U$~MrKKNUxF2;}2HT@ip??`H= zNQuVNqd>JyHTzjD)bz{t{#X1!p3SL+=(it~djx~-UG;t0^L*)9OFG=reQz)jaEnC7 zSh5rUeuaajD2xwf{Qyxp&Vl_Ke18Xmom56qC$ijx@#YLqc7ONzCe#9eG-KVdK7mMZ z^Zl?ztf#;K3W1=SD0DACGZi!Y>>I}V z;bHGP(V{WuZ&R3rN95fgo%Lh+XG`oqn;#F%9(!Go`aURguqYQbg_%Cm)E2Du3Y`<> zvlWSq*e0-=SFxUzdvrrZc4=!Q;Q*KSLEh@2-0EKTQ;N1bDy9Q&<|%Bqv9a%9-=y?^ z9;OigG{uun@iNzCE$jptHWCMH>|6063eRtqcnT9@cHES~^?QCKR+1>*$!)SSYE-B? zf}J)<%5oupXdE4xJ5ap#I_LA?@}dZXFvbZ5H_UUA)tFEz7TsD~7zKV~@5lRC9=t~D z%*8gypSE1Cit)*fz($K4{NG%)T zBdWh2^7r>vF&w?vW6}8VN8;gQoP$4MPj$lHAneioV zAAK=DCB|aLmP}0*i4~Gnw&>RVcIih6@^e?9l`lMkkU<0ehd=MN{|E4jH!jEQXt{Yc z)awk6zarj3M(X!N9Q>9UoOzn`ISTjUF^Z%r%5_e&D)UDQ2fsu9sah31xDX0n&7F#{~3Kz z7=4~#ARQlXazI4Gln5BGo z19h&gUA6h00a~5ZaFXGne>G3Tv6|zD@6SL}^-=D`Crgd}h5GOCyPZ9*_l0fF^0JHB zi~pUNj!+m-sT&G&$If@oyd(+`{_^j@Ffi+?_to?$?jFOZNg45J=brK=Zl`_`&AOUc zNYs`+D5glRwD7v<%)why+9R{9E#D&kzcX;|$#sml7<{2)qb5mTcK&yH2y2|tRil5M z#MR)M^4N1_#l426TnfR9Y9*Nj;(Xcg7D^%NP-?7JC66(!cNg`^At+Ju`gNMS<$I8=2hS zarL{5+4GEVBT-H%!yqHo>$&opPO+DXxjoZfu1U+i+a2flQ&mXPdA#fUs-Ps_uw|26Rz7`yn)-V~ID@_#?}@R++wsH^ZY>*)V^9LjOz42U&7e)nnp zOCs612lqIGnZv@qsPjbpyRaJQbyJxiDy!6Ls7a^_ZCBp4xFI3ZEgq4b>cc<07{+Du ziHut0_y|m9S}85QqPk?*JEf&a9D~D69)BD5NXO4&3)KG^Dhy*d>=wWMySfiXDn1>V z%N2cCNB8afV)%MWJ%oy;B&yQv#GHuH~m}5vVz< zDv6Oz>88GUIBk;ZGwS-??M!mJr_uZ=)Ivl5P4<@=&Sh=Y-$xD~iT=ERFEgW`*3`E zbY$cixOa_03Y^W`f_? 
z-X*49jH7CvFT8p$&b(Pd8}}t$Dzf}CdEZ=Gycz?KPx&@OqqTH+msjy-}#?)4nT9_ z7ij7|)qUf!ADfOKv)WY($nS2h&G!8Q5%y&|IvnU-_8H2x%AMyJmTfpc&}-om6luaKGl@(9-LmBXzUVR(Qg2!3p zxg`mZ%l)O&7{|YFfjpM5#%VY-L*=AhKKvNb(AYRK3ip~`SWxG`f01(aXkHNY+|iD+ ziduEaIX>l$H|Q|pQh8Ids)-KIC1NGG9KG!4ogN=7En&;!w{DS1xMLnw?lk>YLIm$1 zpGiv=WiVWEwWz2lc@qeVSV*DL4&S!xL-Vy z7W*)t)B1%32Y>y6az}25l|`8)!5bLJ4Cb-I3~C68%t!xwY9NE#5rz!)+?OJqe;0au zj9a@-4~sfy_YdI`<>G7;uD3)gyTcHnT-nQ0of5|sN_;eJ%tOF57XST&)!~b{VIkME z+ZWc~hlm={)f1Xv4TJ@oOv)x7>)2YHVf&to&i%(fejIBDKwy8a+ODKqfpdp)#-^=5 zMW?D-5>OFdy?x7(b^5}Ew_uo<+;i&WNl|Dx98g9eha8ubEGMrf2L#LeI{p*W9GvTU zjBt8u2=L5@|NEW|%(^QfHfyQVG|T62kpEu8a`1P30xq2@6gxPLC9yEg%*=Yr`EC_< z0_m!H*!6L$ko%qSAW)peX_catoY|&~MB?6v0UJlW=8ZP2Do{9n?(7uhZUr6nk2$Rd zr*ef4H*YwsyviJgCau3VCgT1)bH)jHW>tBgt;EBN=3u2C%ndyA|ISSV%~j-bPJC<8 z?w88@PtbCoyaMa@0Mn=<6qSp7wq9oIbrb}*o;-Q-AnEo>(OLS-en5)two~nq;!LuS zxlWe23u8Prd}<-;~CE8leBJO)Ot4FR{<3qq3q&g z3uW&S$tWYTB70@8_??&ZJl*&6{r{iW>wZ1DZ^ZS!-sd>Z<2cTYVk$bi;KtxJvxGjg z<~RXv#VGb9LJ0_2pBw;ap6bi@w%P{MjI#iI<+pN-@%B5P!wd9=zx~ZgBbBDRAzn29 zE~z~UkynS8zWfKb@McZpZJ!^gKTjF%_jwLRPur~{Ii>@@qa1~POyr;E66S!UXc4o9 zmxs|S{)W-wkeH3VJ=$^>@{DI-t9w#lY9Wxm*aQj58Qf2TftA|DGGNaG#f{^z20ef1Xfk zGMsP5leV;jVsOM4|91N;I$#<|!YSw5&+@|-+F9R)ra~w9%`cg@P(-nhdVz~G4N$ym z!1m+8{+?^*4*0Vm9WEifp72ZI0PJ1|wiwY%Pb${I>=OZdYGCFTBI^8btS5WmJuDB0 z0c<*oT|vI+IS7odawKaNgXv$QlyAU#aZ=gh*qFCLg?KGM_ol@4@=J0YebO>78$dJg z6KLbQFW*7>6oha&_zMOk-+?@`w7y=OnR`mQ(j}MiUQbrvyG1jXjX_6#&Lav&^0ne* z3wgcY$Hth@2IE_i{VYeiU*DjCRBQwzbEoC;r`Oa=OG;|mvMC^gMkTt+o4$a=GCEXI zMM_4NtfS;+1#9?nb=|+Dx95X91}iO}{S2zmdcRVr{`S|eo%o`+^fJ^;v2k$DFBTSq zI~KMtKBbzz8)WZqgJQG=;mX`IqI5%yBFHOdBk6IK?nGGM|FjnDVfaXGLSE4ME_~%! z$OoU+yMI;ZYS*i$xaYOdYz^;)czy>##x{Jo%6CFTXnsf16WD-X`Qec_ra1ll-u&Y7 z8w(#Fj@{i|o4LLRahDasl3+d$bjA=h8CfhWkcFJcfu)d_&6pp62mr`Lry&Ow2;I!q zDH@sVJq9A$rgCRPPGefUK`1*61r~stE&~gE3B51FfJzH*z^flMVK1eQ9ddp--WZp z#l@|LEbnLg&Y~M166FIYef1`I`;6+JgqJ=bBMi6T>i2k(jYGCQCu3@Y@AovT-CP7B&3VydwHSOjl>9jBh{ z&RpzxQI)=8XKp(u4MDW}>e1u&v_7{4{*IgnMT8^=5?X_(aEd%+? 
zpR4iv5cx)nh((?+ZfWV*#Q9V;MUZQdk}XGa&E1+=1%|NEXstD>wVy!N$JaLr2x_I- z8kq555MLz0(jMZ34ww>mfNTUTVR~$WeC$}VQY>LI)0lYGn^s58ixI+kukWQSqG#11 zp*NM{!8xF}VR&C@b9-C3Fvs)_kg$_su7i01RGAu>-!dM3Fxqyd&|d*DKgcehRjJ)dAFr9s^OrQ>cbm%(*P9 zuj)Q%Fz0oCrIz;1Zj>+uUohmpY%LGyBWhu)`(`$Wi1CH6JUct%2xr-6IB9CIiSEI< zG)EK^8Y%}fSo=ZX7U12yN%FYPexqnn5RsIPQoV#q_Co8NJ9Sg)rsao;=V*6Q*)!AOdlJeUh9L>L^hhtiy%J~U1 z0%H)}6N|kmm$R-w>ulIEd8a*6Me@wbmAhSEXenAF*+`hrwoh$e&@ZKc4(wftKb7k% z;-mTGNYD{M+DXcPXI}uc5+H?qa5~p-eXi;>RWNJd>vLoH@d5uRoS7nEfUcEIg>>nt>UIBmCe#y0P|$(^Bc!x`X1gXqu4}~CSstS>v&Dt+CEmo z6MSDv9#8^0xSYgV7+|*eDlQY`e{14Y}5_K`!s?}CWO?Pp%QG-< zPiadVr*4fp9|-PI>D#c+oQmwoWsAj%USdd`s#bJ@c?`fbL)%AAa;v`%s*^C|)mrCj zPqd`siWkVr0zu=w2JKjX{}bSFc6Z5kTL>y5)0Hj5Rp)c-Vk3e`L^%hMXp`$8_WYwP zjYoQv+oDwe`_89Vv+So~wy3`-s9Gui1&{o$Ae341tPM}=>FLo9bpkIqh(vrUYsT>R zeTDwTQV#Fk6^m73F1`0gl|LXs=T^On84;4$PN#yO(e(85JbvwiySZpwc`+$~PNbN| z!5vAio&*`<>X0CTsL&*OGw{^KJfj+y!E!L0tAUGSDL%U~Y%L9at(_}uj}LfC&UR~S znX*MJqZ0HE>UO_?Vvtkj0C&IpQY5xZa&ofG^7wgaxrBzT6)I9HDu1YyGAF_; zx#nxEfm_MU`$ua6rmv0@&6vO?DitcTn-0j!yObHaNH}?S@xBw%v3XoX&dKfnI44cy zoWyD7{pr&RLQs9ihv3M;XF{QV`qz)a=O^1G62~s=jEnzO3><#=;o>Dwf2~e+4%+ks z8#1l`I^^6h*}7Cecued$d|A8 z^Sf>nLIb6Vr~gEu;*ovy44Z8C{o7gmOU4)F)l+0|JP9xA0jI$W2D{k{#VC&q^*3Cg zG`U}CcF`P_?86B^#hnE<%4p^I&0PFIQ{5^zv59uWL~L7HpypW009DLR-^6501w^}p z_8IHUXu}J-*RMm(Y(CZeP-6!wt(t)WCC6c2Ty$zPji8f?VMe2Q*Ukn*_2A_PA0#36fnr8Wn_d`FOYo+4~s3g26Nz!;P3>AjO|p*pFgR>&#Rvsv~Rs!tnmT^ zCQBHVcn2~wp_~3kzCJ~cP8N!_O0qwd@_&zT67Vdx7wt{Cy*Bc`bm}JX2T!5@G^YSJ zL|_N)Er+1(Bcxku{{7`)e%M*Q_kp$Gq5RT??ok9dr!n#_2X$8hF{KV@5g#{LSsrdK zahFzqgvg`vc48%f^VkWuAXwR=^-_~#)MYb?0f?p9GiNC+AkpGEZcMsMu+l9ISLLEg z!}0lm!G#VEx8R}V6eoEkPb!6VvVXl>)Ao|>#Px2|3~AVUQR8RKiboO6^~;3;;$*cC z-kAMLY3VZ`Y=Nwz!Ln;36BDn?F4i#Nk-mtH_w~707#&eqLq2Vot-}YDXk`I2sBh8T z9&;7gvSJ5S2LF$%v*A1FPnYR`}q84 zLg((5;TR|$(QDxq!27wix-h6DPz=>cm`BVN2?+_{ZtaTk+gp7tpj3_*G^QT_5i6{Y zV)lRaV=VLpv!I~X^gYxMb0{*V`itVMxKb*YX}P%CGxzLfdnkd3*cH6CdLd=^g4VrH z4#!(2LgMqVePRLS=Ss?OecRzfOx;;2e8dy?7kYL 
zV`p~-T#T>`4GlMU+%smFuVQCdC0jDMejxzrL2VHA=T7kd?6$;fkvD2ApvGYaxs3;R z)oO3sOR!bhU2bKf;6TYcw(~A^f&*C)7=8A>(aQhMUtwZm5(XP<&>& z^DO`|bLr>ZjJVn5A4;Y7LK!6d;s$glW_!Ghi$C>#{q&iOSp+$}&3&Z)K>v-`9zqmE z(D{ji-s2$hVZShDV(O%l21zJ;ItcN}H3#NXU@@T#g3ce;gN zB)E+NHjIq-aU#?pgSg8RQ0-}Dj~G)FbZQ)jE#0h0Vg5dN@7D<<_%y{L>I`9%Ty2f{ zEfAUTz@CvoQp+bb>9tP)Fq-c#s>{-z!H5*VDwhg)(#ob4Kz_Le`!X;^tXXR1?*qH# z0B2|zs(3snmfXEV=EVUkWpW;dc2YO9;Ni{6|2UiB5fud`%fleLOo`C?kJJB=&d70W#0C;EujSFY4q0r9=`qJ7G*&?}O8_^A8l_4bg*RfBXQqL^X4=0S?RAj+S7+152IY$Cg}Bd5r}+Y zS7}8Ob>~BPJ zW+}WVKr z!})5=|2g+%yom1t?kVP*> z2qa@w6f74|ZvvAyz&YGD0c;`@?ddsk5B-Ftkj0qRa<(gt6DBMlV}dc0@{+TFM<_P8 z7AWx1laSmTj894`Q^lapXAYFxjAF4INwp+n{G7uy`|Lu-tIR`G*lPC>!l|@LUJ_&j&wm#pmq9z=D)?DSB%gV!BG1gC$x5 zuNKUssG(rtGi;AKPY#uthjpd)6ZpiLiCE69#%ktM9>-i1mI#bi|!_lUWUE$ zgm8N`2P0c#z}gKp(HhMu02J<2Y+IHnW+pQANTa5x^O34W7HFUt6S}!E#4bjkAA1VG z=u}S>3hQfaGhMES3g!skl1HlBzNzgUA!{EW!M*#?De(iL3 z)(gVOC}6n{%=L9&-ZC*EY`-cp0a2G-w>W;Lz$b>Keq3I}X5@&l{c^W`(T4aL^jNq; z5QqK;dpvCIR~be4%JAOHz-ESzJFvBc7T!_JKmvc%#N6a`vvwW{a6NuvpZ@Z}$?Ox` z|I7zgnE;sH7L*hJ$8e-?NTZgf*&UYhMXhd+0;S}QVn96 zfbydyZZJo(-G}#>{VHRt1NeEMveI*JrRYm@(1P>fttc*iof6Yb3sosMF7O1w!}I-m zPiWmDg(9_oircR(Qj+9YiEyQ-DnJ= z_WseDupf^=Vr_8=Cq}N1f&E@6l-9BDA~`j=;>9xEB^(xqg6YL}DIB3jW0g0J9j0#o zJS4O%{{Mf{XW^vrUzsAq{{7USx~=M@vFd+8^jefgXu3+;e@6pJ&q*vE9;z@HHK!@9%w*(*ZCRn!8Qxc9S!C5b8iVc9U<2{Rqow%=ZR;)UV3DNAxZ(E`+@y!NI;L z-p*oJ(jkfYq{WXn628iO1i?7rI=x8uYozGy`cK)bK87&stux-9r z1L%CE-Wv^$6>DG-fN87C#P2mZcNC4G%N>hpMqx72mgNqQ=WwE z$eGZUuE(5%sfT=0wBW_x*!k}~nniGZS<`*W_@?|N?{OF7e_uq#I(%47cBIZvEVpr# zy&gm`qq94nn4Rwp!^yC)C`8$KfeP>kKmleO0lgzbc&#%?AdbQpeb%NSXabI0T7^@( zgWrbTt4=~)-KoDn_m&B0aNZB~u_Vc`74Mfl{DB{PG0;|9|P6%B)wa;&5aC{9T)@eZcXaLTqb!n zdY=;L{drZWxeT4Pc^0u0!cVWnL_Q@*n4Q+(y-^89j6w94v{T`vF+R_OYL#FA1Qic# zbao}&us{_l2=`~wvQWkbNolJ!U|?ZrTlY$HGF`K_<)B?*jHn$@b5d&L2&w)DgCu^7xn-7 z;uM1WL*;k?3_W~)wU!NL0<{px?MzZjmBI7E@;Kdv6TX1CL0pcS;ps01#g~(lQ#uzw zHSL=t-k&h{z{aToUT?dxG__#VO`g~Roe^VzG&fg2w?~y^p)WnaR$TuImMnEG7Xcio 
z4ym}KU4ghi;}Q+d=Vdxr;kj&X7S_Xeux|}`d0M#ZHa%4g872o_>Z>0j6bQ&Z_`r8- z>Pp*g;SHHn8fen6j|T{XeX(p)=N=>=$>B;@BI`g;>_)4>Qd1|`(?s43R;ddHki+t< zhsEPUO|oaYAYfj)RWEM}ra$)i@HPyJ9(ICi`nftq9Q;`7a&}Q@;J$HQG63Rz-;kb0LUzXNp}hizmRc`u|&q}%a0w7czJtge;S(| zz}wr~!|afyhD!z)&wnMy$18+Dgk?Y1*EzE*Tk`}=2uxt8*>YOWoA?=l{1NGi=i;t@ zZJLr;6ued0{O)@$n>!NwWrdJfZ#4!@Yuw0gqXek+rZ}|^z*==&YV%-nLJIk2y&0^#bpn_T`UK6B*T+}R^*3@q zxzXG7$^UOW4{20A?T|4wK3x`n*4fr2-H+U=C(Ei;2C=?FvM0K$wtl_(4 zBVN?&~Fa!@A+SUVNeS?)h4JhY(Spu?^1sUc0VD_ zO->cI8@NOUKvJ7pC@C%do|%iK0;c?Nqo*^@QP9x%gIC7rm-DXOO!M`S`wqT`6IOBF3+g@hMI~I53|1au=6` z#L+1hDoA8J&?2%6%2`P)l&9zH{g9ZB&+QV6*I?Oelp7f-Ml-jvJ1@$i;19pz$00SO ztry{+!Avr4fBz8I@Y{*F9PSbK?wxoZ!fvsf84v91)9ThT6TRtA5=;ko(2muJ;!8NU z-yk^|;TGFJZrEWQyQ>7)5``%Dyugc0A3u@k^IsN)KWHX8*}|-8_bsE->{a2H{Q`cJliC4z-$0gF$2q53jNpyBeNM z?Ul~XNyelQ7*ajYOzn%4du%AB10zDOvN91+mxY?O?`cR$04nQ^EpopI3`w9r;|Gmr zYJ5z#13*R}6RM67oVp-Cb?URUJKhOCtKAT_Y=Gx3eXDdG7 zeF(MCJrMznKXNN_g0RXA=?K;?QE$Q99&2o+EgB#hjZ7o2=bxlntko3On zu{^)J9K0broG4+^Xou`xaoS!zj})|!ZPIR11ipzNDO;cECV-oO#<0u{(FH)MP!To> zCX&7367J&u;@5Yi_?u-8$TX5{jFf0lFTXAW3$_#p|B3=fik###`Ub2w323y)lhc z6bAm!M@O%lH~zw84nCZgJ9sUX<^^I`V$Pp`(w1@Xll(4-C=l9l=FHNM-<>Ix&ZU&l z1g6w3Z6pK$tx5!&g`s>OTsD^=1ZvF>CtZ7tI4av4R<5~JdxYkj25#&2n@T`iA>EUu z9lZxe1-0p_2_l754lU2IV5)FcQqp5#lr;!(TliMxKJ1bUzY+|*lQ7m`>u0xU{3rsX ztvsi=_cS)-|M1LQzW@O+4aFXIt~(SO%_kty5kqfOHym6v!1mzPm4=GI?wG5qgbL=x z0Rsy81;Wa)`rD(P-i{Wtgr=O{sat>Y$O?ny}LL6dxLOF2TAzUP) z6Q%%gOJI0qW1p{j2>|uoNU{R4-&2Uk{I5CkKj46n*cw3Awnq{5IfC z3#7h$=lNpWa~`_+fp?l~(JdmcUYT}fF96jfOI#P&RHW3@(H!kV@2m}hekE1Z1AUK5 z>8g9Q!|yh_+8GBL{Eujo8)Z=(Pa6Vh_BVQ+6MRGtGqK=1Kr=tNihuiSRP!4_R$(kb zP#G=-&XvuzGsIgmff+yRBF+p`KWz)qDc^8Or;{{K8f-hI*BXQ?t zX}qC6i^)$ivzQ+)yFVoX|I744XK9_3*iEdwGs8Gs&o$g^^)+aMh)@qnenLxE<|0nw zupD8&N@^KNax#Y7@F=)V@eLzfhozrCRqZRFlNA-S^#9ydaL;a48>weDa6^2SBUVaa zpo~E2nXjY55(1T;j_di^9=zLOTf%h!_SlaIXFlaaeMj?qELp=+)-ze@ zkq?IP#D)G{I;I*TrQ_y(*7URcwht}_Z@#z1CBjo2rTklzAt_4%k08it#^nz32=K@S zZ6~f6xNVtsC&GG^+QGrP3F*V38ajcyauW-{(i9rU)v1#QGYePX^ng$;c;=Cw-i3nO 
zACgOpp`r{rv*uh&A5?KUsv2fP*D!(=01`}0!sbf6#?XYshgzj}c(Bhfz4X}-=j@*; zPX$Sae!c~CK1(Yr>hpE!r5b*Z9&F_yZ)}%`$M3Ou#=ta;>zBToUF)hQw+TFanu6%X+Oe26v87P+gZkTDNmWdrqws@{N zifQKPL1lY}Lo+7}+a)#m)5jNY^X~O>=?^Q#d`Ei{Oy_|SWe|-3mjXuU;W)BNY;1W~Nf&ztQEN$U$1_J7FmD5jV*ar}EE@4u}N(W1l=nSa0 zGplZGEU3s&ebCVTJ&Q@1c?fyqhgszB;vinv2#O6TcXFH~$u?oVufMHzY}w)EPd$pX zsntvm{=5muc^(Ev3R3tbaa4p@|GpM#O2E0>TKB;|dxeF)sJPnxcftwA1rJ+hrDqo~ zgN;p1DatP=8JfR{2cVk2LjOKU?zTN`)(5liCZzVIA2EA^kJq!Tf>0fJ#Kclih|~4AKVB@V3qQMkB;uMkd56NQtF?Gld)u7M zXU~#f;vLmb^HKr27AXylH*nxaK18w+-(YsIvP!dA8eu{h7D+r^U>5HJJbC(s!5so7 zCMM!grJRt0MIb4R!EghYM8aG!Tb1>^&_>a{V?V)a0Tu=cL`?!?kPXNcQ5a>o%EhHJ zTVKJM^E8N>L!r62>gEu{vCT3;ZORRQYqiMY6)8wnOFN6b&;*(2m93vpekq(bH@o?B ztfgJbq8QNl-g|3>KXxaN+RI?89Km>Cv+q0Q%d;pp5pV=9LK${wlIeV{`=$r%QF%OoKmev^T!5GY?65CAglaPfG{t40Khw!r}n3+R3DNPG+ zcnKI73~ho_3Sm=Ey6Q1c&wKV8EW0em1S$M3o0sIPC(~jKc+0sb5q9wITCbr)f-V-L zs%m&?3D}vGAtMNLlqvlt4iUZLj5JcGEMuU|tO#X0r0= zRG%w&>h|;fH!?VGX9Aa_?T#-vflVaChbXdk zkHa`FlVN>`MZaPrK&FGG_B3ewSJD#o`wY*4z{6@c3jgasktqVZ;A&VR>S5u*WLUkb z7&wu*czB%`^B=8ofE&iZOc&S@SDPgebq>aCeE*UW8I~7Xd??J()PH6nJllg)JNU~P zNAg~v;?fwDZ|0w0Uj*umwY@c1&DB(UGR!WxL+^b31#g3kzOE%Z(SnWw?QmP1_tFQpgg)BGOGbhL6z3z-w-$%pJz(553cA@>v{VaqsW_h{mY^ote;x~?7%l`o zdgbu;|Kq$$p?y&hurohC&us#N&L_by|8rnRaI$crWT(EIe4^s;UkV?^)BKWaNCM;b z>-=b3I~9uo@_ny(L**pJBb&c8a|Lf z9VZwGiJ_qyI#Lh%S?7g!Cb_7nZ?CrFO#2~&XvDr(xPMtaMY;8AXV7p zBP?uZp3pI$u^qSwF%8wby*eGNnj)K#qn)XhLbUk7xwSn8^`wh`%UJ*uWkNA`M%0NE z*eP>MZ^hhpweUKLXg3uiF!n;_zX|()6+`mI&>w$lcyHbuWBEbi>t7H1&xg%|2SH6c z#>U=%SFQ2Oc+4fTfAabyZ$UZ9#$!Q?#5u+>Jo)#@AQAIpD7;A=3=h`$=l6z*j3DuJ z3G)s{Sel_-6t5c1Fueph2&mX+0Ebb>#L2JGTbfI6n|_L?rs8g#re_irrLC-Fki=rk z=mb2^J1|fw`B_&#s|A^4MhJ5andNgSDdn7;8H_v<$w9_3$7M=!1@HIRe5%pn+F9r2 z@fz4k9fg16>0Pi|Dm+urUm=1GimKAPMU)1IgMNNJ13i{%WDg;*FR$6z6)tlrzzkiR zz?*gah4{Y971*koaYiju<06cK`zNlDFo^r0V#CjMX9r=ZKZq>M>*JB_@)kkMmIyO- zObM&`*u?rNfHrSc8bI^|s<`G_e@p$;CrImGi%CGy^n}yz zPOPA6CwUJ$1UQ#})wdM#@;Gtz_v;GC6hN{Eq*DR_Swx(cNWr1fsFJ1I9NzS)XFD)h 
z$b;3V{_jrG(4-KY`d02t4knma0pO}zVJRN#NV>KJYYJ(IVuyWb}!GCDn;MevU=ed-r#c`mD+;uh4c%sJfK!K z2D>$O&763PkKlUo8@(^)C%J= z?Jikwp;PSNb`XEM_AR7q7gs8~e78zQrc%t;G-SRY|JZ-4cwlg%?Kc$^BXP=UR!l`(7`wa2?UEEI~i#-vxF;H8$7>xX6oZH{IOaPD{AO=Y;ww zreZ~8C^#udU6;MFy|v|v>&$R~K&6pTeRvNb`M$2UdW4_4CyaJF2<7d|Tj0o^JRIP|?Hmp?cD|5`;`>uh$g+AALw(azF((A@zcDfzLZy$o$ih3@gGAStco6+R;m;k-Mmi?mzXQ30BN zi5m}U@2(VpSf+oXwyyu@HvKtL&v8V9>r6`*D?`)s^+1cq=->F8q50?FrIF*JKR7N) zDQtKleIcPzYYcRJS^+NNcNoTPR_*Ud{BTEtOJHE2RS9$_Oc(AWj|GhugPAzO)VKRp zbkrZpuCYMw*HhbfkKwGpQsXH~pxIUdClRoxC<}oPomn`(V6t`->io}o4fO_; zl)uReoI=GuQ&VqC@ws1PWMMeb2-v4x^pn(x&W@xy!bQs=TA!QZThd<#AS|g=lnMti z?>zez7mM-$*e`|PL$hlu{&o1d{joKIII87`fuWvqOJNu5HP7hk%NT6)ak0pA z(JvA4&9M{2(kd!>bv?3XE^9Mr6sTTofF98ZJEUAY?tnrKG!UV@jXL%zQ2B!U9;t}K zTw!Sy)EJD==V6c?t%!K%V=sb6&~T$)fm#KVWBqmc7;NJ;`Ycs@lgx{^2=KHdm=s}n z09kB;NQCZ9#fn7TD!30noE!{*>59$d{v5(#HoGfRt)t?ffTO_4*6jMPViK~Vng+my zQETY7oL@3TO7w4-#bf>&Wk^fX^o$yAr$u`IFhPz}{_4@KkzJsBCS_unl@y(fVKOJ^ zxIhanZ_0qL>JjjQ!&kGH830X-Kd^yka+FAu(3w3Gvwt{wzsSz?gyX>j4**91p)Aml z5-}t^!^4cEyadwLZ#7^nnN>tbGh~P8$Hkb%=f~aGx@kYgX9(DCdR`Ddj^U9jpQ$d{6-A>IDV_AT2p+qc?}B9@^Qq-qhp} zR8Qvt+jpp3C^aCo(7H+{h#HB?phjPV%Rwm{ysgI<+W8H-d_^?tKh(VN(5Dr^ z2VvDN*5%Z{P8JC2Er?i+H^=YonO_BWKH~louzLOd*?WrrMo!5yWPtTuvD5m-f&5E0 z@;}3Xf#?d0vt;67itNwk;YBhBr$;NUdz7|ryFcYt=eEL2s!%&GL^P|Y1m3+Pqf8E; zk${%#8uX=vnWg3ekARuP`XsE6|Iw=X>de}%^Ys;=fa{{ES+Cd$z|_Ok_C=h5JP zWKJT(ixKPx_8c+nx>K`v-`(UH7c1%s!nD-a*TWt2YaulpR$kiJ=q1Y4D+v%9H?>>< zok;Vui*Yl>rlxcmuk3&?XZ7Bv>9$+zopBRq_XBa(Tc+5eN;xpQL-z)4Q~L(|q5h|t zN&M?zl(~&o%N~8bQl;1W|53MKpl-=%AwocEy!q$-z^Y)PJOsZF~jYVnld8<-5fGDgM^uR#tN0)Fr^P( zs3K6PX9j`IJt4p4_RvJh^Hce!(s?nfwR^8m@3p+4ufwE zHpC<(B&-x^pyv3g*uZ+XM@ki#=?xGlvKMY}?8>JxSVksrZf!*%ris$BuR7PlG1`cf z#RVpEhE3d6v<0Ql;@_e4$TVBQ%TO4Vo8z|l&T_yHK)Z}>%L4{VMx6~Jn8lznX7a=i zWW0ZM1vYm8SB&4Xd;hFrkJ5W zuz{@U2bj;S!^+U(c^jaKXO>;THL1*rB6lfL0z9b{Sh)QQ6@;4^S49DhzNS6U- zUsKxhUv!rd$kFL~ zzh_I^9L!{^PKa#k9#mjd;Qy=i#h}m|W+;f1pv?AeoeVD{BjXXv0pSU2)cGGk;Tv3O zgWkTTMk3L6l7!JSAF}H7`nl)_K{=pkM1J45+bu1}-x 
z|A*hZ4_1^>9MTUbKu#(PlB*@yxt$CknBV%dNzS7Mwv(%d{by|z87Co&`t3l%V@HS! zWtyNg(W`XPE-^Te{VXNsgkt9)T<=PQ!>$A4jbe!4knTTc0&jLJv{^)^?A|Jq~;tB$+m4Ga{s0W{t!w1;M$tbxp zCmwpT9C#gy+gN|Jt0cIKHg z7*x+Z;>`TOZF9Zb4w)#>9*OyHUvgIkE^XIH(t}o{Y>JmR`s0Kk2XYDE%Tr}7d|}Tb zr>6FW5^=gwivZgdYIbCuQ}-~t9TOdlTfI-7m5tp2e2NaBm<;TIR4E0iI8br`SMZgU zmb^Uf?ymUtDLcjej*dZgG$zVo4ql`vMk(kxnHZ}1EY!rnQLXAq`EbtQPFR49f~DG5 z8IJP4d(i(^1F(!91Ui8KlP6<-HPrJiaDLSgHc&CaLzItqoM6Ip?&d2mz$Ir(&-mJW z|9;Ef?K{_1($+f^=@C1?=a+t?T>0L8zp|?WAkC%q9PA9zvkcFR@BcRKs2@7pS8>?! ze*I<*s!rgSstG7#+U3(Sn_=gZ7~V*he%UE5u5AnPXOxd`d!V@b^G(XEh9F@ZcO=#T zCtLSjEpTKBH!gbeLJ?O-@_<|K{SgqoY6d?(5aM?)#-YweVBpp8Oa(3Fi+$I#i@L4P z#9K}@rL{!+6dw!_Z)1Ccr!70>@f%mscy-NH`{Z5YRdLH^@krXCc9`wC2=FHzFwnbh zkOQHJtdge)cJzT?eEr13enkhe%9mmyj7++>hwT^{jk5t=y1F-)&~eT^1aL4a_$L!J zJ5V74!rJLfmveRo>&Pf|DH-SV7w1m4>b3uUZSLNL8mrLp&bLF&KN8|4kCR{8R;m_b zs(}GrixHWN-a$-}&#FGu4h=4WYGni*3^d+Eb1OqaUMU1W=B;zi8G5`h-9r0@1LGqR07yhi zJbDJO^BRHS3yKtzP}ryd0S`Mk`iLw{XHOym6>IK6t*x+ZAC`#zLv3_lysVcD=#Bd? z(K0d67vH%P3v-A!?AmH{8WE4xx@BrvL->UzL2eoZXwm50SN6KF4tRE>ixS<^c3e~Q;Gni#Owtb?||ik_JvIV7}dYHIBCI*6!{$-J)pO)O5^ z$us_@sVUe`$IW;_6d1;VlI?z;n!tYv4eM;Obc)p8k!*c^&I3!i9|4sw-|6RyqUN(> zK^)+e4#7-2<71*FB=Bn>t6VX~3sf-ws1r#t2U|r1kab)O%*LV_%MA!%$9NVOUeDt| zQACN&fJr<&hq;b1B}&l@zc-f4!~7Xn%<%OFf8hP$0;IP50+$RXAmFYIli|sgzK_Z) zeU@v@wz;#DxzO~x)F7S@ec9z-9V)TFtNA8M!n$=!cTea;kGjszTR&6Qmer(s#JGw# z_E2rmX80wx^ZI7_|8!kP@%yO2e5>;0!B$QN|^f(DShBSp?} z@GbC%kKrzV!$uMFtnCUFG)b;O4M`xabVDpl3;1I0>GwUlb^O76X z!gH5@d&(-07M5<`fE8%Dcsj#hzUmaw--{fqc&v$cU!L*>6b#0{wkK58HEX7vwM;u)CYaG~jTENWitou@H z94lClSF>;W{8_L(Muq0!+K$X&=@##LK$=J|ei26MT{QE6HwpCLA_9(vjg%9SVTzGZI9PM!M$*66Zep3To-D#-Q!Gs)`WZj_=U+RRt3e&+lkqo zE@Ze70m8TLSR8_fc7S;>89zy&qI~%MyumFAFE5FDT2W9*pyk`iVi1X8vW0PW18m zg&LEKC*=DOq9$Ya^`L%e+45oG?MHCuBeQ{br$f+gDv=AD?DTtLh=@vEmACWW(~CGz z2-!}c+E{nOMmnHUo6j-MUJg8e$@HEVJYD16&7Wb47hW3$yLfMdP%p}QXCKNuLjRo| zcRWy53|7ETX##6~TI@5@S)!va#65rdvKRri%N+d$DR3i~!ee6ISrMn$bbv!pbafCa 
zz6In?WS1gHME8gH#TuWIpVWH4MAO{J{$<#LDe?!EwHnSSD{Nwq*bpXGP?I6HEfHIzNUb7sppPxef3_!Mn2hG$3 zAHanHaBz|sAHgobAG`rMhV@x(A$E@M+hJ?-Qz$V~;5#9M(Ua89$HDZ1{C0bBKbS&Y zvKd*ixU_r}w%1nJCE@!EBu_sF{Sav7Ge*uPMW?5;!^Th|Xak9Fo<~PO1@8fq^Q+?0 zqCf_(Qwhq(du!VIy4<9vvSRpQR=EijTqxf>mQ_?~VcC{g^sno2{B5E@?|9sDBC^Pn zUHybMY$L{V0xxBI22uXC@C}UX4zc19xq%?@JeOT3Y@K&j^LZ&1A@IvIhA|~v9lC4T z&G;FrlvOB1PM$eqVJqz1^muj3Y5^+j4D7-}5Iq2OW6!x-0W*WjP@{DJwH8w`&&>S& zJ%*G5w^OS6+!yENI z5_31C7#t5}0UbsiZG$K{wdacY>?y)+(95D#vYF=IzhBuzw6m?_w#F$ zp>k)9ge*v+P%V-J2JY`eNPAa}=(Y08e#P!6GFH8x8^bqGGPo`=!MWf2AJ<{HdFu4POVK+=Vht0mkr)q){|9)@ru;0h89aO%0sQ&VVImJIP>GVVm`21+41{psLFlB26A!jG z+Ar(U3zQ%F+g}e<()!TQfRm#NykY_`%`LdEu9*`LWcvz0cw1n_NbiK z9R`{9>AKm$SK}lK@Ey0PKK+VH0#SM|6wt=a-0WZFs(OWPM*!GTN<4z+MB^ zAX=bI5xUwwk$!aNU=&V-5CJ*ekB6(=drHQoIKnF|dkGLzIo)~<70n2arfOm?6{JK1!~cbwt#p!Tx+_k86*M?MOU0hCzbh!%e)9# zCiedz%N)-6wciD-4`^++zT9sbdRaZdH?%Y69@Ro}wIQFU~w`jZu~RHC#c*{k~5NMkP`K|T{N=`vChZz1|}w#ZhlY3aI4 zv(Q(1m!+D#CVF3zJck@JIdb^Uk}3H09@n!jt<*8>UCJs@oW>Z-(b*sD>Q#@~ntSU` zl%G(1@whsP(0EutLH&^_eaAuVFJ^?Ld*f`==!f9XE>z$0=tD8=c`Kqhl09B;(vS!y zcIP#tFKTRWO-c-Z^bLodH$xGxqiL%+T^l(35?q!QG?lYkZMiB|BQ7~Djd+z*>?}Ms zhK0y{an@HtxUu`pQ;5J7FLVMU1Lk+{TUVa`Y9QUHg5?OroVOgDN!iF4JHoKiyoBH0 z(WANZ>3L7sJ>I)mhfnJ0?++4-v1*0+mTz*t6fHKD4!&ce_2&~jACbeO9CE04IefHt z$ey5$tg2jPJ0naeU(}K8-@UA=#ff|$A5w>5*9}t6korqzau<&)_v*d1R_!i0&Gkyc z-Jo+Caz6=7rrb}%0O%S+A}pG)`y-?ZqE)lW-kH6=ukLk<+@*ejD+ywMUw!p>y{#}0KCN*z+df)J zmlt&J^UUqT?jfIJ#HUr)X3OV32!|c6o$SDQIT}nGrGq9rv8{~gqmRIDb`DU7JRy*aWh&JPfclqO?@e&g_dG;jo z#<$^w2wR}IJUU+hDJjPo+%<6C)LXFTTPTT!ClLqtQYXh^ssMg5KhKnurRL2S4?N?OCcG;h2$L`> zI`;-j&2g-o`3+;Agl7dtKVELXJ}(gRxJ$vL?&t|x71HYM&d_xd$8QhhW`uZBAGYyn z`a{=TDAKzh4~u;g+xV7TSv>S;_LB3m zZ^>2@KF@>l^XmAEtVLqevxfVR-mmrOJ~XKpGO+JP8?R281Z=x*Prs2O(H@<_vSOI$K4yF;3%nvn~F} z;dS&l_~1*RN1if~BeU~ow&V78IlZD9Fz1-ZZ(R!- zFv9tzA+pFPwlUc()tvApR~AhA97uP?e1()@&w4;v^k==E%(I-r)6DOI5N*p8@a9#l zu8Jwk`a3ucN^GrL=$BeY&wbrq)jnI#asKXneiMBg`o`S>;A1Zuk5unt8*vP3Xc_z3 
z)DiXiqcybC)b*S2Mc}^Otq}rA$)^&Q0qDN#;FZ^3`k@s))$U%&;WDjc$RWH3TP*Q+ z7Rn4edrfesNZdECO?78#YTE`IKR2#(z=J8iQ&|&(qn?r=2rWgj`l2)j*-H!`d?e!U zyHpMblwlG=ZF^sEcd3q`lFBk!wX$L9<4QX>dugSx&MJVJbt5&plys-A38ryu)_XHJ zjc_yhMc)D*&_?Z@I3hx*k??u6C^&cR#_6sp&gL5K?&!EN!_D-zX*vG}F4~*7^DUgY z*NHCM7c(@GGpg((LDQg6XaBL8-#;%|;z?{DdyDr<`55K$tw?L+!Vz9W@&j@nGxCz? zm%{yo>P)}?9IH0+l#RZCtPrmi=R zz_5$$X+F$*vi720Wda(j_r@{ffPm7v{R}kpGYJtoNBf}Oooel8)xOAisN*WvX5Dwk z80xA$<0xsVt_kyGsbkWz`7}-hpuI4i^pB?gA79@cNOk-EA4OJCva=eLnJ66V$Ww_( z**l|bl9}15j1noK?8rX$-en}pUfC%{WM^f4uQ%t=)ARZL{&_yp`I3^PyOCI9o7ckIo^Qi!{$`;6SOa{ALOtC1t54^&3k;P4tQWT4Q zc2~?p>2!sTSDTAl^}_v!v>vNpcWYPV)^UVCNX=Nc6IJFC`3`lTl+fWvsa4@PTlck6 za^+``od?Vfk-^>-geJp#c51o~9k-e!oao#po_MY*FGX)GN8cKB uTSgOhXexS-& zKqbVoWfW$=j{PeCwzB-I?TSo=hlS75qV`cVTR35z$(h;I$4!>ik5DsL{I{e7cPt1J z&?PX4oFsFcTOGdtVxYNIK`9Y`XpA~=XvBl)HqNa>JIy`~SBH5wd9Ke9S=@p77L^I% z9^BO9IV6Z-9Mxb`yI!=mz>vK69T^W?mZaxA`pTy#?QrYDFlV$|?9W~2dglz^--KJ0 zKN1~GxKbg2g$sJm+ZB(-N<9ctL?zFU7Yw+_YXG7l4;W7(tGh?kMB}BAa3z{$zvyst zKo;swb<)*~-f>SPQS(@Ox8m{ilB!-CeljMT(n}gf{*vcwnO#N=hTbvR@GSiXz;>wo zcSlCT0)xy&&*cxhhXQ5x^NF@;y!`GuxZ>dqm$hjqlTC6xk3jQ*4fz!hiaXVh zM9d}v#BO@QaJ(*6^V`~%SvQ<#YUs2p+$W1h9snvS8lASvXJB0a-Bpp`XjgZf&L=IO z3+vH#_WhwNsIjE0LrHOu?%$06PSXV!nKyYK4G_t8=re^rm0G%pXS6zx-X=pITfyhT zjaB#GC#vNx)X_#7eTz2kwW(nB5}PW``TW6F>ifq#$IdwSzmz{iJZz|>2#4rOI6>s_ zWsZ^YU%C)1Rm?T8Y{Acd8cNk=*H@x+AK2r+!giD}Wz5+4DDVBP6E`)*tBKgAcP3=HAQtja)X(X#UC%F1V|l7?&}{=x(AVK~v9 z?=PoY+ky>(5Fp*GaRx4EN?Vt)Jn6UKFA=&Mu~k z?hO5kTWtz$^28QB;h!(Ex<5}%qL$IH>W$uTQ~hM$rr@XOUt3Fd%5$lzrf8~WOe`^C zR`W*ODrsu^Z~gNlFw0EfC)|;E!e(|OR!>vN`yr~K^f zUR}rT_@ZX@UfTN-0-u9y;IhQ2ssORcKvV(D?~~~BSowGh(8vSM=m>2p7IW8gS~aif zA3S;+FH-9!_N!T@%Cid<3oWY;87rF^A<_qUim+&1gYoxWuB zR+LW9S^5)T0{Qf>*&p~mL{jgz4ApbFG{#aJ1nzcND%tA;Y=OL(;SbrfseT>yfJZ~p zYP9zKb=XRFAgPQt(z9v2)`T6-G89*ErZ4Fgx@WJKk@ki5TNk`Rk8U2gP$Jrmd)2m< zx$8H26e0)=;_P5^h3CdCL|gGXF8o}nyyE%0O{XxSk9IfL6Nv@WmLY+| zx7U~H4yh8K?3e<;&Y-4kl^1JMkemA;S()Ev&E+t6KzF{xe0$qa$>5r`d6zt2k+Jjo 
zGST@#vAK_eRWD7VR3}e=Kutd4PM1xKm~EBk_X2Dg8}mft(s7uU8Ej!l^GZYEM7Lq) z_a~mEvC5c8B68XX5(}RMSLdQPct*iD$tS1Nx#iJ9pH=w*v0@KYJ2g!rb(BtYtLc|r zL581cRUGDiS3RGY2P0pFbpY88&3@pggb}4vIvp?R+eN>>h^A5fusp6GO$s#;T%#On zp$}DW%H;_y8f%Pz{@BN4nJFzz2ff+ma6A*Y?M^1ftC~I<*+-wL=xgz@0KvEh1^tc_ znI8)I5o_KkVAT&rcVuvtu7b3bS#8a3#4(w!Vx0_6^={8s2opN#wph|j*Kpjs*3efT zBv*{^9;&-ro*QTP@Q~C02pd*;uUK%!$zQAY?nrBXLw%xB#w+D^#8@)EWeUOR>cAsZ zthd*}U8jSKwbyQa3sxr^E0jZY_XDuTd!HUrD}M@NuATQ`2tnN+GjjH~9xxD7CBGxiz$eW3Fo@sY3H0jEXIYV&3<+*rM2urlE% z!!8hba5s7iZjAQvj=!f0nkae5sbWR9Zf>g3NDT_IuDo3Y*KktD`S@Tf*Xe$cFoS4! zk55CTvwNSXumxjJ3t4o2%zVdak=tv7&CKmk8zdEYPou3B9Ix^4M~Jqy*#u|0K>C?( z!&7|g5|=DOb#B{!GEYr=R6hU7=!qAgtdp)2`;Vbx%8B+kjJbM#4EVQ+G*(4xaHfh` z=x)QEeaB-`6lJVJ>ki^i4DlJz)FhXq+zenEqp1%!%w>9V`)%YyYENAiqT^AegB~kh zGheUpjd3zuA2p5fVf8rSHlM1`i{c@cfR^}Azvsb39&rOi!y_`8-)l`SwLXJMmZ^SI z*H?6f)}24i{L)1`%c+(46q=e8sah6SOW#+K^!cA)tYam7U0X$hV}Y5w%xzqI(AtSn zAd@<^y1HK6j8{$V;oEp zo8aCbL#eooC}lvV&e1deX(~~Z@*_Z z`28zfE}b)qx(NnKP;nDy&+_sPpMh&EZoOys2`QU^#>8_iSpxQOiUy|=Rp!IR%_RKLj_EmKL;A7Y2H@iJA?Q<%FDU5)yrEg^P zWA>1=XS{iT|J{R*6|An%+odBNOXpPo189_ZK~0+1jfHL>Bbsv$blGYOrtcFpxjg<4 z<^JP19l7c1ftKas1t)XF2e5wnPixS~j9OX&+#>6~N7uy;+kF%u()WA&eB*aUNByKQ z%Mvh+x5hGWw;|OL;v=x#7f)$f>R4HK9}|#$W?7eVJTlC({%s4JmVMs&90XH-jMP4? 
zX4X5RUOpQ<-7G_yudO@s`99@-;eLC&F%>yglao#ZfSD;M^7@0Wv4;by;amNc7%8Y( zIK3QuxB2-=I(A)i;x^O0&yPd=h!#lf3M(|<&RH9Vf0KLJXE z!hI!ORj3cng?X}Kqg-c+D=iq6VM(#~Ae)jMhQ68Mw0&WWVuMrVwiIlAK+lnpj0;T- zXp7>l6`Gk8ai_1JMGR8Q&&yuh=dk%fDB}$bT&RNLY*u25QGMDCAcGR`kQZP5>pbAAgwsf|hsJ0&nZvc+fNeh>4V_3^^UpStpC(8K^!QKZ@ z-AeOo9^kpxw-D~q^u^w(i$jfKQeeK4a={}G!Pr9WQ$(u8@cA{Porv>7w*O+WU2)C z=7V2Azkv@k4CA{JECJIIqV|8_E3te#W7Z+X;<5Bcj@0@Y>NxqzhlMdfEImHX z+HUX(jlSW0$dXB~WBtjmRiOK|)mUd@3J0N`h5` z;CV&DB+{XibZ~v4pb8u|U8h4D^OmL(G= z9m%a+3J@rPTVcs%ye)OxuV|4>fOL}!4>%i*XBoXa%(P!>bvoA0NHqcAAA9M>`d$No z#rpv{yJ=d$Ldq(Cb}t4yVDFPqC9z_L*A`YkezBv6&=fZMVHhc1nsCAscn3Mro-g^r z{$CW1yn>I1@vbz9%2hT^RnGEmo9~a!R1aV6`WGvJU2@uENb?keJx5`H?b%HeGiDyD z>+J);T4-_p8oM_P0(kKBsHe_?P?Y;Zh5?kQ@rkUb6%3F8{vjsPF9DwpxzGaCpuw}Z z_Y1g0JHa*ciVx9B`Md%Nt@HGP358tJ-PDSQU(lYu1F#oCAra&O|0H(lZ(Q`Hc>oTL z5TbF|)AKo@WD26K#kxvU1D-rM+-ZEYl3c`)sg z@0xQ%kf2&#RF0bj@l+ADA3?Z){ zZ8l_C_pWBwNJ1*EqA%2_*%)GNAHD7w`Vm9<2{(#Ujl9P!F|rD(t+XrDQ^Hzczl{6E zr#Xq-BW&OOFaT@GGv)+w*b}L-S1I21IqBPG-hi>12H@>mPb`8$+9$R2c2QpMg6rgR z*)+sl54mqZNQ0lOI|43XA5yOfYj*hwFK2C(^1PROAzp+Z5j}7i8%;4?2}~#`Zf|+_f(*wFF^>q%SXlzB7_QbFa8SU( zfBdgi}Cyr1o$G+z#@uwb2I$V7%Wn+z>2LEyq&k z!pqz!I?^`{I_S`^aM~nfru#VxC6WrFx)@0KV)XkybFJT z$k!0!+D*382mXL=S!pSOnA4`s&|+DT`q2A2k$4Fx%8Re`D?Ff5 z4Rf9#)7UFi)Q*Le8=HA9r8hOBoDp$jGuMBQ)2X)CMQ8R}m%v;zkJq19W%i4T90s*= z-u)eyI7}kRzFcE5x;#SXt&w$7Pdlqd4lUd-R5rT0w8*9>xJ+9mB%l)1*;;9` z{B<*`RHbC4P4N#HiAj)|#R1pKjTO(0j0pMc`4{vL^U4fWQEzt-doH)6yIHinBVDMk6bjxIh< zgZmjD2@jY%h^%))oN*d7%(rvG$_YA$3^R}A(cZ|tLhh-R-Z9oVk zrh_xw;)g8_KfkgHG=db@kk}x@wOgy?{5+QXQim&^sxf>AwYCrZ$kw5H&}dc71yfo@ zUt8}9&5UQI$V;*XO-+Le_ap0Xc3dtUmH zv1%+7-TG)-7ZoK<(F;9%6-@u`MBcm6=8BEGFw0jU_fVC5-9pPnRxcQdcC-%?6iAC+ zkvZBTtwR{_>dm4@VwVz~*U$D+SnVsSW|ODLg*aZbilhcj6YE(^4Uv(EL5KKMosKY# zGLCLJhVM`O6thyUP7Qp>i(*V=g{ZA($_x|?h(M;}`1v*9V-ue?#T6b68X3cqNtC$5 zWSyL-73CxMT9|RSw0P5xH=if-p=$iikA=JrQmw@PZ*xFPaP+~o)#pc^XOA5$4FcVr zf_CdXB5C*3l50#QSa;-a?YkEOG!FI;kJ+#(L^>Rz(C{8%NIIEUPfztwIw0Q)E*}_ze8&l<1gk&}&sIPFawYq9uyf2FOI7aV 
z<>jq%++KTGmgF^!=6~c|;Cp?|`VFU}3;1ylAljXUUGHT%lp0$-cEW7pKBeS)qBzOr z;WoqpctHO`F4g{-)FQ+aZ@hq8qb$_HhRJAvGJRCMj!!O3jzcQL-g)?`k6%h>_0)C( zz-67;=_45c{vW>>d!wd7;lT{!HA_W-B+2)Dp51*WsYm|w z7%=Y+j#_4N6cra9AuB0=xb;SpGy5{Z;J~7tF{)OPu%H)`LtCKN>J@ot1btSGo^DQ@|x9nn$H72 zzTzb8`WI=djJ9hdlLdoMZg=?3T0x4B5}?)17WyaRr#XKXTNsQcOh{+7wgRZowbyfH zj|Qd32h_fy0V%^u;qn=O$VU3vy~>2{B5&XuEF1lyj91RQc~)6rSFz?0?NW*+%zCCs zF?LMbt?|f!#iC*R?>^z2e^D8dHOC-Ga2@_L}%Y$Wmmg#$pWknrAdUXi_hC6=XE%q$YSsOHI`d%)`vnXR&SKJ zGM6n3ZrqKXR_a*=pt9*HedvkSUo$@^0@0J(wI0hK^c&a8E~)MR{asj?1msuE!V}_N zWKH6N6{Cgp$HF9B9ZSBx3sj)EcDxtL!w!JidX@cMXDkMaPrM)waLQJ<^f0WuJQjbr`#UL=joIL2~=q;F4$b(a!F#y z31_5}mh{VOgb%sO+tRT`oh=-rak-V z`h68YjkV!Qa!#L_ujEz7W}@&n03$!}(`(O%-g_R9Bs}DP;O@Rm~U4aem7$>=YR^u8DPC zr@kW25&}ai@1>*2Xll-|*nJ$;p&Efq16fvBYpz)rtpYSpNDc{Ch;n10X<uHWTDKYPcP-M1kAZsjR`a-)4Zu6iryzxS6b?A%YJWVaA|0!?8?KgmEAK#rwB(pw* zDCoC~My96KzPq(5XuGnj_KVTZ+g6ftK3H<%8I)rV6&KJG%qmv=^`XFXZCt;oVsAeV z%NhFJv)1#_)JQBzZ{$?khZ!!LU!|WJOuC2apz7lMm;HgTBH})#*Q@?0HP)!z7tbX< zj*mnt89|cOyp+Hjmk~l8|4bwp`2_(pdq|A`VIvkoyjFJNvxa8^-TW|NNL;oQG|Z3} zogN7Ua}5eN)>p*x#vny1O2L?W2hkVjRtGnhYMF*IJl8A=zlT{=f*-1$odF#xUQo2? 
zg#_h?Ad2_n*2m+aI;RoW%m7hUH$pVeeuM~B;Y2>eM8JV63ERgL1%rHaEDzs4h4>iO z_(W3GrRO4SYUvLNg1}Tre&rU5z(Q+IyE`5`90?B-l0&q7tQn)@X0>gbWO`~tRS0ZZp%A%?~iIa zwqDZ!x4liG-T9m%-lAF@yN0hjd;>_Ju3myI=n#v%W;dg^2B{b9Fz*{SS<8$QgdTbNBYqEHcno9`?;=P9F zR8*a|W{6@%=a5&MpAyKNgOsEXTyk+n>|#w9>+W*`R0n1Fq4ZzR8F=vn*Nf#g23wfQXXI7=V4O5yZzTwDV!ya#4Ix{T`!pNhoo z_m?kx_sJ=y2V3@h^z|E3&Iqdgl1nl7asnh>?2%|VsynDE=U6i?x=19CTKSn%m7EwA zQv6ZFozvSjM9Vz+-qavuW`=5|Bu~i;b)JhcAbPgJ(ZF9_Ht*ekjT*=2_~42HJ6I$r zXI=0=WzE+yMEvp~ZUZq)gmrR{vbcwSTHi(CAjaqc5F-|Yvm{&JX}*M!p_C9h47vPq z%_fNgHVTH*uY<}QMsWDvc$^R1Tb^zz?fx4?Vyfj=eByJnF!1*_{ZK;8r0GE=eo zZQ}tsf9WX;@lKhTS9`v!{bHB#@mVs-Mv$d=#dSTIv|8i8or&)UsXCL})&ek*&PH_& z@u+^bHoTd&zZzq8O3aZxqW`>XB2w1^q?F)d!?wls5#xBK{*Z655{F_0Res;?57Tga zpifB1*GkB^Uj5#^|3eB|>qcb01rqG81UHKQoW>DwL9&qmrEL5sLqFasK&dZOImVAD zEfUtb{#YihH97Ic-J$7P`xyTBe273r9=7)5MS9PZ~k zP=QezElU{q_deK|M-qfzMD!RY0I(xnvF^hSIt4!F@6XOB0`MIf&nz0>`aJ(F$^~Z{zJLFp6{S7T5(Z2qQX-iSMOmYj(##B%Ils zf|=z@bR9=ho4dXQ{$2P-NbtjX&V7$$6!4TWAG)_}070TiRM*>WI?Nx1WX9T0ua~%A zM$!R&OJ54&AF#)qKeG|FyRyk+Atw`njn|D(-TdJPk}y#06u|yd=IvM6*KPcFVWMb! zo6c8zIwj=Q1#x(f(0v^5CS~|)o*y;N#R#1$J6xjGCoiA%z)X35L;e! 
zVC&_WdQy^cmBwpVwilasK;!1i8&Gy&%?LWTX8Ieyr|7S-4(^ZPi}pxLzX~dew8Jkk z1nV!jw*?lLT;bNwY zm*CKzI)2dEW)oOkGd*FQr^R=Zi5u$vS)Mm9;SUt&CFBCpm21^ywUS3b+m1QEOV~`h zBHIY$V0$o8j8ndhy7254r{>i^pRFWCIHT!{lWfwl_9n@fe@6qGC4h*N%c^UZl;>0; zKkdTN2a`g6%rkM$z^J2cA8sWo8gC?0$nKB*E2pKv6}ZsaD(>~@mx9YxjV<28n**x% zNx+1WHkPx}zHo-*GsMB$Yi|}I{K|3pamlR`1dmeAZ6RHTAKN*tlEUgzWnnc^GCt@eT}-a_VX#8ZMRt@81zFj)?Km6=t1fuuwl z$6aLPg;J^pO8tLohL}rxzL3lLY)RC2k2^s+dqVy?-7pn6G=j5|%cmY3Y~Gh?mVg&$ z686O*qgCB>E9K)Y@zPr?AruSLJUSUB`e&>L3QNrv-_Hq#iJKYvHc6UpuN{AmSR|+N zrjf(uIw|~b!pkFg&_muGk1KP zGjx;^&#oA%-~WC)gNnr8uy}@+CA?SUR*mBGWgZy@c?@3 z!Dufy3oeQju|J<|< z@4B8!Cx%r3`bukci%_K)t#9y@3VoW zLY%sKHWz~aN>;eHt9kCjlD`5dvuhCN}lBtzmwWACrnu zQRQ55wf5Tl8{0S#OLNd*dmZF6L@^{14(FJ3w~< ze-vA*=Lq(COj8a16K8DSkYD)*{~i_0@cknmCU&oIyY=UT;~o_=UEg}CD^jjxb0y7p z7v|joUw%6O4aLhAlRmX>);^QnQrvr* z8jp_fhhmPf*U_c){oxDVVW7^7dm?wh2NzG^YRqX55mCE$xnxgb@uGol0tx8-12N&W6OO$>~|Cz;|zrrYXz;{P9^4iN6_h!6}7o zp3hE^JX7g;o5~Pa8K`BuP=A?}%2el1w3ZO&_H{D#<)3B&*$)n`(J%-M(EDK`^Bcl@ zxzD9i;|Plm-6PJ^di*RWPSfs#gKT4Cx)HV|LRE+eIot697%M&?yP_E$-8X!RTE^{R z(zXfTHq%N`K(afE4tG?tivuzhb*+Og;U22M#wOdpC*1PkmbBEt8HTMAX1Z;zL1{0c z-t%jF!>^Npy3t}=UX}L95{RMZ9wQkuc3I*57tDa)C*6a$&?vd4d~jR-ArcjCBoS|Y z7{8gUyFTi=G@Ea^i|!;^Ef1P!&X?SJ%^+;0eR2H-z;1uuK+j3G^_P}2qL4vPII}?5 z@K^epicRS#9P1wyQ%K!ao$&tOy9C8tEpU43jVCae4k_+^ybya&jxWz5rFx%@)}lXM zF#xDFB_ZfrL%~_x0=Mnt80bS)&a?Rck1X~X<>SXz3{U5eYqM@Cr)FdLl0yDai2Np7 z{!#DDrVNaw^o?#23cc9N1G(0d|F)BRNbC_HIPTbpS7NZEUm?p&1UKEj#^BHH{WC=G zJGN=z^ghXJA_o;A>xeZ$Wl&_w*XJcId~m37nL#_+^`_d^R(FTD9a`4~dE z>=qo(`jc|hgmt@&J+9&z%rKny-u7;`J|rOrkLr1xu6SIn{h@GY&%k9r%75%b_&@^1hE*@u4C8@(2UHV@tHgaob zqe7C@bQ6Po_v2#|_Se{!#aM_Ry+6i2Jn=bypqn0>XTk_^y|DeU|1kSAgxTA%w)$q@umIGOX0Y3Jx$hJ1rg(Sv zspGRpt)NPE7;s0FN7R}ky%dq!GxnPk%|=dW)>%{l`VrUzs;9lPf-7#sroM<&oklMbZ(z^O$~5Cj z^N!*+MfDyFJb1I9vvNz-H4wp>Pv*6|X#;%6plD@NSEHiSqoW;4ZSk_UO)jBGgnbLd z2V(pTn71AOnAuIwb<`g6NcEJl7jX-OQZ-ms%S~K;#rqfNSxLXW4O{d@pC@W13|J;G zJj2{XqVinZ%{pB@@$Dd+}jfqkb>G*_%s1BIvqrYXwLFu>}ktI 
znhqX#q1%4qo?;$gpBpIXj>G{4p}5TuT*w5b{bnf{cFPMlR%e@F&s zY3OI;Xx&>%Dpk_u9lQm6k}26I6qr4tq}36)a76RT9mS-xHnD zQ$C&abzPYl^u`wNs~(hHFF)X8`y^iOQ}JA1#{+3=;~y22(E zfzixVkMtQOm*%i+LcVJq_Fo>YmRk0?pRxN(EMk@cken_ZsS9_Wc|~UPscza^8;5O` z95&-nkB9EJqy#Z-i8@u%Q*=-7WN-MRK|k}{tKM6{e;)^1QrmT=o5g!bekGwV!rLlJ zzL>244~%St*o1eI4rU={HqxrPiSo}C@s7bT)WiIFCaY)MA@fYzGc^A7H} z#amK%gXr0!n&FhklO$}`0DyqYn1%Iy-{T|vixO}CU5W%bg}Nn30gN27u`>iyJ*87J=N84_9ZTa zzKvedF>RoBBRMRv)X!`_&vZt<>ygUARrQWtWLri*^tm=}V)GGaY?GpZa#KNZcbGip zSLe540NF>_l7fOGb3w&(oov2(CeoP_=shxN;(k-d7*9lcKg3E2* zMJv9UQsLW!lRpMvRIJrF+nc_}>Rx9y_lRjEKQ@)>T%)zzZ+X*aX2Xg_6G{T5gV#$fqI3pZB)5R6%g(SalvNTbf(GkGw?% z|4qE$2cTzP9UK~Y*DWT9BWy8$prg&ng|YQ(i&pbnU(4nTq-{U2)bKkU!NKu+RN8E< zwjYlF5GD^VxwIg*OsqnwEL8VSn*(Qb4`7CUl32KsNm3PY3_^TRc0hY}MQOKHRPyF# zF^r_Ii9FW!UyW-x1sd%PjgmrQ!YVNy`ruzd(vDPd-25dWcB_~2A2+JjoP`TGguu86s0b72L+lG3GS*0in*BuP=-E;P~bxHBy(~iSzpj2q#F7>AM|#GJjgM3mnjfV=-Zh zf)n|9+xiv1fj^-EA_+YQOjl@^R}Csrd<^*4ZBY&JcF|fiq|(glfV>-yHS3*W4wei5 zA(yH4*B4{D5!Y>vEYk!34;V+HXRKJ4!fK+wtqYYbWaMmbfE)ai&@8xSmILZgRg!bN z`KiYLrgfMckUBYkC8y?{aCL3L!cO>d0zgFJxh-np7YmzVCPzG#z|l_)2f%5RVM{yb z>T$z^_NLL3w7=mzTeBueLF1zvpO^AkL!1TOh|jj;vLTohGNi0L#?wX$%T>^bcfN6_ zgdsk3Vm`2V<;1?imx!TDBx*%1$3;!v?)fY4>7C!19z8W1Bfw7V*mIULspKCLs`tg{)gob#uh?i$+y1OD#Z^)IjrQ#q zY<~ztlCh+V63du}*}V$R2Q+XIR;3X1tiP{^4VPbo^k0mIrY3!@`kboWESCJuHBzDE zyXGi`;+c^DsMyg5pqDr^vu~TaRb8R@nQJvb6&>84hd@?Xc+4}xI!YZ0(`%P>-T&5U z5P3_&Mc(Yy&}V?&qW|%zn@d5#Dikkn`fJM&MhO0F-D>eRdyDRvR>HbR4=PdVw0PFS zpEr2f0M=3tmY{%0*|2ua0bL86C~i}QL+a^5 zKahYTgTH?^c8yIf#5elP@o-m8haz2$^LNq0L@I4RIAYrVVg~|31tci`nkjmL{m~do z!9@U{<_P2pTy?)x*zJXjZta}$(i3p~IA<&*O>P6i4LlD@AiFTN^B-_KUfKz#1UL^&okD_#xPr}7otX5kslj{cD9RqN!|fjkMct%L*h7$IzhkC8?jEg5e zT8z+)hNDl=whlIM1~WV|YSooU=-TPn>++w)bWlS;E6Pgdm$-H1^LuxornrS_?(8Mp zd7|@(R-;s`4A%Y*Q28GN-jOZ2kq$u%)tEAKROLfmRfoUw_`rUmi0-i))K>3bNJCrm zon5ZEzP!cg=&>o;Wqz_VCFsA_gt-ZEg|Q^Q7OeVx2kQ3;=$@vcRk^QCB}Mi9VDRWTEO-{nKELA_vV~yP{^65{ktsU*q%KOItM`kX|k2~-G1LZ#J>KrvyZ89 
zc2b)OPd~h$8dD6+-!b1IhW`ijZX|T_IpHNfL4s!rC0oq3U=9_wbdkLMq*O^5VE8j= z;s1Jx4*}|hndb0rS0BcRZ)F-q?zC19@9v_R$zfc^ z%=Z_-Tm1*Yv}(I4{++ajq&7~`c$oGw1ZIAhNX?PLMIku5x1ux zgz&>6)1mTqo9h}LX_z9j>J9Bw?3>u-9PZv%02`AE-^7|sraDJMz&-{=xDhrrW#M2v z-=j)UG47z+c;^y#={pFHi!}8t}Jy*=`3Rk6Dg*rXYu0 z(FB}E`Sk+0^2t>uU$o-&HKX){t+u>7N+#*q8^5k-mOYLs55nJC6FHnNOb+=6T%2dB z?Y+0Fn<|Z{1pb$MM3v+X4?Uy8l-raW9-J~RG)P@<+AQnLoG##ZM)3UEpKsSY}Hyr<6xe)Z3}vNT-G|hI=T@ ze$DYC&2EG#KZFLOv<3d&{O0@&MuSin=YXS>jMQK5r!0SG?_e3&RfV(PXQ*X8lEf_D zfjziJinc95Cg>43nSeO&|DhB79scQ|k|6 zZT=WthQx-(8{3`r>L|@Ys!G}M<%KT)uq_j0)T1OStphm@$+}h*_z;IrpUcFhj9|e? z#lxbWkf)<>hePgnniKQ4O2()gTop7#F&C(1U1y9v_6fYEp8AoJ&}LduPM@5Py_xXsBx*Hc(j%$`RIHeCRlX_m_@h%Z^$g}DHQ z4ta+<&DtZeA4`cGxV|RQ5$-WRN+ngoa@yW`Qt)T)_Zs=UZo0VtAbyhPP+7DcOM~|< zZ6Nh89Ynf}ds2=D(sn+ySqz}uzZ1#^;ZbB`=zJ(KxE2)5EqphM@m!1w4H?Y|M#GiM z&vs#`Dm!JI8i*WP!=i8S9r&BdyD%I-wqopvG@&1XUa`}sjpw>rIMUMB<<&uLdI%5~{y-w5B02j>hmo`r$qq?eF2E^i0!uot=1REUycGw&sS^06#2o0DaKahEAFJfYf^=(K z^JeVKw*!c30G_As6UTgaZD92<0~J^4gpBM3Rz zzY*wn^^aDlw)w#$SY(Blt%TkBvR!I7yvK3&2b-+x$Dm$PKW>c-9{QlJ(?`CssF z=?QXTx}@=3*Oo3P7Nze_J>D!!IAb>oIj2x;0<{bZ!5y!UzuX*qwqx-SvSHK+44CO; zGy%Cft8|_Gx6kt$_FKra!#ckn!;6|kx?G+wa9sm<;^Or9%3btH%r$~4=TdW2`d_`S z$m4n75yl2|uGjaOvHpU*T+8mGOn`M&o)EpA*bwM!X=!M<}(?VD)-b< zuUJjgNCG+&G%?qt3TESg)irI(@TzZTdF_foZ^wmw!nbE9E?7T5+eaI=S^d1ZV@xqb zzLNc{PLbm2jp0CIVGA6gDRS&dK@;u;C_;ZOD*gQoRbDeqxd~y4ag*3Pe}imDIzZK? 
zt<`hhm}13l73g5u=8qU=AZ6&@onwrG#Pkn_ktmkq6LVp!hf7V<5}m4?0i7RXd)@Xl zaO%I$#tk&kj(6}!9BLSc$>F^ea`a!k_>JtfmjhzIzeP=o;P(PAtv>|WF6tD_{$7{S z^rHGhW(_81_vJoKkb|m+(ABUrPa&I8oj6beG z15kVvHt9R1QSHI-3SFyYBUL_}+4Z8$1=sj;H@9I=A-5x2mvmYTO$5UkuBQpM2L5u*%C8OQo z$FOZlodNaAXw#I>Qf$GO6x0dX7|Jr?fv7M63=i4!lvSE8uGc{|%mp0yb!}4ylMY(J z{(OUI8llzTi(#1~bbxuc3HlBG@p;_vA9}GLDSRzAxH8|Y=S4cjL9}H{wK4gNo(E|4;z zpo_^zECgQ*@MW7Vq5420(c>(#E7hc==(5w)gbiHHElnpel{d>yW5e-mi5GBmOe**Z zy1{mOn7d;rzRf{sY|Wnt<$3c|KDo^`$ZV`z_`r20-cZIbDn9p7zY2!`S3VJk*+KN_@}gQ#BS4nAK$co$>aU0`yf7gG!ugXxkLVS{oobeDBy!as#i4(@(~)Dw z%VdVYS(VGYKZo~p;&Z#=iS=^pn}9svZGwc*{j3L1hDwBzj=0O?q~9Xu^6X$^4hQ6P&h$skBkLA zWzrY{Ll%~h0UP||r8mV#VP?l|=aq?qDwtSwP58q`--slPo0($$J+ICE3p$TA%;Gng zkQUfet_$u%Fy1mH{7#sx9at`*#dU27yjDOrA^3B!IBxIfuOr9d45PzUJoz7QrZ+Ac zoRdn_5#8pVoewF9W@A<_!#gQs=dM@n6Q=0Wv2*Yx2w3QuYm#EME}PFsS`&0+FUr%= z!(d$z=&&@nzptEOy=h}v3h7RH&y3v2<&DgV9C|F+NTIIlpc=Z2bO2_~)^q#N6T;Qj zSCQ+GyWP{E-w&prwiv+N!V?T1;igo*rAa3z5`n<@14&4&4DY=Ec5J@+GH8R2-id&7 zDBEGcRb)R@Y{-8>K4LH7In@7f$_NPa5U(t)q6N>bsx*aD<=x1nkW0*8p;4OzhAl={ zX_~BYltQC%WLPasuA)V{0kdUTp{v?3{r5+0o3f5%TbM4C@c^cCP{19BWH4;kdg+~T z`2lDnEjtBc5n#mYb7XjIF`yi6Vo5DNQR5Nc%WDbtbSi|6c!P7SuNN8A4=wF?-942-kN zuB&wML|+;{FGW+v>ulBV#f%Wa4ew0IpyjbQFlEC;Wcp3?Fju90LRw}e*$@Z{8BAN< z&kFZ76++ixRm;UE)&+hrZ$R|d)S#5pN0=YX1k#Fu7r6xdhNOt(*2ITY3!5oMZCljFiNesuZsC% z3>R>UszJw2Z8PQwXq0;0L(CGZEEf;9CPx%Rf#EMji%KP`td5nY?sVkrNVPpu*PVL1 zaa<|J%FKaP$>IICU7I!<;?mF@%9ES>_~e*VCswQydin7$Jk3TCan? 
z310Mfs6aKEg21D15nZn~!;%3r2?_h_4sSslF0NBIKV}V0SNWx8t$v}apn#jWCR;U- zX=u_8VV^F!VqUVYeqKSwknqxvBLhV~-Kz4xR$BpsPpa+`Y$R7IE5Zbv-YVY%NB#Co zj3vcYB2!3@hiJrBSx32AOVgh=mTj>`CO-Mn+;&;iORth_-47QXU7#18tvx?j3WGHCRBb^#(^Dyjr(54TxM!L&j%XP7d1dq2{uZBshaL?xpG68oclX+MXO z8SsRh-x$6<2GNeN)vk=kw^=;sT7ML02$E8bmLtS@U|&3i-Me}C+n8?i1HNGnb%_2> z^9w4QgCp9j&0NHexGJp}lRf2&WmCw=jWp9tWVk2vC})r(GZ*;YU$yQ9k*J)4KgU)z z!T6@>RDA{6iJQ^x3vfZjYRYFm%pdh?3hzz-DoX>EAx#?rK| zU%->OfVnFc{AItc!Ke;9yH}xB`Q03cL0ZoT--YH{Iz%?_wW({~_yz-R7ac?j=(8UZ zgfHo@4nkl21!Qh1pU4z6f02!2l{Kt5S*YAMMm*nn!3guC)pyMIj9mp*VsWa8*k+Av zq#SNpmv>~yi9NEh1a*HGrk?DWl#jx~6Gk=K8`2`{QsELuQqL=VD1t6%07PX5R;!q5Iwx z9qqCXL;C38D@8yp&sE#eHWUAQ851x#3QUh8zbMjb>s4eIP3A}E`65+r4(THYyR<=q zC~`kKerpLE7y(WJGi=w+cX;+__d%a4JzvrAvwJcyXu;+IXn4-N(^s%^hf=BYcG|r& z%D|o*#QC4)Tk1jDiupZe=3;+T?^4Ao*(0;qFS6V@Rth{`WP}W@==+V#n25DwasAnX zbk!Ao9?v3daw-=*R-JW*a30FSl$%P7thWuZCd4OBdJ22l*B9J|Y*!xjCx{0aKvwNI zC);b~k}`q66J@}PnAb=YFE)iRWn3j#eQQukj0^@wm`Ft2`Oc91Dkz^*YGZZIpsz5f z>M?_$+=3{eAp;l`!n?E%V<9tOZqFdk9YrmX;}8bL1i@gRI-*<8c!?jauT(rCfsw85 z;|0hRZe$kZgS{-yF{4W_1#Ku{qSwtax0_An{5nqkc|$N|h!V!*BQx4VQ|qa@JF9Jb z@}Z-Z9VW%PoL>`P?7Mf$_m)`|2z7rITVputpmIxj8&#dR3&#f+VE zg63>wF28+Yv=t>XD;=hbd8*Z%^5E}bxig$Zn4#z}cAZcHx<}e%n!UF(E1PYu0sNSy z&nxEczrE-?Pj-l}4rIk0h1^*B-9SYL_j$!_?_&Ey3?l+8lsZN>FOc}4vAzKeQWwH= z=}_%L-jsa)&jZhn(;O+`A!Wbl$Zlc!D)7wSr|y>&7kF1(M7sB1UCVwQoBeHLsc&sW zNN`a?a52K6Q8Z(GX7p-<&Z2|<;#{%G_gm?UZbyic(yJQo?jj^%yNg~Whs*Y4!NPwL z%#0IU1Hkke8f|0W(k?nqejbjhj?xy1o?ZCs*o!{ie;)1~R5 z)^uI%HhMOPg$r3ewtE7inuR$J-HaE-BE_6KRi4W3mQYO* z=MLr5z0EMPws`RaOh~)QsLlN?Dc-IP{e;-HuNUrB-ZvL7A6gF=hv=jb-3iMLPK%p@ z2~;-*%h$3!)=--HmPIgWnKeHnGJvGoYnMS}q0;xmu$RpZ!GC!93}lR z&2?@UW{Gxs-lJwk#Vvzs_1xL?e18@bxW=JR$@<#__T6m;YPC2+0`Puk=HKs z8!2rXUUz@prPDJ#0at40ozb6Y$7%{Lf4=`PVI6)Iv^dCWO#@&Z_jRjtG@&t!N+xra}&xbS;PZPqSLtO`1OP&oPPgWjD35=W&%Xv4A)+T zDDm?!3)!IijOY65g=x<7j}A#@DD1ufmukwxFrA}|Q-h1$(fdiN^MO#GXm7gxvH5ix zhz7WDF7^DH9BugSq$f?nMm0$2IFNBufz{#xnb{vS-JvcU)eFK04up2P2RpDy=|M>$ 
zL4MK*HDx~&&W}m(!&R}tFa>|5^jSv502`coz)!gQRZaQRMeZ4ube)>78gS)J5DdsE z7O$`yQ8a6by)kTb;K0Z?7!7#)2VA`3a8fh-d6Ov03GO#ahr7G`ztQh&7;f5-(hfF1 zymw)uSAD=G)klBCaY`I+Qe-%7?E8&gOV_04(YI$P2XpO_rEj77uQhW*&sUwBOoOZG zv&}o?F1Ubb~Y^h$0OVN=bK0rwWqNjg)jZ5)$ve#=yM4`TjGX zXJ($~a__n4>{x5L|~%ZErGl6?glN6&j{MV2)6h0VrO ztO3I=Q(d{v)t;2N@#$v4r>K`+v7Fpu;I`()T2{;zZvFV5o2MoX&Zg3&p7|5EcLj$- zH_}~WGY;GST%Xh&m4I2Fv@>a6mA|fJsKlx+B{F^y`db9SiYa+I2>aMc!B8W7dwpnD zl=yU6BWX2Dqk;x3sD=IB*q8H3E-=6|6ap_*6knp9=zPkTt8eMBSx@(Wk*M!1mP{1JjX8VtesWI^_hX9ZPyL(!1$V&}__LAU_rB{ewXA z7focY2Rq4E=#=VPZVO+-I6ytS6$a)n!Vl)&`G9?-U0aoo7k&VvPhpG-NgyGcX?=ZQ zykI(E`=wX6PJZUhrD`fZwuZ&$C_{4mqF!IY#AmHor{O>0#)s855@lH2COf`Q^HAZn z{eK-+(n~CGDQTX$YivDeWaJ zKm`L^SzZWipbVWN|M1wEzQCxru>3~^n^^-)Ri}h`sI`3xCc7K>cPuAF!jdIp4m4m0 zo8DZde)}VPXsdG?c<7>DpY{R zfDMg#LoW1M1l+0Gu8^RhNOxMdPhBXzIyoHrr}hW1Xfk;T^Nb3;VN6dXclOrb#%Yt9 zEX9U{4-$dbr3q%bAMa!og!3p(pdM^{5n-vpCywt;m|8s9XaO$h3V zW30n8$gV37m2*bV+e1Xg-6CDhx+MRYC<;1bJiV!sAHG^eAl^R5DQ!UTpxyj+;lch$ zAPmHg@Wf{~4}i%cBbIg;)6oB~z8Euedt6_|gxdAs5dZL7QuF*803|{^Pq&uT<5gOr zg9s^f#0rk&hzYH`=Bh>l8A_AY9yD4L4k4{mQmGIT5QmYifj4D&-awF!R|u{q;ErCS zNP(ezB1}mRHA>WuecXJ2!3qO&n49MAiiOdK6pbul<4F4Pe@upWB1X{qm^~#GM!l@6 z^zxac&*0Sy&rhJNQjX4Z`#jXH2xe*G(HDEOnA%XHP`f4$7=trh4i+ypx_ zj>UJXS+zb(y-ckZ8K(&VPVcfs*;7|pg=`HI&Mc&nF!aI?3-BKLw6*w+nxUeLF7GF7 z=S1tf%GE3cqpbV-EGSSwKzAFAY@S==jmhMwp=W>3LEv zOgtrr2;h3-`XnD}IXZa9gX4=~ih5Z#fv-TL&&B99ZhunlZknpit~+ndwTQLqI)@D!nZ7}DejJ4sr9UGYxEFF zdZ$v&Kp}#)t3Th54D~i}~Ft z`^z!i#D~igHCS!!JjS;EZyjGsgR9*|vAxF`A)wfp)P~c}i%)TEDgJH>xFFw0xHx(- zl_&wW;YdkU6GI=`0;R9bGz}KA{iJ| zqc{udOkp^4pmw!q6HM>;zCQNH9k2CE-9?66t_>JvPQWfpI^c|fH1+|Qq;fwPp>SPt zkB|Ay*zoh`&vjvtw@XQhf9~UEg4v!-$BAkJ%7H4^m;}+_+8!28PQ)*(6go+m4wd5A z4}N|@CR1XmS5@2I!YS;2hM^cWQZo8NaK1t3yV{Nu!Drs;qt$pK;p#&@@xK-t9^@+u zI#tGaUt5-<7IkF&K2a>u(2OMC%d%@HQBg!8sUpohWA!T(J+{$)pCsr$Fkchn9K`d% zQR%*GV3Hd#!Jja$ru>c%p2MsG@;c=wSelcclD?BMS$FNA#XR6-(OkjQs*D#);G5*x7L)1J+rhQJ~UO;=|p!+LF2kRe!o-u+g&~X#)m}i z$1S)9Dhf-+xEgnzDSMs8TR$33RO{kREIufoU+}QDyLdidUWez{Mn2k=HPOgx`=F=G 
zfU+CbP0(jyD%blx^zTb~auVYzc&}DFkyC=yl98{hNHMC#qq#&I-`u1i5MS$I7GbuT zV}8qO@~+k1uy5)MuI3v+tHq%0=mfaC%OoW9)RG?fy*rTk><|gug>KvWsj|s_u#>+e zo(u5IVP>}8S<~Y%AAK(#ew#nPG0?G7OAe-~i~AcvT1s_DyVduOIqK3Stk0Z-XO7)} zjW6GO51Tmb7U`R;MRm>lpTQWB8lreu$4%8rAN$_Xb5h!Zcl2_8acX-vB@6qbcXijF zXzimA;^B~Vxgm@pd*bU`S;VjG05b;zJZUX|EO01SI+bAgd1WW@#|@TPHTe><`Dg_z z)aG`A#iWDn$7r)Syf2|#;ZP7oHsj8RBqf4gzV64PO;ys$&zvpaiK49K(xwF?&$Hd* zlxtgc?B_sEy9k^FB7Q4^V+ZHxx0h4k{h*ebdub2I^@?6duV?x5OnmonL_%mnoDQ9f z#e_&I5RycwG4xj0Lk4eq>Pzsj=(@4t*Z;|V_@3jw^3IX8cISA#fHq-_sV>S+{b|HT z?F5RKCj9*|O`WVu|B+^&a0|agnt_e|+6PPTWZJyoTfbAkOcdOpn?AbrLZ7tf1$<)y z_cYG-N0V9+kY^g-YsWackemLw8f&HdY$@}+{PySYnAVFP`K?dt^WCrJ7)Xi6ZhkRk z4iPu@Ng!rEe-%A?>|>uqnBl6In`N$lJs(>t`B{za@!t#(T~iBS4VhBwn_=hs3r!;O z4ZAtNYZ575T0!z%7q4*AqsJ%5di6l5j^i1&>?fkhE& zZp~met)lf^Ce6_C_L4ld9R*cC(qydHlrwpUhO{m!Zu!U+qgsPz*Er-^5xp@|W2fR_;;krp>y*he%ep-G9LzS#)Q#I$9 z+{vHu5uLl#;U!=!`Qo1&ICfKf3z-@wML4p%ARKk08QpeHEZUj<`Q8&7{rZ_Xn&_Jr z@qe`dcNqTjc%a;e#C0^|G}B*;_4`{v9?u+=ri)h8Ev@f;ptM+2qx|R5PoP-bTp14U zcZq4SKjlzUvQm#RYb?GM5p)Vnh#T6a4pGwcUzm?S!Y_}6{6jO zFZGDd(AMuA{p#}TXYzW)pV`s%Hp8h=0Lyt672v1Pj=ZtLp3_Qa8iA`L);M3wTh z&}7iD?GFcy4;CA{KD~PI=s#Z$=3A{4qY`j?5H!1FQxbhA_+j@Q#!UKNh*PC;NE5K- zIs63vx$%GYD{=u>*hbs&LL= z&vnc_Y=|y<*c4O4Qr$ccOVya(TJWEn_0kqB)!1+()9xknL+>>R5gT``lFg6blA9no zluifIQHCAmBF9f|jT@vY?0i#K{(G^s;CNL_V40(+$R|5b9|W#!Z8fE_t8 zel`;{N6&zwiENl`EGEGdZhhFWP>zK@KL54FPoPx7+t_A}yH9k!_@&LphFq`LxX`He zvAd~>gM+11qjwxriG)9CeitGvJb~LsWFPd+S1-WwPoZvXy=F0Fv)tK_>z?@&a56r9 zBz*6`{v7-_E&zA^@Ay+5U4QjDTz`0mBbOM#1XZG4dNy)x zF%vKTb1nE8D&me$sN*FanZV6l)LFfNosBPp$j?jMG zaXy#7JgB+B@SWJi-284#UE9e&W^zp7Js_bS@pTbFysj<{ksKcc1=Rvz+D&7gUemZO zIXRNB?0`gQq9;?8q%{iWjNx-Tym@m=RiP!uKLC@Mvwm~FcibsUnp=Bqq5*h5ZJ8<; zX8@{S!?+OzR+9mu?(giFZtS>lC({#=N4}rb3oroDxbv-g6agZLTMT+Japzy}`L87x ziGwp9H1nA{p{b|pRqsbX)_v9t09`M4Y`mrYy=Y!4qX_QR${Ta@#W|e3@|K=V&9F?> zqFS#{Dbf^)kie|L+$Q{K0KDEoGNIr{!fnWT7$>5H%iin5U@k!_m&G9LX%|M{sxKam3s9D^1UDg?zfIE zd%FC<>oV0OAa#MQ05B+g{wyhyr`te&?dNoiE9H$FPcvGXk|c91C(i+1^QOt1kkvQ< 
z{BvE`O}w*iN&w|!rn0D~|+1CB-n#a4jRsYLUhiu}~;|Jtp59q{vD>)vSE zf1H0DT>!Nd2|C8mjhiwFE1b3746H%WyZrtRm+i&D3dgXC`oQWu$10b7-PJJ$ikNiO zqO7msj6+>)0Lt4+;V$q0SRSE<&hR#G>i}g=6!v|$%ENa3*)w9W47P7(j-?xX8wj8} z5`YDE$o4ez`*E~cySu?Iu`!<3+mEXH)Gr?4NB`P5ZsFMQ%vPa39)BN+691&xcRV2& z748NEgK+6nS0+jELX&SaK;MK;@2*t)>wfk?4{XJ_#v=}s-=EAyP)li)0Aw2Zm93N- z{rvf0>)DRd3swhv_NfY)4%RQH>`=!9!JtIm{tM^-x&x0KOtzD7defc_(QtZLqu88& zi+lW79~r4_uwmn3uclAQd|bVy8gWk%bz(34%Aeh={2u;vN7+-eCb2!pEOjRvJzgUx)Oh~6E51mum0nDSlCW5;r_pBnJmLud`JaE+@7H}( z+QAz_cR>#xKHZfuq=W$4YQ-)QJsGn)!6Yb(zjr<#*EyoQwsLoL>$To}0z5y#pfhX& zxS7>EWvQ~ffk1cs+Pba0Mm==+tj5JnPGCcBKu=p+d+};6ggp$izdxCPOD|Zgmh6{n zGp}GUw7Z#dp^g!)rUYyvJN2~3ktr3GgLiOQ-Hx8n?ZlNF!j68TQkm_%$`fjQN&me2 zqcul9xqz8ojqo`&rurVdd7NwPvi@G8vr{(OZ~9N<0ZQV7kf0FS=8(mfXl_~-!!Dnv zuK~?cdW}O%v$cFk0iEDORf|mDPF=yW(0~s5WMOqWj}9D`fVL@z7fbj-&uwjC(+?2tqI|Sj}8q5i7`d9w#1j329WgQLG175^4r8C!$0;&L1#5uVMOZd(V%HQO;iUC)32;3Gh^kc3Nw&Ltma~ zjnOph$sj{$1vmo{p=pR|Z=k*wL2`}FwA^VgcgI4?NIg@9?!evLQn57oDG27V$1Lw#a zf~c6Hd*3$zw3{FiQ7Bx1pB{=ZC_|+o`NsV+c~4qRQRS8fODG(-mO{X`*>klRW+0Tw zd`~7t%r3&;TTar@tN+}XrQmj~)Z^Ss413oA;$-7bn{(Z=4qHpa_ED9XGBLuw1cV{B zTm>ME6HS&RcJ@n^cK~-nrJ`O}GRt8(X^A1d^A5L_;!+F}d&|aH9YyApRPbU&4GKyW zP{*V`nu^@7a*}clt6N?lGs|RW`p;!StBHc$RQzE|oRO}r(#o~HYVRrYzPB^R!aqFM z4^(=!S1b58EO>vy_SXA~xBkQ$GT_NhspJM=^NcyZ16I=Lxa|Ap)ho0Z+&4hXpr601 z{Vu|>06#M=zT0L{HJZyFf!BCinaqaOQZ*_{TV0lj|FB`XT&A$yfyKhAsXe<-XN*dWcTy{)+^_e`<4~&xlXIOXimE*?C(cWBw+t!8oO}x z%K!C=ZxqfidSrptb(eGc5AUD4+XAt;3wg(%jFlIaSq^kx2E2aTMe}~dcN6rQde!_Q zxa@gFG=dv>;!CxRX((!O?bf>B8_xsOhVxe#=;=lEs! 
z6c{Pu-zM!-Wb<#GZi%`Nz~wc)vu4yi(6pWSl_%dlEb!|H4fYR*fJSMJ*mvjCZ+TJN zsI`h-W5RY;z5Doo7*Y#b*v_UE9I6)ptf2Pf$H)8~uQ8y2g<(qT-TnrM4LLj!3ue=s zo54=|n+^@3w<0vFx6_C|m=37`VUuDY&Yc}S(q6gq*!>FKwx%AB^B#;skSVuA&t7jg z;&s?~l^_!Mxoa6H-ax>Y8`9*{eQEeXC>PX`*eq{4*PTucxStS73=m!YxjJQECdiJ+}48ArdC%AF(UX@iUw&l2P&DHC;9ze@qSblj)7d+q%B`<^x;#w9VL4$w`*gO0 zDpf8uua~1ahF1!n_3@n5`qVA^gPrL~kn0Av>eUDE!_F|AODBs4!tp*2kK+%zRQdGK zN&Dr&5*<0HHMilyz?*EIFWGGcZH_MdC27k^Qq(a4FkDV4R+9giW4DAGurfEZID38g zzM}VG%*^1YCSY4F9feH)UKTIp3S&xjl+HLTMr@%31%&u#{v?B<1Mc3xgG16n)V#;d zG$7+(!l;5+1@LI9Jm;Ub?KDi$Dlp4UWNT@Nn+NeVO`l(QCqRr7>TJ8vM|MlK^6}1K zp@}NhjT`kFYUbuS9v*Z)PHNS{1e}(p+S=5Vmr9Y0dl|+k!rFr1?wh9SenjzVD&RjK&pjQl#Q-2L zF_PWl9`wk{4=s9H|cn>u94r}u#8F08LB81q#~voM9sB?yQ; zyU0T%p|P3oNt$Mn>nzMP65_?U0GW;?kjL{KngZhd{3R6E#G<)MB6(d~n5RN$6%)kY z=8;TaCUJQn17#Hsl}fo|YoJB_((;*!1|^pip8o1X{L9B4@cc=aA5(?!q-7|!r#LxD z8OBha?IpA&`e4-@i8yW+&e2{}55l z%w{(H29YfFW~npzjg(lui`T1HublR_ zS9R84$VBPJw_=NU>4sX_pHmSSGC5lOot;3y-nsXH;A=w+Zx!g(l+NMO8{>;uW}lNw zc}Uiu^WCU39QbU0&ZkSp93c-vXc~dCq>E{K%M^BvR#XvnOpz3@t+&BUql5!4pOqA`r)xJ;s?g>)`o;d| zB2VbFO7U$$8pSMQhb2%W1kox!d^26=hE`L6_}#11PDfr!R2fdWjD~X-@yS*cg0^xfAEvT|T{+9R%p{*^|aUw1l8>gYIvNkK7Kb80O@&oz zYikQr5lv5JzLNk(1Y``$ape!eIwV4PX9>15jB$grx?1XOEw}Yd0JC9N--aE`_tpFP z!z5JRG2d!w(0&3&fJsMFpC!K(hJ-FM5mtZXM-Z`5+909JKIZ5ymO0M**O*UH|8D4G zUIB^hEfHwF(SK4gMmO~wLAZ{cp$ludRo^EBS>l9FdK9f6S=^>H^dye z()$y$=V-#9DT}bWzXsEO4oJi%IQ32vO&jG37!K1>2f*wb`qya`U(!A|G84JE3(?@X z-*6aMXvq3=~0~f?@wb08opO- z)uVL9&g`pkSUXEbeC$8Wsi5Y^v^&j_99H13$VkqPR5%s^CNMO)Lvi0v1ngKMSm};* zMFxtT@!|YCevUZT7C)GOC!ZausuFveeyKgX>VSso!5iz@t;M2P1eOY`BPSdI%iQQ>CKo`_4dU-0`Nse1!@IFFBlOX7I*+o<9!(#?jg#u8s zfB_E#0f~^G{nDVaC?b^NaM}KTHa^DcJi0Qed|EM2QtW$p#Z8;@RgHkfya6RjBP6W) zJ~6-_BbLNq30UgjFWyxf_7YGlRE;(a9-(6~XB~lqY z3kBr&PVI{Gw$e3(}iS5gaBpN>Kj<2iQGC8 zCi-m%%|U$F3_@OK(NoXT1tIGldkA}s=#do-DvIE*Ok&0qsHn;ndcGcCgMSAne^;Jw z3~-;|?H=X^&A@}p_aMTSaP7~zX)P2~bnJ zHRwQqCryTVwAT16hHpTo`YlCcd{{CPEf{>X5RcRMdme~FGx6K%cwM5){uz3IgBJvu za^vZZ7F%$*ZrqpQq*EDX+?_i(m!cfXPs< 
zl-l8By&mlfNiRTWbHXo}GP6Pfl^y5I4^l~!OZC|@-r6$3kTUJh6B;O5CfyxD&on5O z;eC?#*jWvd05diB1FN4uh<6~{pMS2izwSr6Mrg-quzG z15bs=q49A`jew@+#{757W0b+?DfmS(kFbyfs!|OlJOECzh* zW{O8m1eskhzz`*oVzk%_scXiOkukx7nxt*Zxigxv`&b*?jxgJ|7Wc%Ak;KT)Y_$?{ zNN<7>Ef!Sv3|yl-x#hFm%CCM_-TmH+a|GRTo2`Mc!&M+&r+|5yiYTGJ2VLmDQAH6n zB))OD;OBI)I!4eekaZgDQZK@G}U&Cs$SO z+RV`b&ijhUXrV8+BTmTbZl~8me8ig}w&M9_-KqLMRD040?`*(7uvR*%a(T(rlWbBJ)!8YqbHwvuq&nATP9EU=J>v% z00RMMU)j?bI?%CaX;zsUMb6lEoa3jKB=XNKu=@IBxZED*KWlYvY^-O1!b>=OJ~i1A z@8m-a0q!44!lDtZzS4T40(hmEh6yKQ5j6}*%*bC}jG(O*+S=9G-KJL`4uFGz_le%@ z7Bh1f+SB$LwC^B~YUxXfzAiz~6oa$CZg4`?y^kPozAe^$wX{R4fB)x}PYesQxh+J~ z@dTljJEfSrax#j)3cF%DY9%BD>cSw){|JGXzOBKNwuJw^Q`EDL@=B92EM zBMbQ+w*DNN!DqY&ip&^rXyk4$wL;Bb7utb3WPar2E#5BDd~zd%>k|zWP&vs&GiYz9 zlKVlu><4I6OkWPn#JdSO6XVb+@qrDc?$6^U1LQMM{OuirY$kQFx}W8hz4I{_H#Y5e zHw-{6Rkljt+jO-z#@)}TbIu7$Fau#d2xGjW4^YM?j+KLun^@yg?w%SnRoGc?F zM&T5l^c&?oVycc}QOI}$5H7I8@OBs;PTL&kuGqQzotlFBS}#5S09Mkt?B-fktIdZk zAd0qu&_XfWeXc0^A|Y$ne6945T2F=J)Bbz|s|lr2(Z!i|ids+3twOC&DEPNW)yKbs zXz(4YXSPbggJGu+FK5{?`VeW{ypv6*L8a4$&5mf9?@l-Kh{<4$`!ZSUq>7z{jdB%^AJ@p$@86TlO@C}|c{;#|R*+p~ z?{j}$K2YJ7NKg49clS@gH*bz0nt=3)f{2X!jQhs^kHw;&^>8r7bt<6P3B=nB4e|a- zVa`8);K$(FvaTB13G%i+2&FS{QayZgQ`V=k-TVj5Uo8N-3dU@D_M^(iCO{<;1z!3g z7!la<@?0!scXoxCleT*{S>5TqISVR<#>$8mIHgK~-RnVT^Bf4w-(XWq)<<(Y?vcc~ z>^rPM1t#Gq?7I?d-=-UJbd!xDH`-%V$I@xKeuZZ@t&00b&X1JZq)RidPBi4`zgWs> zncs`f*RJlBflNlW2;wRGsI@#jO|Ri}zpGbmWujdV>|Gg{YIluRN-f5PYaNR`J)zc; zv?=tKf9@*rsREO4!cdrPhRU3uH(uj)tsclC)1O=_i^dRh2rGT5h~jk}w!6p$0b%dz zx5rTzq? zf-_BHhoE@8@v#uuRdvgtE0^4 zsvPS^JS}V!r6l~s4l&+tf(+){D`f2$9W5q^LZ-loEv7eXWON?@VnQ5!qLDY*vEfjI zCyf+6$YJIT-1vnQ^SVYGIMtnb?EGlF)+~X~4Tatr>Vk=t^YwFQ*@x7U2#YFAtTl?w z19PQY+W*!b#PrUjx@$ZcDs63oz*0ipy33n@g*)4zGgL=EewykKmzlM~Dxez1! 
zSI>EBJ74VDGI#cmzB4JX11YfD?)@$9j{ON(gG^4_WPga=m@kl_sxBuU4#u zN*(vX;+qA1y1BP$&H>2Ag%i)*@BI1XYj`gBp+(kxA_YZ^41KGGW3E!?+`(|4p&a>$?F&*Pg)NWq(2F{+8TDJ*-YqZLS|xAUCKc1 z>jdE>W9xy zH?1mf+(HBWNx3Hb1*5qTGb{saj zEg=)R^E7Lxl|+Fa5okgJ`r*TcK-SZtVDNqS$ z6}T0sF(1u6;-#>OP{)g%hTWwPLJo*k>)IHk+AfJ3yI>j?klycH2Y010*ONKk>o^R` zR^5(-@bg!{ojD)i2Ldr&vhXt~jHFs@2?Bi7XL9gS{@30<345FIS^Xt9T0WbokLrYv ze;MY0+o$szeb*mrRJ?}Dd$Fx0vt1j06=~s!D4H_^t@g6vQu^~CK`GVQ80d!?-GHjTAr>~(-|{o z&Cb_4VGM_O9L;RQGsYe+ao?=z3D}K$vnuMg#4|_Zd%sJ~w~yaa1~WcEMsdHA8gjx? zgI9ow;@d{QQ%Kle6SG3ge;E5YuKPPsij+WYZ5XZRBwM9^7?_`a>B8fK$M`u$>_?yC zshHB?78A%;;*3nXolYl5`nR`IW$uj+4m-@>QqCRYbxgOKdGhuQ+n{S@j^ihgSr+WC zdkit|&nDxDRsL)#c>T~Al{Mq@t(Fi&hyjet$ODt-zm+}BW;Xhf2i%!bPqyarMfc1~ zhRQw>%xgTx8&HVz3)7;n+ev$qz)N9(gT2+0KP#5!dg!8R^{PRV+hHRB#v8x$w62s) zUpHXCH!^QE*@%N*Za7luzDi=+4Z9fPzlikf1`pd*i-qet%%?VHWBMI5I~qAL2+BtT zZ;%b?5KIjgnVG%Oz;?K1Xv0=lF^4YLTLpENg~m~JZL2I=%Py@d`}WjnTk}9>>B!Yv zGG~sJ=+xOZecx!6=uVR_!s*LRiyq%AxvUEDg_n`J-B4+P%jujArG2L8GvQJ0Wzq!_ zDRmHDq|nxtV{I#bH08Vh^K!@ctba@3jv@>cG051oGS7bS(niETz)pTyQO5gC3%Dgv zypI_Em3N)jfHF;FYNrC0wN;Wrhy7ZEajozM1VWA5t6B268-)8dr%GIcH=xYZspbAD zcD=?oh#3=sWO4%HzdQ?_l-%zFvYpK#NM)EjuH|wz;b(X^vEmGS!26*hrKRYG$vu;k*4};?*HUSraC$k&dcqSFPIA zV7sVUZpYXtHhnVzU|80$bHolP`<|{*FDcY!ojL%B#gpeIl1RUb4EA1&_%TFn2X9bE zVCTho+{uN0CgG4=qh4nH4WkL(O4hctm!`}vP))H{>9T*_d0*K1KH+UrB`7sBBzQ8A zZvdfz4wqkc$W3524IFqt&!NjMSN%ri%RROpBeY2vI2{6<)U==`a*wT~=4qh7hs z+I2a?zYW+Rmq7a-EJ7FnNa*6Yn03Kam3K?3?gP?fH7 zLqC<;dA?I-^F7LEb%6@M4j>AnZ(_{h?g!68;3OK$mN8dUxWCvx%gG5AmcQU^GS{j^ zP-&Yb`N3)D&iHg-xZx6i0A!KaHs+?KEd-ToT^VVQ3^XxHYwBA00ly9frt)1>&ZG->D6k+ zTR)@ElAOm}GL!T1GVB~pI>XLg0228yz%x{NGCy@f?J0gJ@KOZ}@iFc!hQ%(?cDp`5 zwg{L%9xi;4SHI^&u-!G9ocx$AXZ_Pz>_kwf&{xlFPBs73e8|Mgb9U21;p8JT*4=5w zeWLCHJU|G<>iaZ1YuaLRBp)dWWs#+#&ny>0vBpq6+Ps|+s&&x7(5KBKX6-m!!FLao zAHe|%-X+4K2oGG#z?^S_seThAAc6)&AcFht9RlUk6&?WPtA4}!RKBT)pLfzp!lAE3 zBbk8Yt-(F}mdsR79r%FEJb*#Bqsgc5lWGA+X{Sjb6|q+5gHH>Yjc-pW z6pHd|xa{b4;+68Nrf2GtibPb_Id68$_!E|M%lqb}RsH~^RcvRiRnTbIAt$-y8-6js 
z*7Rg8p|?~U!x@(&Fx7Lk+*0%2Sy89Ycg_i&K4&+=m8=8%P27H|WLm9dmNtBJuwPaW zL%j@i_iwW%YrL?9L;TKO6!g2sdtEHdWit1HhkSjl^T*$8*jS+IsSku~E;U^~AA`B8W4(IO1&RA@tIs)kOz7#OK|6nEx?^@qK#)J~t<0akbIR-PfX z1e1&yZziGO*k3H+I~s;)agjo%ink}`c&WkdWp9S52uTPMRAvXdiLShb5@qeVR0BH! z0iu>n2+!BPznuCh<}MF$4I4&c3>o%ypD>sJj;GDP_1d@6hq-G7s*<>E#cphc7t!$T z9@v2p3`w1_vbv1&3?Y5ZzhI}|mL*SXmD9-RdFBdBO3EVe-=YBR-Kuk4D&d3#lVm5d zQla49TTk@`X6LB+f|+^AQ%nf30)%!W01DzSzwKP;-tqy2&>=%S&E2$br9fZ5rNq7z zcZeD*s+E7gMDUb{!t&jgX%5$ea_CQ9v>9GUilv} zOb!K11NOV@eB>1_xFL0iBK0V^7+b!NA0mMJFyx_jRlJW|qp1oEyDeuE2rX<*Ia*i0 zj=p!=ArA;K1vs}1NW}x|k7R%l~on)Rtu#pg1rf!_PvFTtj*?66Qn2`6^dAumcTQqf7 zZ^{TlnDB&CZh9CPJG~JAPW$upKHN9UOiF&dv^ddlJN{m3PTsfJ$0%xSna=6)SK*jECv5cvz`TGyg61r|Pa-NjTxmIpe9bM$nRg&Bw8g zW;+4rb-qj>@2$2a#!Sitoa(`d%IujK)yPxQ1JKlG_h4@LcV%E{r7frR3@KRD)%y6@ zG7t!PE9_e`r*FABOVXMdoTlGO`gEnDh?y%sWERp2QaHZB!xiehF?;AvhJ5nwX_W&2 zujV>T>ldZw6DWH1BdBFv<^eiabl48U(oi{^3|M+El4D}r_Rm4jd zP*GGrjyyvuD%uS>cP}2R{`mK1B+UW)hx%jp_T9gT9_c9R24SV+=A7Qw`CzFeM973< z>L-o{u_tjK#p& z2^U~)fvQ!`06@s5>H%IT&DW?(9O<=5X^0g#dy5uhCTS4TUP2sQqD=6SceMD-Ck|Cj zxEM4m%cc8kDsE|3-gEe|Fl+I^FbqPJuxXZs_pxU+C9?U`ud^4WD`pem_{Mc++{@?G z09}WPB2JscVyX6!;(DQ%D^x>)xzY(Ev+i`rQsWP;HXOa@j;kQy&Q8EuYPTAfee-n9 z`fh1QZ>D4CYuwGb5{0gn!ta9?~%#OFKWlJ#1QK$xr$uiLu{S4&uP? 
znEW7`Cpt+S_4c4BJLywob1W_T%=Ys!`@g&CNRs#K6Df>1o=5F_ew+ATUm#^fl!oP{ zrC5q=6Qzi!Xo5uc(L`a+&&Ay(>Q0GIP*+=?oqTGL{FvTSXm2{ zSf%i{#&@H^RAJNC!@!_Rlig#xcDD!!q9))>B|6_hEgY1&;rVxV^Ld_Oij0t_x{!Vn zV6T#8JmHhfyG(3s;J&Y6s?rp`#N7teV1H)K;aM+)+uD}=WB z&Me&$3#Ehfgr})3T)|W(*rRZDAdNH7v71VYANy&HKhLg6t^Xl|T4$jNAwJ_Hj|N7% zg#<2W)x%>3)rY$xtr8||TyF|RnB*t478Nq9mC~xnu zb+FZR)rrL{#FAd4T#K_Ddfq@aY*?jdNQw)l?R<~KbN8PSwLn~ZLr`8jZ9lt`jaVmC zR1|@aSdFRDWmqSpJ}O;)e58&1H-B6M`E!&K^%Sbx4Mgq+{WDCR&c8hn_*cnFF^G@G z`|Y6uAF1aI!z6gn5WyC+8g5!c4}D)ChgStbr0Dn|n0Rue`dluRB=lR+@+$>-JkZqY zcVGP_4digy3$L5wP^g=!HP4JRAAMF*`tnEi!`SCeJClz(V}Y9qgv_2G)(2rqU`5kTeDA}PEk@) z){KWMSV74AOO`OrmBGpmxx`Sh-%LJaxetIn5*OIU)<8Rdo~h@vq*XTF7<(;^kNJ-U zzUur?iNPGL=eT(<1;-D0ucv2XBCv(;^RCR z%Ts>Q9m&>Vr1z?=%{Qs-MK=)`7it#6J;e|`aGf#`?8){*LecOfMqzN9Mf^?*d%6ehE3e@1P8O+P%WnwiFGaQL|S zUBlUH;`v;?u*rNI^;?>0bf={XuGT5J+4>upKg@2fuy@cT;#lLh2c}eF-yK1rbYoYG zD@UBRhDe%5o#W(GPn)S( zkF#+2m(4n|Y^0uG)M_qKR#Orote3Zo)!TRsOw%y)>#gF)*ac|qqo5t-olf2OLcWL4 z2YY*Rm&F;QoeO%)Ch539TLzG_M(I&qPRBML$QK_9=fLHXg&s>9$q634G{zO{2TNHY=mUMW}N$?(WDK2hUIJjDVt~g}pFX!I1yS+@(+X1AjZ|MOsyX;< z_WvnwVAgcCDCza zHQ7~IoBYZ4aCQ)2^BVw}-5?`7p8x>{$iK|TNh;eU&teh8QeCjyl1C7u~EulW_M1kG>vlG$VRVI}m%^>iE#-Gl z0qqjpsS5Qii?i{^ z=3w+A84qay9+LfPbf3p1cRGW$Fo;dCbW^3P;GX|a3uvVjI$Ys=Rs-rkkaif0oB8K) zJw>4YdQrSt=NiV18DYce$MTu{G1+pdmiep`Qqf$>;AkJc6YI~L6co4+{qD>a=9JMv zVDZTT5!rND1Z*l43|Pdjcoe66t&i7mj1Ww<>&?~-gBY`4!#^hnTNv+ai9`&K0CYIG z=@3)QmVS=d`|7PX@D59?x$Ra0K(*EfiRGNWR$WDj;En=iv#U>a31jGs9wDzwdHHS{2kAN> zQa)+d2-d7FR?Ir)R|;Pw>SG|#ur&RXgAiU+mX~d>OP8}(04J$mDlQevnBJQ~vR6wO z<@?^<(F!xQFZV)%qxf|CcV>)BC2U)*m>8$%mDdvW-goUgTLaQ4108n#jr8hZLv_r3 zXf@0L^nyTsBQhdeE6ZvGsVi%X6;Si*&5?|~_hJ&r#!zjltrO{<@|!NUW-3^VKxX}T za4D+w6JW6QTO%*M55A?Peg~dT514D=M4rDn!_J&Xf*7t93vK#5J1B>4UK+#WrRh~KGF5*IRMzan z%#Ao4O|F9EXom-of8vM84)D~Dq{$SX{n3#NBxjuVt*t`GEt2-FF(6IuaBuC-o9F@` z32l!Y*LLT#Bcd4;GK!JTA(nGpqOz~X0M-veIeqS(A9P3w!O|BArF%QefpJEoeA`_* z>On6*Z8Xy441tY1D`L-rTJ$~;Q&LEE;l|Q_ArZGj7p?sJK^;F)!|jIxua*}3a&;_c 
z+Q=G$sr{w9>$z`C4)XrSmV|k9H+f2aqcduW&DcEvol*jC8Q0XQLVR73zV~)iH_(dF zfj|P6ahHVLrD1zC?drk3|{Nj!Zb5A!z#D${4Z_c zD`2}(EabywGZ0mMj~542ll_AphQj~MFuRcMn=f7R0-|g>7NefaLv4$hphQ<^78#w5 zm?N$O(VHsh-=}Qo(j{sRzN)i%vgxlp_jviTjL3GTjUOrTY|^RozXr{<2)CKaQGzzkU!?_nH$&fJFD?wB;H*rJ2mOeP`SdM;3=As_Ek_1GuqF~zhFzwL@<4N<|opj zQ7tKbxgCf0K=nnVYzTQ~V?2KV@!i4*dim<>z62!-86A?DDg{C9@%O(_CNqXsGVpGx z5oeE|Y2=>bdWmOr^RE^_G=0mcj2$Wd0{rK`ZLj0j#_Y1hOCvpIjC=hiz_o=ykz1K{ z5n)emS~=UJ8pk_UQ{t-0*M0&0HW=F+FTdF?4U$Rm?&lk1fMC4|*bSdL=w|jyF2C8H zW~VY#tLp95`Uz}U1MEVlGq% zKF2y1ksTLm9u@RIlK7Zb+A`G^k_T$IjHqC@*L^GSYO;MG@=-2#fTL)J7F z_Njfy%?Jq%aOsq^O4_$pz6&JzECPeakIZw%Kz#Db%CcTdWIuvO z6#9EN_)$xFiVoyw;k@SZg@kUsUSa~kga&;&Y^?3`Q&z4K^4)mTuPG1w zre}EnbVivqH|stL6q+{b$-xK(;*L3i;QmH?>RlZI_3Vdmzw2K zA7zEYAvTtW!UP?nIRosX_+9TG{rR)1(4HrzDHS-_n`RZ83KKdin*yiqE;SogLpokC zxme^V2I_Oc*7y*o{Le3cNPx>SxZs+_p)#t0S$K|4+2(20OQeoVnO{hgNg;%OFx|zj zj6^D8PZhdcfp{B>>h<5obbcm)`1XV5r& z2!-AUyrg9GL&jt{>ZS`P;hq!03j8CgeKXgWufTjZ>wMW z2Y^CVZKI&hefEygDS%_=04Nxv$eCfB|Z z`EKTnnVHG|Bkj8bschfJ>vUvPB7`EO5)vhZa0)4z*<_S5D`js^DkagIq|6hEj+q%U zl2SsllaXYv?9K0b4vtdZ*XR5DtEc0f`?>CG&--3Hr!f=hra5F!9b!bMhooYP0|h=- zCuTb62wlw*YR9;XBS+qTlwBXA9(P};sfP`c5D($3>&%T2LA5sgSrK=kk3`RnC zGtvk-HaToaH^cIVi5f}FC=`M}j1SN_`%nilryP8!{(i73Mb=+E#_}?pBK!{$JJ&pU zH{`cR{^3c#Jzro*jB1m|OUM|SGhiP@r%P3Cn$DkU4Xj6eFwUbPhYd+(Su4{d49QtCTrAQkD(VCA%Z16lW1$DQfBCD$v&z*)o zgo~I%w{jXxf7OV%=EKVtN<6_}sW?S=x*rc2xu;p9XEJUL+}RqJ*iyfqH2ux`UyBOo zPJ-$Zh>hynJ$rHeXzKZG&Pd|eq2r0CoUQ=6;)Nl{L{GBfZX_C%6xKL>wa-B9HRg?2 zs<{Mx9G_QINw)TYGL>u();fNMs}n8WF(a-kO~MW;t<^iQYv4L$3U z4i%J*f%70H2&chyj+}8+7=ut%O!(@~;+iA}=SCH2D1wDo9q&o8YjV-Zw03wC8KV}7 z-t@sl^Bul(y~!q~mm06&j*j$wI%)d`@1-|(vAIkI#>hZ#o+9*GmA6TlXby zJS9xIaP-0Wrfx_o$R2uWG%{&ftkUm~^t9s{TAp`Zr}Mg51%t9?#twvIG(K>&tqCk` z21}Io?n?h5=W#yMn$qd9J(->QLUUhDkycuWG#+I_CRzm!p#9WB1qq^g<33AD)mvO~ znPn5{4hh#{b4^Sk^s6VG$+Z4(K~u3RF1EB+kF6l=XJq^s^i=+UdBM3KgRQ6D<&Mlj zVQ!}gY5TMOwNSbcXoym`qNsE#R!L=#Q;(95u?dl2VS^0z0le*m+2ZVsbw!O8B+8^E zRT~4L+*waP{&Ei8{mX{82 
zep94l-qZnyu+h(?|DN9cm9yY*OE8O+W77BO%MyKt(j&m|4yL2wl&(cR_yQ(13%6+q zjfcTO=_#M}h2%A2)Ej772kdTJJ8!Cj>5DIeW#7ITkdtfe)i%8_@LhU`P>3&+@aboY zVa*5ML9!SbxOsNf#_A&Uwz=HYI^?aKX>E#hjG3^$|2({Xs3unHlCXZmPpC8SLUX#i z^2kx4pa5=-Qn&;WI$i?|t^G1@(m=b*nvla+&mjH$+jO$w6vX{tQjxP6k#1Z07GX*e z*Xfs)2~BzJr|ky^qPEM@ybrtOW{^kJJ9sFfddElw}l-r%T#Xm^1tl2*S=&o1DgLj-@3MQjn}Df zFa)&4C#2lOcU3f0sYT&STheO+1>OfIe`?Ilc2YRi@8*&(Vx#9`)pWvue{D#|DY}rm zQZo>V60d*wE3*tNX<(Rxm%#^E|C!m%yaeI*3O!LecImqlBEsX4UtP!o?51S}1p5o2y z2p=8{n0xnHLUgE@zrtds+QLZBX=Y@qyr6(t&Fp(P*#~F1GUMZ`S)Hayb;lC(v?#cb z<;SCU+j)?5MRQ_i&xy7W6o2hfPd$XEmE@#565EU{Klb=%(6O&sU+~&q=BC{Gt z;+g0Al0NQNj0(0a!e4!h?f*AX(wz%!SM+T#Pt}pB&;N-6ISU!U(X|1y%iq78^E4cLry!3X6R+_`VO|l8jt=u)v@8A}p(&08C-t)5) z=K520!}kM%bNa?FXZ5BTC9=o7ZOR=OyJC0eO1?JR{pwU+m(Eia^=FLVc8({H2YTm~ z9*>K5+Q%JRVNUlbR#-;qz%>so9+Z!9n2W7fuIG0b=1WlL;~bTGWtX+iKTf>#$dFn| zk$Buzw{)4vWaJ7Cv2SZP%OR(-Bp*979m0@nm)^HVJ=R8b0?`MBS)BPc)Jff!m8m)8 zGQX0F|C}9u5Ow52M5M*+gt5fb7i;LnyJX*s>wit=6lz=dyrirGxwr_qDeoy=@joT8Ns35(y{E#ILRoRETr)7Q-1}av} zBF`sc!DHY#w)kJd=j?EEomSAKH1A|s=Ozn#$u<(|^BIFhyiqannOty5RC>AOj=_6I zd{HA#WAciQ<%&^mWur{e+&v$&x?4E|PoUw+V znR@#V4-{}jM$6J(ild;&oyy7$fC$R(i!M`_@0*$JsfVRp^2s@D&uqow6I@3Ei`_pT zpSd*LUISexj&&8!1T!yoxb3j=UZbjB_3SR?odk`OT+9bzPpXt~HM zDn_Me_vWj)RpNi9Ru3iTi$J~ksz{5sKFeg?FEQ?EoO~uZ^3+sgrAT2WQR|@MMmqx;SL!IqlmLYZ`_o>L}l6Z*Ch46 zh)pcbo$bzj*5V`TD(RQH|JIm^U29sOxs$`Ew_;(_KRANczE!_7*4{2L7a-=zRx_vF z7wfb2>=&6M;X~OocP*UR#RmWJE$Z3R=o_Frdc%myD5BcD!}{Ks0os3d)FQWTY)Yh! 
z>UAxHc}P&ALWo9Fy(DYC@aH|xo^75TI)0F=Y8Z&SX`tyu{p) zM3_T&!rWwn5V!0_+v2P5g;5H?A=~eAbJ0pgUhCVsHk@orTupd>F=)iX?PB5W5^is{ z+ddt8)h<@j9R~kAOMlhM6VvBX8L=a~ufB1>r%5q){7&x6KK>d!>rg&@4qtYsyhV@0 zs|Na;6Y@eXVq(+HA#}&Xlit2z$q|vN9`jiL@&LY6YUFX>R=Pvs_uo18y7W8g2R^bl zYunweCNfbTFssP_O2}lYwov?`-b4|9P4Av#oN#&I2;RJLs&Oo`CcDo@Y!h3|wo~=v z_XA$-(zQHkI}$lQ1lI$dt*SAf7K8d_E-tE_aw@#FRpOMygr7u4*8!pWE0T^yDy~Ki zg%+y19DTm7Np*lqj&is=t7bsPTELtM3nk6R{c}ILj+Ptk4oa=lKHC)AP3o}u_&rY4YmXEa1qavUgJjRgqP`(GHU+Cm|qq z8cG9_PZh6Ax3bFh6CZxEIqr*1HAl0|Zue-idsU@}TXuKGCYRO~D{1IvRbz+A99|9F zor`HVC;?@XJzSlhW1}V=(noq+z%P1>Wr9;fOw?6!^R5= zD+UUUHKJ>C-P|T0hkc+7mMWZCUmnhN68*MRd@A+v*+M<8PHFAfO;?(= z{qq1d-qr(e11{EDImZ>&INROHwF@2aHja02Q!it`rb3PDiWKYC`Xrk>H)Wx3W9l@L z0LlmMXc3wj1o*WFwebqN~E?)V|<+y&Cojo*9!u0A``(JlRa6N1oiMQ)* zRc5X!aO#)0aD*eFvg|HRHgYxG8_6ESn)9C02+0~~8dJ063veWp!f?1UXI$uh#?^7X zRMzk$CThpmf_;D0Onj$7G5urdq^X5Y3(3izcoRInw#3r##lAO!3QJDq4Yoa;n&syS@joM?(oOol6-FA+`?RS3a%3ES7+WgTON?}KtViXh}Ek4P58M|IQKEI z&$*pNmj29@vy*i-Ica^5T$|}#pEz_lC7+GxcWxK$gvs{OJN@eH9edlWZ^qJPUj>PDP_=xsh?Sw`UIZtNW+Sdaz z9iY{RB!WBxBPdEhQf(AnZ1ri?JP8)VZDz72mu;q*Ww!a|jLQ#pmXpf2&)*yEhd8!` zRn4yIzSSp}Q-PtOm)GBhon#Jk=MLt3R51uTHYUt=B|HO_8rkVOCc0C)Y-cqeK6n~z zfn@`q%DC%K50(?Y_66*Mj#5LPS`uwkogEMlx8HRv3*;o!uM{p#QyV+}b05k?+Jw4N zxHqMuTqShy$FPu_l^T$+sVbJN(tIi-Y${{8$5?`Urq*{x(+l@*qFw*qY^(n4g_5ti zlT``v69S*R`3(~k6~yDB@>JAcwivsOc6P#LfW0GEIj>pvyYz>))#y0i&FK@uP_wt5HX5^&Y>S$uHeK${XgdE_JAbCC zi^3U~L2NJ8-FICJEMyw`No!tbK2>ZyYbVn7sMJfX*Z*ZvS|MDHc#F2my{;*O*SR_2 z8DauD{LhzkpZm0b|E%ed%V+aDxgPqXNdb}3_7AFdzT8@67~b!y(~}#;;ZeLVM`d*0CEfu@z*(o$;Cd?p>cGTz&Wt2s8wn!~~WePxiqJ4DvsDn%67svf~UZdGFMm2=@e((?wPB2xVb=H&9l>i ziZjv)ATIg&(JE}}V`nH>(9HCAXX_yJd8)ORgOcCknb;4u%_k)$BP8l#O9w_0+_D*W zO^&ALW#=*4^n;(bL>j92G+@*;O@&B zskV3qauHO)lU`pwP3M`|GR81umoqf;Q|@}e<;>1!^2~LgND@62jEs$grq0KGQ|oDu zuKNkW!J*B#bnRGqRZY_yjM{VJqa!a0ZPlN4o@MZVdEU};XZB35MN{H2Ri-vy{WvO_ z=^dp)jrN9hFIwCLJT7Oak?E9_Tv7x};rsN?{>;5_Hd0oN&QwmHO+t{xOF>vSw*FAF1);KXHyc?1ZwDgsi0zK73Aj+aqaNz3l<-Ly?J!n 
z?o`Z>6R7ph?0y#u$LSVdi<@*U=KE2aaoMNeiP^#(m=f93t-OkM86QI`AYkj;^yZn^ z(R5u}{|DdAzrjJWB^zsQHf$2Azb8KVS>j~BeW9ph*KDVvo1+zTOZTB`Pt%)b!#b;V za{rlak+^(PS+@r|Xi4YdeP^iq@7uE!gvHT!baysO=9rX?8K_n{4%|^RSC@0lTeqWZ zBjpX1>xWyi>HD+B#dqdJ+0RW$lpXq<12?c6a;Pd}Fi~DHW(vRieGBeh^fMU05~!() zT~Bq3WmxJ$$mrIy%%pat7#nUv=)N}^6BDRsZr?aTqPzDE{(*a9Y1>17xb5h9^OMak zCSG0SS8*bgr`@K$&iO+er&E%5oQcfslT)K#JlyU zHqG`n2@2g4Pw?qle>Ga7jhfWAMQNZW*HvZGZLa^B5sB~NGdfXQ=~ViFQ=bB#RdlNt zMf;spHNR!>27J&0>nPF46t}reC1wX-NK~6?(W!s0Gd|M7*coV^qs@`ylOf)v>}#ez zb?Y8xT8!GWYHlKIE@NUu?}kVB)x&dv=J9=GYiB>R%qWXY^-R>oPegO*jfhnz7{6-T zdEN=ahlB~eJ5*ZNl~b!wjUEwAbU9-?vNJd+WpH$5ovDrr-p5EnnqzP1{nct=aCRWX zjd5qR*s*;cSp$;HRW@cRYvueA$H)x1h&m`z@H2}1FrOywEgvmvH{IdZ2kntorNYKl z9qsMy72r|ItSjN7Y>?U8ciApWhu>f{&cY>T%h3R=5{0$Z^*Oz%y5!T>kMGZ&zGFHy z+M5xpcs=M`Nra`^v}?QQoKj}%!*$1ONVJ_K*)TYXwa|jl-WGq3Y~z-)foAi!nvxMq z(IzqM#JFP`NUqy-<*X}~P+^V91k23HBeeDQ!Y$);TOI`mo}^K!AT|EsqW|QrfPX#( zz+W4AfDaY9;D^;td9mMZqq!A|x=k+8nY1c7gseuDvz0#z^|(RI6&?Yd9tKi77;uC1&Sj zn6BE3441tLq>JkV=Ugae&{@LUsy(!NHmGS@J#+lKVw6U9SC!4vL&hjH7wb2teu{nP zN}jReRFX-AbeEikUnF4u2H_1C9cMOJvozif4bSvZJm1|^8AA9D&IHx4R<~-{EA7}j z73sC4>tkNx+oTMJQOYxIn`aF)MEYK~oK}_2`0mABzCrINH!100^JcfbVx1=yi);+i z8*Ft%ka#!9hIUqbu@}H1N4Un-=3zo!m4;g+V^L9ZZ$p-l*i5HHfN{X&PK*U_K7m z>jg?hSv~(S1!tLyyU_aQnF?$HML5D&G5QcPxV*@?orgh54D|3<_)2sa`-ta^ei$mh zOCKVnlzSPj0>@;>1XsywaYdw7f@I$q`d zg#&vD+qoB%=W|1h*`?%DNn3Tg4;hsv59m=1Sey$Bmw}6hSeILsQ_C>D=wnCvPetD* zfS2P$>Si%xRnOoy+R*PRqu^Vf6g}<^vJP*nw?~5h zzLh};xjyKqy7|*|%ofay)26q+JJ;G~dTgGX;{oM1x(-7-0ziGg{2>;AaUto0>zSkA z{Iny4yw%sHSzAKl5ll{CuJ+NplU=G#ZP3(t5)5QRg;?L!Pu2itu7{T0b|n`09BnK5+hM%b~>gI%B;f!!_j?z=MEx~vQDG?RQO9+wxx=Hi97Ab z&q(;bk|DN_%Q4HYcQ_|uz?E;L8C;x=O|>*cvXNbfB$*4}MXW2nz2Fhr`eydQNseE1 z*%F4ZJ0XohdzKC^@L0%08(aib7)FeJ3#1_HuC#8JC*srK2mFPdBVl^oR9q+xA4S8ysishO+zm%>5hp5i8RZe-XpOdnC}423GOzY|0;$D`AbGmhN9!WF5v@@ z{NLTDc$bmjg>dR$0LE>EbuVb{Hr)3VGwsgh3MzPmu(${41_XRZnnf+l>m#|Vu|NAu zh8IQxelZiB;kmd!_q;ZErJzNdPvvxmOv<>$?l`x;D(R76y~r~%f8UG^6;<`~H(Wu6 zeW+ef#}-dTKz;EHr3w)xANv_F>0O;wtf(Ru 
zRUBj7L&VZ=4@#U;6Cmw2g>B z7uZc#*68+icyqnF1-4Hz`w4ZG&3C_Xfq81+ z&H%1jwRt0WazQ&y)Rb)`a#Rr;Y|6)GTh(5jDyuq)Td@Jg>8^d$EZS|xtl76RZW6Kk z`-8JSK&`-97KO312^nWoLldt@cSebB9kt%*=z2NL*^loWYBx39eEs1V;~wJ1I$>k7 zx!2dpAiNFC+~bT7qx{ATobsfm68P~W9*swAa(kB_I3c%^-LA7%B6ta(<# z{D7|R9fpd2Qwsg3A5`9Zr=45 zK6GD$r2wH*wZAHVX&ezzPS>ARjZA&aIawS8#>KuO$>eTx2y5qwl|b2njMZ*hBKC1` za`?BkUlj+gyV~U>xVYutaYv{L`x4#s+HQn00i@KC^Opnj;@08jb1(Xu5DT&^&hfi3*4@tuM6h#ftU&L;lm(z=^ z$lXg+3?ZP6J7UFdFM^6|Byi~5t9)U6`vdcayDQi&JR*Y)sRGH=8&XiB;9X{9~)>PwC`ot5gRFR{Vh ze^*KFc|UeJQ+wt>-P-NMrO~HlzbZ6^)=7L&*DOq9e4UXkIYxt1Li6Sgje#`_*W&@q6%DOV_7*>6FjkDOu@kOr081Nu_?7Isz9~km58E9fV z^mncTCKUlZS81K%=X^wPP}sfR& z=7751>H3RtVsfE*d}t;Hrki;2CCtZe{h4hymlZzJG)hX|o+AG0)ZwT(n=2ed9{l}b zZ!)mTV~tO>8+giQI;RiRjz6h~BwtyJ;9&xEO2+WIxFUO-#!YSF`@p01+#RF81a}B3 zXRPm%%n$0~+Z^QW!)?7>OMhEpD+Fw$%#j(J;@~4~U}?4|Roo##V2!71VG`|*c{S;{ zg{r#q_6KS7`Yl%suCR&=ExF$vT6trQG0z7n+N3_^3yWDu({EYE2Me#lY9n5Se_|WIrclbHR&a_x7>}}7?meTm>BlWZ3|qxhzpD`x@$JuF7$P<=D_F` zh3>GAFKE2)6A8HNfx#0fe7I69Sshf_YqFjcA#8~DrL}&D{9(6{Tj!={C!b2Ozp1?q zo5{kkujClxUgC>7NxFu!?YJM&!Q{~ll|DK)Y_ljGMC%VT$};= zxn$0$Klf{U(u_4x5SPi78DuO}rg+vEsG^j0qh>0m)wi88&3JJbU+6?RzlE|00`F^- zekvKYkNQYP)gjH=o%cP;?;sql%V2mW+$2 zScX*AGSDpwP1cupB?@Ev6uazx+F7~ALns2@i`ddZS&P3Z#O3hm`5pL{u6o@OBFDeu zQF@oCz`xw$JK`HEOpfF6M)*CU=|1)Rn62gD?DzR4) z&rGE$nYa!G2V#)4!@h0zxFae4@$TFFyA|$a57fZm#c~_h69K+Jv-g;$F~gH!O;2?y z8NR5CS>^V`MSg!B#sy&RO0G5{IvZsk7^YobT_SN?N!@Mx>%P1R*$XYE;{Y?}d<7N2oecRbrGF9k7c-4nOWnT2;mRl^> z-ktq6npUnU3rvtRX+<*+(mh{zg}9F++r8ajO7xeYX1I$P8*B7*X>YUs*rXfU=R6Ws z@sCcLAnE7Xv!^|L@N_W5%&1yEL&W#82Mirx6`$zUreubswhw`*93TyA)bH9YV2_Y4 z+8twu6QpE%?y7;kA#mdu61JUVglnxAP=y1hEqm;RghWtUy{n5CPC-w8F9+|*nI#C9 zkWb(qHHe3w4|WCWn4&W!vL)87h%O36Le(Ko#UwdMK1-s> zAc+8l)!xvfSzB?y?P#601*zXBWjGI6MoyRLXHaR%u;z**td2rPlwk81Gme-&VLwu&6PE7p)^2|GZ)>o$8r1w(c? 
zPK}h1Hcr3s6yVlwOzf{~^OTl0pAiqFcA)r$Eq^E9lE%Vf#-12d*~liE+$4m?{&XFb z<&L}>w$J4+7Jyn~Uq3c)wvmn3QAlaF7lzpw6;9_aWOp=lXlVW(sv|G=mId)vEf1B% z$zYi`I8o|l%roeYe(MFTeJ?6cS;t3#`spQsvmr-pM%0W!;zbg2GKj|7`!uZape|H; znd(@=MF%Za^$T_~S|wKT4-%BUlPKk3~x3 ze!>oK=0R-*TMo6IfPU*NSVo^ z{g=9H5NMMx-VaxDxdCSrV10gcKNmt}Y)H8#66Zr=Yspc~Mqn~c6`W`+4L#WKE$kQr zL~F18S-%9X0SzEJD9@5}MUG@e=pKC3Muyb_8!MSVaC>&(!|X!n$LEZVj-}~uQ6Ji| zBPWAHRR?TAUAMU{k*D7x!FZ`Q1NlnJDZIOf|>GK_okNz-(A2l zZ9Wf4-cPjczMg?u<{b&4=EyF8&AVe?ke_$ypGj2=4Y>TO=rnN%oPlsh$RpaAt{(05`!GLU#2XqH9_UdGA|`8f>{vj7IFeOLd(b zksj-*YHcS+Y3XdWFG1n?Sn@ji%+wsbro|yQeLUeBac+dhcMoB;|7jVod%E=2!3^X-2d+o!UBCo+bL4Rl7 z{+rGUg1*Tw!2vPEy^#$A7gr<%PyMkpQJg%GT=V5x<(Fx3C93gD4piobFp`nAAr`6I@5h2g}8Prl7;H8V_c!78bVxG;H1HXCyZ1k z6z5&)+*GS?OBO5$&plr-8iiScEE?IB$jW-%m(qwUw*aOX!?HP6gc_W;p0y}q8IYM@ z+jL(fR2Xg;nQ{?UF>ppTG;5v>8Hl^V5dgW|s8{C3KZxtMB7#xl$jGyz!bOrGW^qwp zs7^G5jkD}h*5#n2SyV0G2+p?kcQMhI&v$fzW2%xcBB(=T@&RCS?@x_>OJrV-b6^`t zXi`|S?DXLjIdSIo4Z+>WuNm%O?D(ohZTEfr_GCA>i<|@B8ATQ$#5{*R%ch=Lf9en3 zUQmsyNSg561~U=#v2W2lGD6%vDe0hCJZ~=;e*-?y=&^P^2BCfNVIXER)%D&)7~df| z(j9h+#EjW8kw?$hI54if7&Mec=cE~E3E+o#BpnkBQ$>r7Ll_2}u4jJ6;k=S%H- zIwC#MgHST~AgRo<^vN=j!p;KvCatXKv8Txk&YmN^bt?7zJSs`(A$wThBuG%}+dDFEb27eI~4WxyLk#RA59YTkLv7){_Oz@$Wvn54C}>O*>Q zAB%$8|7fbjSO9?8!Wmw~b(cTzZ3NKHMtAvPuq3K$_x}9OKoI-~vaHucsKNaNhpx&7 zXju)+@7=hjOslI?+aeCN;zB>1x8MSa%m3$VNph9>ya|F8zJ^?8LVyxqrNdmAtza zjW*l6kc$^J%9*!A+sqyvB>bpDA2$`kwdQMXq-ZzdBZZ|3sx0yfb^ytt)HGEPMRy5z z1TMfj0uNU@&6MQ4L!m?k^qf@)4n;jWiL07WK+g3cgAN?cC@(6tJ4K7V0KazQNP~Ij z{ElZ>h{1_yAwCdOms{S?TW3%2uV4FgMO9Qu%I2ZLHWJ*g{k?aK)Ig;3MFdeqbS$L& zS7BMVZ8)#C?iNmAsf;Cr?fV&HXKHad9-n}C3N?+zL_8b^WBPBYaY!5jW@8d*$O8rL=_oh8FNMI4g9Mz>&XO-^pTe(jOl zc||OoJqFJ&5_}N|A{$}kL6w;$l|3VBQ@$gF#tY@MQB_bHF^+2=gpEFM+f+hSw>T!C zL8~T5#EOyPpUxUJg%slK#%)!z*_jwY&6)@~tit#uyRiB|kyVA2k{LXtC`gV4L6G=h z0WCJ@%r_!eJE!aSf!bL}H4;%A0e&~*-Q<-sRp=zd3)ld^L`ofjXG2Wz>uPrqes5ri z=&vUs>USWdp{#M+XeFfK6X*elB^KO6_) z>OYG^@0E}s=uuL^_TjC$??$H7 zYsNmmX-c!$U9+g~pnipa7;$x!c8L}Qv=XJu9fOF!%TafXB1#Jz0=3T2J{xKR7}0)E 
zif3M1BNCUBF}f4H{X6%~m)LOQ9;&K+El&M?qXE3GWd_Vy#O9BswHL95;{dx7`sj?5 zJBBQU)K--|k%O>MULC<5oo+_xdmV9)PEX!84twl6tz~Sh#Po7B$+{|oqve+z04oPJ zwm)kz0~Sz0WBvQ6&2O7qu!Zh<&(Pu8m_wvKLT}s7A&7{6oR?OS9#t)xJksc$=X2a0 zBWv!GpYGvd3y5A=0{fce`&6$9zH8&<=cJ1t692mR0NHDp+33g!Slp^R<|YOWN~fZ+ zt=$L;pg-4PV-+VZH%pmt)bZ5lmEM7((uR7wZt~qbvY*hfmL=r5>}WI3)fXM3elLlm ztNhS)>ah6cvo1@_xWw%9M0EtOqqCvNA59j^Mn!tRrN+|mpLv2IFN0K{$iDC-pCaot zQmiQYyI)R?sNDNsd>yPa8maAM0z(dU%JKe&utlz!f2A1W>J|Lh369QUinK(+XWjd& zR9XvCe>yoy;kuSiiBowzYZ)ii(GX&;oq&#wiWupT<}Y1@T+b5c{IWG7g)pdInC5+d z^au8E10lmgw}oIl5ruN`hLH&uWZcIWS);AuvEDDsnNUaxglw0XsBY_D2+)4t$NUzT z3?bL$L{AWpnT}wi{SPp8de~(f@wgU z^Ug;}c4r%G-6x1y-CuOd-*Agz28X08v8e3?juq8F`oK_=l9T@Y)s2L5#POYFdKdQZ zcj%ovIS42dm8)|vH$;8A#kbuMZ4 zI;-{7&>;OMG5`7rLj))uh5@laXqV62%+=qNF4WtYn%uXqO%Y*f0V?Rs^UARJ^Nit?ilrPgK&kU_WtSbHY(ztpg0MV0( z;7}q*(hkY2Jy8kl-N_oYy@>@H&e-eEk_@|G;c z&!^(z_3;!m?OZWq#dF=(^D^NSI2PHuN9)A!dW=Q_*B#=2e5i{oW zNi-q<&Lyzqt0Rfh22kAE`;t#?*&BxRpFYCA`Pix>!;aXz`Fudt^U#VUOxiOU1+d!W=Sggt4C1S{@@WBC zDv!YpXatn}hhkV1Aj~Gvv1W!?~Pn&c%{bv=>I*28l25a-WW@k}RO_ zFCmL2SMHGoDQqBpq(Nw5((F6QC`Nd1#})|VBJKw-JLcgp)F8g{7qV zAK=U&@viFs1D}@P_Ag<@?=)HL_+}^K!IWU47~uY`NG*}2KF=aOY?7gBW@CTAogfOPtBn`p zRs8)At(@pzw%cew(DcxfaE8263cO$TSQWn;Lh>N=s;VILUi)L`3nQz*73Km2BV1W2 zdyYfDbuX6rY=~n&Nit+@F<%gj&xbU~o_`mbzkN~(eP2@$_Q@v zSj_^UP7OhgZ{y8*PAtAXkc16c#`-C*)>B-Ag>uY`&A@;Dh(`-U64lmka?C5BjaJ=C z2vLkwd((k43I4!;Z;Ajnb`pt*jN?ikV0uY*Mni9-OWE1T7rPQwrgYaLEYwO38$Duv z;pwV3Y5^N{OvwF06YYY@L^UpiI(9l&U${}?$ig}hP~gyz`>i#qs0^ew;_oz{tpm4W!3yx1b} zj#O<;<-ONnN6={79s*P}lH!A}5_#nJm;b@^ zORWfqXujsQsHo3_edO@3A2*6jnEZTmu+s` zpjAvzz_P5MLCLp%pjx#CeFz#f^^C3G9t=(gQSxvgggH`CUQET{I#Bp-T9+H%zN=-4 zE-p~;HE@(U!t4mmz#;)7OzuAP2X0ncGJ3Hs5E8A5toW@32~gm64e6Sn&iJtlHmPtg zbG4d%ElL<^f4cP?(xmv^WqNT21mTHm71)mGtm8n?MK5S2fPQ5?j5PQZTuJwyLDDk^ zrR7h7wjC6x-i~bU2RlH9^lYwDvZ2OGj8nNd%5dV@JDOU??j-xefCZR=Gp4y+D17&-A*i&Lt$O$$$DUspjSIE{$Gc zVN$kdaZ;9K>wEm{vq2p!eepu+8Bsz1t}JlFz}S)c$AF$NR*w>u)}fv+k5nN|MgR5d 
zMLh6{0Ny@pIf2hEu;o2P54g%g(!1BqCuP00mkuqE5u^RITGO@`;0m1&H~h^ySA^pBk(JE+QcE9c!ZDS;az?h)f^+ zdP7Yz;Cy$CJmmMf(QlygA}=8KFJ--Di1`6c41NhwjewXkKk!OBUn*bl2pxy29|*)c zBz>gZc?qMs!ch#%g$9s>Wr6+IQUIec__^nmKKdp`XgUa7Y~zxYG^uP`hO&R*Dq=q( znrxWoo{f@-h)M&_;|TjrCqqcc}Pk0Is@@NDS>wPdROjPT8@B8r25!Yik$56UKd7p zkcN>SJ1C}oPfVFNJvVs>g}{__`^_+O?>S>KvwGuG1m2YD<&*MDO|~$CpUM47dNX2! zm>P~m{#(KAHyq>I0FMGqYe3mx9MIuiN2gD>;*OXbAu-n8APl$}cJeXOH_{jVGLUIPLz<3wTU`0;+K2f_3$~K!LOKP?Mrdj7?5;g44goG-+W}&NA3S&o z5c7>kaarX@kK{LZvSGRjGmFq1GTJ3_bmuNJ!{xtQk=L<}z?P2bmb-y1-|Z>W)ru0J z!iK!yxs4ReaOH&TT(Je%u@a>C{TIpmyD7N{6B)@b{^15ZE0IC!@z_3(qW}|bD@s|5 zg9|1n*M8RCmwj@zp*G<}Vqw-UB!wQag{qd(KcyYnB23rpOsQyuV-i%qtaMi5Utq?j z!lrvD0f`UfAox7L!RRGI>KB9&#|YnFTCrBMV}nbOjmtvnL%V5j$&xA$;Ig0tjW_@x zP50ihDV9(md=V`vxPQrptVoo3obuU%>3zYAU5g-20(%%Ec0eHH`Dy03g%R3<*A!M zx;Q?(nx&z40=AAczN#v*#!O<@RneU>Q9(`?x&`wGxB+`?88z>hEwj{bM@eBubkVTOV!wY9^DH6@$8 zXE>%)902$WEtXpgh>rq+I#ueG&hYhnY>Day#*jRrC+xA~6l-_GPO`eik-NXh^m1I_ z&Vl}=(^0Ab3z(-3{R;RSHa&=YM8zP*itm>44;f8^IG1)Mv6*&5j&n~)g5%s{zHd@d zzQ}XU;2`U5cN}UfRY3vuTl{TIz%Ba^OpMev%ox+1TNDo%(C{4VR(1;C$W?thRb(x? 
z;Ocj{KE|DX{~ijzd;32-uH|JdJO!tO(sL9$2R!Z%nlN&k8g!}&ln(hal|3EW@Zr*2 z``^qp4i7s~i+g27!>V)=YyU(F&ywU<{q^pKumDmQI zvb63Z45q}qJlrZHx>YSbj)BR@Km6yS^%>zp?xrgTdCY(vJ2_(R+4yS(m--X1+fM@A)ce-Ofg)w{X7Ne+)U_LliOl4df(;@Yhue$g70Zd z5_m(xvpP;*=bd1x=#B1G5PcrsP}>JRr6aN(QuulGt6nv6cBP?@+jnS=6L0CXlDKCMYZeJ$*YF$JOG92zf z;A|d}aQ3 z=9NkIg^MZ=PiYf^K-ry8mb!mTWYx@*0%x362>Ne8f;5BhJo}oE@%oPUdsGJL=HvZXt zoSNJ8(`t8L20dU3qO$P>-dT^WGT(1H9!#GgM(7$YlV#29mI?^D{U%|1C_&f$spbFt zWG{4gC@{u!$pH-SF*vP&>zdVfY8b!iAl@Dcoypc(M(uRLON?Jt%V@r=bga>z%bGyB zdO#JiJbB(E+9bB$BfH6Z?I#@HrCdfLS5A`fWU%*Lo_PwKAA6Nnx&P=`fc3(u{%`R8 z4j)WRoh40(Jv)QP}V_c6%<2{&wx5SqrzxzU|zq=W+t zjsd(JnaOVxXjU!sRC^Iu=QEd4E`Wuc^jb>3{djeEl1f=%9m3$5oQY4}wBn}f3fK}W z6$+1V*9)Ng6P}ZcH4k|1Uy$Arr+ZUSD_t zB#QlIJ<>e`?3?0rg2L}yBRB|EAe%k3wa@#8zWqS(OVG9d{;)S3HclO5gy@^SFTvS( zy}@J(o*x+^${t%Pd|YV0TVoZAStC_o^9Os7bNawB2+a0o4zc-u<04^yclJx?}zUXWmPd#|gY)3^_@l%@?gyO9yhrt`o?{%5Gzl^L#MI9M|Gk z?Jetpw>&J{n2G#8Q4;bX2inGXOzY3S`Ff@bSGoC!=ZHU2DW^tTA=< z3LLh^#l<&k{<@%LLyzk4*;PvgS%ctYHKe}B0%m-= zGG77tJ`U*GC(;r?-fMpz{;$LXDGRfzucnCpTm{?AAF`-kngicgVuRmO(0x^gXTH0b z7=0S++JTPMtphBl2kbX^ntTd*FV>VqI~#|%7hVA=n-K1Zz6l~D(AD!`mAl-F2ToDu zHf}As9fjzLTF!-+oD;PzIW5c z_}YPtP3+yEX88!yz0r>l3At=e=S0S|H0ki~D+`0A{%}@#-h$N8+zLo(iR>BYBM>GK zJl(3;ER|JyeXO5A13JX}WE1k1-ste5((kj8rohe2c1F88kXcmPQ;_n}97!UMGXIC4 z{{U(4>7lbl;#gHV)@vf`Jh*LxMKb#?GH4}CDZC9`R|s8Gj&M+pgzKh9hridjKd~{7 z@oX+wC5>3iRDcsh2XzST{+T>VEHD%>Wr*%j~&sDO>M7bLF#;_vColZQz5rVKWhMr`>p%e^J zHi^1Z_6!s-hLQwrYpF@Avv6oCLc6~}MdX9|0vJh{oE9L)#NH{Y{cvofJ$yI8Hd&o%B5w$mjK@s7+fd~DtX5e2$D+rl?X1rq9vb0+tkGJO6-OCY zA`NpjTByCWp?Hy5z6RmVg&Y4B+<4xb1Tnhzo;0`-M4_lGBvC^aCd)J(Zz$SB)qB~ z$mF2!nl;Gx$*|bhbezXTM@7W2EvM_Ikbjf3U&-QtC|X0c9)hcG z$yV00yHAZfp#7YMAYr)%YO2~!p9rm0av|m$%&;&k&YyDn=s2t1k5J(1OQ02Fe4>!v zhTXIaHp1kvmU9yeGvof|+svyD6Mpl$Bk6C^ziN5&u|ASFb=?+LB})*P`}DN~s|5Al z6JYI|wGRodq9wUhVaVH=Q+7k?fyYZqR%fQoWIgxIVWK^LMr88K^iTx+=ElVfDKn=K_gVv zA+SBf#Ps(=b|ws^#P9|Bt%rZ_YLryL(JijmkyP0BNr~X6899}xPGo{58%n0Tx 
zxVK6)wVap5dI`2?70A>AXZHN=OU#Je?M=;7eG<}q|8ucq(OvArR<8#P{3^u70>ka_TRS9>A#qMza*4DI0__gVMT^?K%_Z+zrdNq=Qa@0O0pns zu(1Jz{$D%_Rt{-j^IX_(ySuaTmZ=RVYDMpB2!qKEe+Hc+Lq2X68bQvu%$eI)_Nr^ih@uai9WD^7*?U~Dm-J2wz5FI(;fZq58Zl=4_q!; zeYCcHo4v_mhCo`s!z3$C4?&cbfm;NK;b#5u%{ZDL-bO>uHMhj#e;+OK;C`f~p_@$N zR#i%5XhBhdxbdU6-q~Aw!SJa7;{R|I)&5HL#=7VKQY9}@!iu)+o>8s@KI#c{cvyd1 zQ(8J)HgzgiM+6z$@F%|H=h~Ok3&hjkRQ=1-4=8ISbDhm&x}ECQ!J22awd!emjWCh$ zPh)%k^xtgSe&rEPJ1uP|7yHnVIebkN^&;mUk zcri6@+gxihAj(5=LaACTfqO$ul!!ME?)n&Ho|3{m{sr ze)C$=oh@+zycS(6Zv6DE_^~Y-s$U|caZw)?xYqa+ZqoS2kN?vrf8G2!Nx&LiF%MZL zd*Ux{>5vpI&X_sRzkJwsQ>^CSxQOJ16v1}ej8;qbsw4H9VSqC(+`B-v*YERxbB_D5 z?tum7+Rkf*U}H`9E`Cb9m+H2Xh!8w(u#Ut5zldO1lK9|40+)DrKy7d0qmTbf^^A-J zHz|NO`5XDZ4iJgP3;=srO=-m`^+l6Fz{{HL# z@CgIb@>!k{eQIkXLyn=MusA^;^&cPFIz7op2Fk9gf3nTfp((CP9k1!%JQMj2>LghD zO2X=A+s`cn*+h*tN%rfsa>8IqYl-4wy-faPnZ{3OxF@x6gp+35(%+_4!FFRgHhrI$ zY>ruE`ZC9rN_RxIEzc5mIUYVFdsSJvHc3z*@#2o*`2&vp$4|n6$De+?vo^=DI3~>* zVC~#lQ4f_(p%h()KKtL&0l=b}<8QA!YCCNc-5X{=fQJljD_G>5Sb@}XW`D)axewHoP3li_hO||r&?otYT_Yc}yw*+3;MsQo=uyD{+ z-kg7xJR5+!*g@*_sVTAMiVF4o-#M4pU(C*1w7CA+VxtxbB^=dQw)tck62J*09VQ@x zaTHFo5H8Y^V@cWAu_!#%N+($qpZ@b8do4HMn+bV8ZB48`RxY=Exy*{ZM(gh{G%Y6> zvtRP@wJ?os(LH=2t5|LE86nrv%HH~$30ux*OIBaJD($ans-C;3$xu$2EPvYYo?P4{NX z(>q9h^)Ih6J4lyu=39fZRPx?d_%MN!%=~x_$p(PM`OKGdoF`xZ5pxE~*QbG$B67rn z2oH2GMCK`dO;?0T!}Qc%cDn3tcb?+pJ+pw>Dm}7fk+6erj2V?}9Wk7InOt|4lxdB* zK*iqsXltWjr6bhicfO^AMfAG`(AcdrDko@YfF0XPp`Qu6k;Jx z$470!Cg7(=RFAv4;$MDa8ohA#1}s1>F@uO&gp6#Uc_xR-+qsV>2|*#9@ro(lkE~*z znCvVzpDs!>zpF{Pl!UDrOTwZ>u|Wl#pc3n2TdYj7rGddwqg-D97}o;U{QN`?N0a*Y zBWvo(LzmKvqq<9+S9FbyPU^eqeLYJgc; zs4y&0d}0graUYgQ5>|pvlj7Aum&yt!qr19WZE&^}w*oGGzW(^fryEj8(f z(~I=MCW&zF28@NI&~}AmqfZ5SJ|3=S1AORn2jvU7x5=Gk?ypAW>#G;Kx>QV`$u?j5 z^LBs><3^pIwZSrU-8y}CGRlVu^$!srX+F$j12 zw0Cq1+m@V`Wfn-YzAbICH+`YP*LH?4L>epbjp+zyr~uOStK-!Cqd*!}Q!QtLKjJK; zFl^Mgxj`@Ppx^qN$qWwLAL${k^xI(uVvP9~vHjLNS3zVaJFztXpgcojz)33)T-}+@ zB^E+f$5J_9%r5+dl0N zvSOt6>0$2Jc6&R|2O%=vhd&2C_|*0pFy4m(X=T%q^p>~@jvpL1E%)hR4*P`sg6D*> 
z=s(A&kM;0>@Ef64xBM&LL6`-jq=w+Oafc5X5PCc6CwhAw0Fc(TJOZkzq6%;Pcyc@d zQohUBt>Z}z@DkACr+#JyeP-S5hSl%UU~MlP`lP&Pff?N4Q7C7b?zkhPe_=u-Z-4;& z@TzpcC=erpX{HYsOmAC173@~@1Z|3kw@wSV*e8T<-NmsDN(4E(9JA|`1tIA!;f9n3 zY(tsjNE#kThH-=1-o#n;&K@JefYgl`>O6_qljGY^pJTN1yM7V1Ebrp%=`ZH-_RoclaJjyGAc$t_J>=@f{rZ>RD#K3P@vO?y9cw} z;r1wT)24@eT3>q%R`5B4-;M?GVM95wdYOIwS{U%`7eXj9yyY|g_uz%i=K-hbDF53 zhkHv%g&04a!54dmsD1 zzdX^x`5egd*xwZ~6i7IY_2t6eI_<_Cny^u9y0=L%vPk$8X+i&t@sC@B%8&wJmY-@n zz_mbZHasaaij7Vc3-v^6xA@C0a>Bv#Et>^I- zBgWvNK`FR#m^QMnIWYDf(czs8cL6^t>NXM&Ic~8Qf7>H)!6!jjetKp_9ENtE7xIxr z%l4BP&x89&9(Ts8SN2wwyo|f|23kp^o2~kdcg0Hgi86n}x_VS`mLIhK}uZo!p*&VSz`wX*zc=}!rq{Pe+XAV>+ zf_{u_XYDMRCA%!Nta)#9=El3s;}S9;^%EGdjV!MP(NnUf%)S812AlLw4{zpHL0njV z5lMc*UBgDm-~vB-B=Y3zo>=I>AgQspM;P)EtL{EM$7l-0ZB8%ii>LgM!aE?2`S$Ls z+=ue~_|3JH7Reo4^o-ks^=dF4SNgq95qyd8OJ^C@?49vvG1Nw9R~MaoW15BYoDtp^ z>IuQup}m15L6}%xk~tCsdCPTN16$0ZT?7ELVbw-T9-7UPPh0U!?vImdsOyLcc(Z#k zlcWj?eVcQqs`e*dw+1}E3}I{U=!s%L`St}r12tn)KRA5vEAE}W;UjLECft=UkX7W) zoWzkQ>DyxjJPoPY;dnO3;czI~cmeC?#gusfLqX3W4c!H&NEIJKYwv?*N|^Bnfi0Tz z?CRZ{DhFbVd@7G^e*&!|=u+4q3Q?#{k8DEL|Cig6f;D)3@4!NYW~h~c#g2dB=d!Is z2>t@DkBUH$48|bOpty(n3ZV3qN@6c9hf)WnKOUoEy3>igpD!fufP`_Ulca?878i9X z71^~t4vX(PJhJ76tPLp~H09l2jXAmK9tMl?skLd(V_dR=QMFL|?QD6ZPr=AF@2lK~ zZ%oyP)1(p)|1iWQ97fjU3=27CkqCVtpHuCEZvPE@viM;+7K~+}y@c?W@>p4Wy`K3> zU@#|NDO`wc^vd46&)>wUqtys7TPg#>8zmAJ=&`#+ zAs1onPohGNcI1hh2s$jfK*G*Hp5{>?&iK~q|J}IIBFwd3Kun%9oFNF_H-vF1Wl+3F zFZxUuBg2F~2+A8fR<&!HB5|*drX}kP%p3|lX!8!+oD1-W^urLI*x9~{39-LM@XGL( z2}{2?xM)d z=JZWNu1f@dD-)fTi~(zNFysjDl}33*xd@I_(+4|`fc8>g`{wcvneL7pY$^#1NIXSp zM@R{M>;Y?>1k&=-9S|l2{i>THSJc222Vi?6J@ydcab*9Ou3h|r3${k`oE1h6{z|(c z*70vURA!Het5~KR&IHbQ{}*XU9v7TK5E)VI9|cpt5TAMW|L!xpu|!wA!(g`~xb17J zQ}|h6@JM6!gUkWXFuHIYE+mUEILFlQU~h6n{<=S=&j$lS=H9~7YQu;Za*1e^9-3+= z!#Nzr2Md#OWI}J7I2<1!ivwa8brY$WC3)z9*#yF;2#+hN0fdu>ebTjUFN7$Q9wgeP z&;F0LZmVdp4T7mhpCQ+cfV1x0sa3Q3EEmx6wY|%q@tX^ZeiKVafqoNLo5J> z?`dhXprAu`QoJw>@*l<~G6`11D<-(3#oi<-Ic@6*)%M^YQPu3K94x@!%pFpOyBHYq 
zR#M{V_dsW!6R!6{ULw12(T{m=7Z5phf5Oxr4!ar8fzQH?D@3i*dv=>5EXtYr6 z6eTkFZ9`(I_lOJ~k4st*g$gR@RTh&;f_eNL^vI83pO8_Mk&DnFHB-T;fePXk9&{lM zV|Ls#2a^13ucX@f1QmpnG7>M9?+baL21pigw|o@+)9`8q z5|agOlj~c`)dJG1q4jJlO}YJ!zvOXX-A*16BX4y=j*KC>;J@VJkRPCSO7NFCJ-&%= z=%@=~2D^2ii2k!cj7!?SSvNLDYR5#867JYL`ao0I>8l)9+G!yp^}&^s)STti2R`J) z=8VeeRSW{#(OBO}qd!w@usD!Vee~Ca>QAJT#0CZD_I*&alKW9O6NpT4YQHi00vO}+ zr3^X&Ij!F2(S)dd9%qPY-4DPcP96WgciZ6(1-rQB4v3W%b!>MJaHJ8Bb-UPfyHg*s z1`m)^EMHvkccIQxIPV*@GQrs=n?CZR5Ha{haIXlPY!X6N?a1OK$Vrrv{NKG0HV4^T zSB^S)t!SdpfhT|-q^g-jf_G!29ktV+7iLugUey{jozO!ppbe8OsCN>n=o$Ix@}K#7`dmnMpO;qYIi00kD_>F>D2L{4Ri?*yUYYh;F{EF>L+js%K&nS z$5%Wy!u;+W!a>}>Y-D{Lc8R=nrxQPzDG{WjVAe;Dt>&u8OZduRFd`~yrMqI2Lj1P7 zdM(dh`&QQ%D(|bKu_UTA`I^QWCy+}^HIaQ``tvf@-C(L3!geh(^XGdz_E$z68CosU zT#K5#U}k5WC>1;xM?<8F4l*NK=b*-4yAAxLXvhuc(hYSibN1sFZfnryPtyc+C}a}|R8X{FsQ2Yd}~ zjGj}MX%CwWJHQUJAFtZg7>3M(j~h?4ZM%LP4>VPH53%HfZG$5Q8`QVFBRueK_xJF_ z>(w%<0&)=}5-(>lE2II}Q+b4?rT#HKD4x?=aM0ZSvGiTI2yr?w!%n+_EkPkOr)Rcr ze=yE00<$6sMxgigl654da4AEWdu#v1TpaGglZw`!A! zF%=VvJ#0r?YXXuI@8P$VWZRly0aC_Mc_E>EHyUgteS1mbjfnzy8h}Hx2PGItBPuMTrfj6G~k%{R_t!9ZFg7RJXGMtob zqLAXp$KZ-x}^&ThN_ zz4gfc_#30Q`%o8Pa9p&QB{gE)DZ?~wz4kCpiDP+E(LRErZn@39}pw=SeQ9ZETV8(r&^#mlN)vG-ZgD+D`RuKa%f062CsUu!_Nyw10!^?wNcx23p(BjZ2$m zp30)v@5FrblROJNV%kecqDg)_6QlMo;i%pZj%QMt21wMFTwuhsr_K1VI2_nUThzqX z3{cbvNY2>zX?McX%CuQSS4RVqEvwEkYh#;5Z~VGp_W^$9ti)}#`z6AiRgr1W1%5Q$ z)D(jec8t*EdiSK|COqP4N`!iH%6N! 
zh%?1DFj`oC7+?rtyk28V`!j* zqSCe$&YBeVEN~dxBAiaUyEJ?PFhxd%B^Nj5SdYsY0~iToL?FPj;JzJy3!VYFdiRL* zMa5kymzIGlx629#&HPN7o#MIr0xh6~EJkFg9s3{B$tMhN5mERZKqU${TOLf@Dh%8* zv-PkIo_R(1TlL6@Tu9Qvwi1&z+}|CE4B-ia9+$&rh+hD|-TSoZ13eb^W!Rx9&l`_} z;K4IWR93t0ijV>6MClvt+m#F`C;htVt{bM66o&`DXljhweK7V<4T=(@L5R6H!0*m3gD(($eFU4zF&%jB>R0b%w&TV~gQU`HdDFm|+ZVq}bO8j){V zHckxSh3eO=$)iW(pH^JRn&CGu1?AX|N4HPJZ>a^?H8%HukYrQP{?0Yx5AMehdA1Z2 z=k8_@0?muXiI``!r`cB03UsjrfR#p{9_cP5-VeeR1i4-CY?5hv$IwuRVG+4@xDsT) zoe{P%K%oV5!)L+KgC(yyXIIIZu((J{Au822)-bOhf&%|Gq8X=z(#U*G2Sr0l$V>sX zv_Z5FW0>7Pz;4{FB~`sE7tu@bvJYc2HGu8)xS4qnvwS#}a^P=D;DYbnA2USry)i5i zG;VTob9#)NmZoNoZ{tYIG__gh7D)Z3k0lRp(_ntiy*oWrG#GX1)R~qbMLBv!{g^#} z3I6mb#&8k-Gs$og&IFA9V(p4$lDMmwaulu=0IZQ_e0@*7FTt+AB8?4cBW=3yv_}tf zC@|z3tf@V7;Csx>q=5~KcqL$lNuZ*l4sM?>d3G6&#l=*JD?VVM`plOu$7$ z^ty(ldz0}2SXf;--j3;PE%^ckjKl+L1fj3$C)dShi@&ky4T58hy6FhVrSx+eC*_~4 zI&!zzb<2ro$R%mY7yf7iKa3<{%uXT4+;=qNA7rHD5fn~O5gg=42pwlAG7^l5=Q50& z62Cxwz|jYotm=#VJ20yUH*mx9UH`0f{hu})p}MI&GXD_m8jw%RyZVnX+j`{)qBAY_ zTD=xu&q!dK_ZHGho`VN6%Czm}b1gxc8${PfVws_Xz5*vkboqGTaWnT1XYb+|z@2{_ zPTrniZ8%b137w_KT{HtNS~F{|VLKz21^17%K(Wt`yZV5roOE381O%EHsmVr|@%ewy}Ko${dScEr|T91GViMMEL!Rc}jQ{qR#z+dPFBQm|v1>E_~&Nwu#20fhY zaq`N~9cvxCuJ%V{Ns16)LZ7Xy#r^`Cp-CE`sVJp#Dxclm3Mqg!J;)LihmpzSf+6ze zh$D5Ntp-vig-cqA=8k#94U_bSy9l4T)d6kWFaD3XR~P0KTO7CBgNVVcfJpGvht6r& z({q6PYn9q;o`!w8@vD{^GFIy~`0Se;jMw-W37l z@=Top=Tv=2EHqZ^)uB2gUTBSiWB<-Bgves~U$t^q;)fDl{K>wnL+WdYAz=nYIyQ1#x#ckaZuxixy|J6FO z5#2zC%7$OlZ4%3xcjcTA68*$wcfKJrVn@a?4u_9c6sVY@tus7J;Vn;_l(p0hibF^H z{7w*!Xp@bV&xkeA5yDG@{^-oVZL*c-;Wm!w4eT3meanvYHNh9<-zjb--ksq#AaTwx z4+&ct%W_ebj}w_gohi>dvW)xQv!~^*KX$!&etvN=A~BJ(AHS_FREK3W7|_Pf4iFRA2(++<7B*As}-$*%=T`9RR^uBOB) zu=*Jy*42L7#Q=qSXTwDhtTQq*IB4*`Ho5at=gg<{v5ZMK zyS2w01D@ZF$5WfdIh!4Mb69ex>{;6lJ8~C0{qZ@5Pd0Q8VPOdnFSR!3`7N{?)d(wY zpe5}5@OhTp>8QRMSb-^4D%Zn^JyOSlGYbPYy_@_^f^#v`p+FBDng zGSnZ9FgKOeH67cli)ao9$RJv}a;r^M1}4luv-fC=rnB`NB@587FU3_GUNX< z8w>Kz*L)B-6!XPE^m(^xE}~Z78zqH2bvE>Ot#dd0(1Y_PSWrSc`ezBP2KZx-_ePDf 
zL`_jEB;9M6LI3Fi8Jr86^Ybo`=dJdC6Cbnb`5}Nt9j1P2;liFb{T@B8ui7#-e+U2O z)%AXQUhUB(0;}JJ^*pO%9t)k>tm|_TyrS$~)Ye7%vh;n0HFoY#z`VlbYnD)f%*nVq0!@7rpQI=I#^ zV`U_!eq(i#7hSQr-nAJxSfhIB+;+_`$=>YFj1 z@Az1-Kd~tLb#89h>HMzX-RR>D!(+*yCAYdbdLC{+Qjj0oOT5%VXMme=Sp)pCfzt*x z#^yxruuJ0Q&Hl)~uwd{b&cEH4+S=l%{QUe!*>>yAtitK-FGMRI>$PWG9#1Uocjd}o z8(?i{2)0#gu&Tb%H>V&p^uf)#KjkVcJ!gX;?@FR}g0AT;hhF&=l?49*-+Yty_c(V|b>biv9#5L4CjlFuJ0o2ur38l(}ai>LS(j< z4Q016L=Z-vzf^o|G}ye?3B5APp4Q%ckG;cZqnC9R4c&n z?n<4T?XM=KI2Px9wdjc8prFN^uMJW`GoEwF!Rr@u7jq}?$*S5^#SR9J4B}kl9=a(Q z%Dvd0OvbppTw}+hrJKq&{n?S)PyFV9r}1J3ub6Ig9zFL|!GUAxxdY7l!lhJ;Gbzm~ zR$;trJp}0H{0)$^P5<#xh?)D{|r;}SL_dFb@~_3@O| zw|MI7eQvxw$Btf%J%tP^+tG;O=#m%wOO9>=+P^N{8Nw6^-nx_7VF$pK7P8WeRN9fN%FI{ zG~@mcg7?bFb?qjMg74Yb*hr}?mhx^~RyM8^K>xsF)ya6?tZa5K!Dd`ncYgq@4O$fK z%oP7LV{ksf&ISga9tJ8<=aEM?yD&fA(P$uGY#G4^(MmBp-qR6eQfJ|_ul^{nX<{fP{&!501e zb=}!wA6`3KXUPhW;I;m6Dcj`=Rq+IiD&01P=aadM;xrpmS;sfucgw8fEH*%2)Rna% z-mVvU^jtl-=$}jpLa>E`in^Vke&~V&1kndwMoJxyeND}UrbhKn&;zHhO5fIqW44lw z0*j++B6sE|ldcxq-z!20eIBZ*zl@Oit(o<$x<~*i6Sg;X83A)6oiHd42aLC2KrIq2 zc4o*vlR-kzc8k4j0{`1EdPHuErJP|jXnEotcEW3>>jI7j!A_?Bmj ztHtbK*s#G|`w)-W&nVlKhTx{GN-_48)m7bD?p`9dr>xH~I?73hy^(pFo zGnP8cx&rYzW2b%J>jtflX+_vAdrPFO>$%ieHOj~~a*X|Q)BKzQ+c;so)Kk?QY`zKR z_<~N|M8;&^YJ=_Sc=NJTTQfj->#N&E*CYYyFS}2 zKhH6pBB0>&4j2(BMoLT&4!ug?!XrF6_i?<@|DKP{Vl|Kgj8TPtE}3m zJgxg8oP4+0Yk8JjEb~F~x9aA+h19v9vOy_*dBfI5Q{NA>SCaJjB7Okfq^KWfVwt(I z@;(*+TK0IFV%BJU_5))M?Q7yZ%l*b1%R~1lha>ZOUtJ(N;d^5OF1;?~Dy6Zzy)ww2 z@5?hEWhYvP){LIcYR+YO@A}+FMgR~^*<7NV%~ftNVZo+B!)wGH*5MT7%Epg4CNlcM z1DRNs$|Y^U>kIq%+++bT6xaEV=8$-Me?|O`2`O;?yeh*V`iM<+TDEjeUnd z9Wqz*pj_maPf8~ZvvXY2baFZ=N@wK;9>7{H3bsx~{qXY^-oQ?Yj#KdxY5 zA-dx1HeCOJ*U$d4?bPc@cI4r;6YJ6Z^Y?aJM#Oh)T`{=>vQRBY+HF^#X?NyWc!WE-GC7`l zG3+VT1*&)~Q1VN|&w_%@--ca_d1hA3r!E`vp8!|D`{Zuh#-iP(7{^>~iOu@PV!p(r zWpj3YDbK=mYfhcbP{$-s{-gIRmr5FCRlN|iA>1s0GgPxNR-<4#;UrlM9|t1gY5n?Q z7w=aim#$yhLIa>fq6wCb<@cITQLR5{)e5%JObPa4>IwoUERmM8*w(f2(D_nr(X>6Q z^=TwH(aWIEA{KZVVarPNl4IdFIlGcwod9`QpR1KcMe0|)h8nw#+18alDkBJsjq1a^ 
zRXET=8yg!}uNoT`av5&6rcMgetaTqYvwnuS7Ru``*Ojc6dK@DV>m(f9l{OylFv_zr z?G|BFE#LT}=D>ji=*f)WSu<&#}6P8Sawy!d`_Qk1>Z{Bhfpn}a@& z>jimcdK^a2aLu+4Q3s|06zJ0>DsZ9|UzdYC^#fWuEK;J_r&>iEL@9E)mL^$Xj$1RI z{M4-m%}B9c9`#d6HW_W2^R2Ny4}L2@?5i6ON1#bl+k-c1bkehN5T|Sy zptgCByrzq>4UB3e>>D%Q`C>5-<3wbYO-z`}k`Qpmp7ZD%8Dkb;WXfv8(YW+bkha8Q z!JEG#cx3P%-aA!eUcy#C;GLkKVY<0a;M2eQ&5=9SbYaD?$DTYKk2NPU%*^z2y0kRiw9k8zeS^ z(Uoj9dK~&nX5Fa6*VbDBsgEJMZ`r!uZro6X9$@Y2JRJcC%X^zk>2*m5oy9reoxKWb z5rK7#)+9rgC2T=A(V4Aa=5%XX`=}Q)grzpY{RzJ#<9=2@^rf!~;&+O)+SufpqIW?H zDG_0;);)d>tG26cUE!k%U2=Hp0$N{iPM?fJ=NpwE*QK*)wyD-SrFK!!y6-S=~>@7wAxkY2^CBVZ^Kl94^&lE zcJlC+I`pUzoX`@Y#Oy*6cM#?`7w(r#pYDvWS#`zWvhtg z+2D^reVHp@Y_;A>H5YEm412VWtM3T@S;JBDwB1Z=F9b?-R*Hbzyu zd`!D+79FgHs6$zh?sj6na5a56X2yd5aC;#k& z^R?&(gao1$-Vz&+3Yt|RZkR5To$!f}DGfq=IQOXhVfGHun0q;WRFjb=9Z@uZMjY#> zrdE!3u1;j@b8Ia2)qg)s(>}MHu{KlD;NP|ONm!~V-wtS;xMQgW z4y(yrv;*aHA#&w8w~gFfVMBWpj+KYqnBz|%FEbYjA9M&aWZ1G zl4Nlxky9d>@4?u$idt7FF_Q3x6yPOA)RO4TS67at1@hLz)Jt=7q4eBQWR*_t0ycQF zE2)6RTp{A3@-LUzbh61^AJ>rp zJ;s}59v4!I5ims$HRHSGki$rEZ~ED)J0VtB(^ex$II>{X4wTv%Zm`3#>lUYN2~M#d zD4wk8Tg>gtUwr>^t%|)X;9;zsFM@E4_0PbOsSsl<+X4Z!$Ozv-gd*O}ybOXrid3uk z#Ec&P8r=M19C)p>pah$pQ}e*fhC?{=+JlG2F3&ao#`K-e)|1ndCiKD9FX9fAFTJ&6 zU7hh}PcgD;(ESo4zLuHal7H}do2hJtMPFAtP(+{dV2x|c$2Hl5jO&>i{I2OzhZMsGe6kw=R>eapUO&Q)?U#dK(2gsAO-Nq$Q&>?p*?=X(Qwu?oI>BS z!M}l2Rw=RXqfzxu`eXP(SJ0o1QRW4xthP2o00k6QaD3iMw1aj+*2swQNK{0csATp; zbPP{0AmNv;6Taq#F+*oIRE8D1vMlbJI8SgC76!+xOBR~m{WbE2WFtJL!@pIYy`~vb zotYd9d(898g4ly!6swC|Z&qdX6*ZqJtDkt<u(I|*Jlm|`43iERmO_NXAa_N8+@ClCrkCaYO7EmpFN>~hh41tU;-$b zRJRkG(=?kON=iy7Y;VitoFd;;BjkGG;N*B#G8%>G@(Hw80U|W*wc{47PMqIR)fa@7&B$tQ&M(Dy^L3 zDeR3|9Vu*E@d_}(=Yg!L(O!`%QCR1Xt6<5FT~O~SGOQO)5K&zpk&RGVczecG$Xj_K zfivF8F7!sf`yDBtCb!|YMssz7=|(PT@epQ4h@YG~(y)Ync$msqvWhc+X7zVzQ_wpc zoeqd;(eG4GMR7AB>Cr4Cr*!Y0Q7V*SOzRb8YN6`4#AZ345Dw{WHHc{X>{!qCQ&(%MO0 zmD=p?SBt5!`7{Riodk$GcDR#e=O z%#yI~B|$@sdHJhJL>4-DbNLRsnALVVRP;wqg8rmylO=e34{@$((qoGOKp%4c5O*^# zI#bV_Zf(|DE3{@%a!xd;>yoT7wKG0{TnYI520Rsm%1~Ay(U=-2yOv6ToVH7LvCBS1 
z6Ne>=1jrRMD!+9IAgQEZpTBDQ%6ztJ+B%?huFJQtrWP1+sX}8zn>Wtkl+%lNTZHU8 zmAN0<>yP8dZ>7pc4ByrWJsaD10l?O7^6e#`NK{8W7wQ^$paq>!GNABQjx>u0+`#d7 zjQnC#CjXp?KaMlHWXBF!L6ML^vi9-V^+KA?Kzv?nE4i~9_3XFlr%666bXrX!3}e#< zF}v+YJ!~Vo%KF&#wIY*V)-$WCs+TUjB^s~)`phz!sOMw;N=%_?ifph>O+?U&gYiO( zmPFRs3AcqYl2U{ecP3@h;l0nPQt5~GnP8nMJgah1F-=ciag+;a&nB#UB!bBtt}ZU2 zDAeZVfiy=47V-7A^pXQ@-HF)mtle!|qGYf=UKZRXLFRh@SE@qB!q=kHMnUaTee z4G+KPAAiudPUt1JPyr1#L(}wOft9JdFV(5^-z;(CM&fFee-ywQyP%lnVw!gePY?V# z0dvUX-XdYY$lFf{8I>{!HHEPVNe6G;Y8lvAUwuwqcH>=TvgYfT*mV1(2MpDtk7hQ; zG=Hla#v^)wt`AFbj=E_y=nl(&kMs4Ts`#ILCsyme3ceVsrjk#89w-*bQDyjIzl70U z2Mz4cYFuw4X$`)&5DJt2C@j48`E`rKu~j(=_UC#_C$tVqOU!$UeIJVu^{%c@){l$*PTy{P%hs#@Y9Vdy-4@hA~!W> zjpM6>xE-6>G&k0kQ{G?+5jviIX1V~Bwx}ERX3+dt(JfP(=^!Fu?ktm07Ok3~`M1*} zjZ8lmKSi54ZnU~Nk^}vPE1*;dJ_+ zzD#oz_h6b{%SH2=p(=x>?;Ge~{SY_y(?_GfpW?Rs)tx7s5LBOOq*bVOqwrU>Oxya& zk7m|0y^u>bB2Ej=mbGJaqTK zVSG;j|I0J?k|%#fjkl&MDx^Pu7B%?PJF_uL)hOGMfJWyBVeV(kbNs`M zY+4`ZuTDhEFr|+0TDE1G#H2i54~@R?@vQM==RLwXB3~;V>^|%1VseFj9)YLb8TC7@ zKVE)(XnbwoqeE|)Yj{>;vwuXg+rB>RD2KhEYHLw#T7CNTofI8@@uR&=IB({f!bHjC zlQrW!CR#0y4*hmGMrnEPV$X}GviHmDUe>ka45c5$8?>sljt$0&->)Iz5cxCeNe%Vc z3UAi*4&Eib&K&zU4h|e9QRAnth@QDw?8~Ng^}f7DrAe;3t$0lP7w788`(Lj<@+Wz{ z1cr{*tieaf0QDXC2S)ki4aywH?!v{c1o~1p8nY`;(oXLuCL~Ih8}fd4cwkKK(CwLC z+Wo}HA5>>9?=RxVkw+)Ly6+=c489a_{{s#_E@h>ImBrbB*^X-=@i>{v& z-B``kaTU5VB#m;d=eHUqc-PMYH(Z-V&0#F`Ak4C` z`_RyZfKr2MbW@!F`QMi*i7g_F=tR9)8i-GhRCy5IWj$KIi0;hIUWmP|KbR&|bj5DN zx}u$D-PqwM4Nv>bP2!AL8P`?abR{wQQs{J8J7*lT_|?S^magz}ym1>(A;%TC9=^9SO=Q^-s_!At9oOm*q!pi_aud-i$KQmd0-N;|x9V>|Af} zl3h&xoaCBbsfbhd<4%9J*`$td)(4ZZg>lZAQks0-TOSuEdEJeMsC?c3nX!one5qP8cV9jx-c_4<%S;14i}%=;x**a(V5`%2!=kCY0(reuHq z^wqI-`2)A4Kr`NX;*@cP^Z1m_2Qs2xj%M2xQ&*k4YV2K#?i1Ue=-R?zI@Zr)X8z)3 zcgRUL%^PIe!$aQZ-(Q|akyO`pgu^>QR=9#|2FgCYk8KgJM4z~EfbM0b*K~`=WJ|J^ zu>BFz_Qs@dn_wsE7!RxikQWaRORWnY=sAEg>13q+%t#W=wyd)}GvKY5{#?7`3)^>l zT@^?IHDWc)4W1QtOc+(SB7R$gEJ00nMe2twQ_OE)ps1gF&it@Oyh_X>?xQotEeMIo~R+%#Si1qqush 
zlCqp(Zs=%sGCF(m;jDsJ=}_^8-Ql+$l7S6+H%e)vsQCqIcsE~ZRQU-28q&Pc*Q0zI zo8eM+v{2`u{o>1L=YkV7uVNq@*ff579>0EmAmrBTC!JinmG^*)e^Tat+Cf{b6YPv3 zz0CJj;hYHR%P)oWdVbiq@(fh@3wGKpDvt+mP&CTgK9Jco{CM$TxsYb%lC(kR3wcP1 zo~7#*POA9!ZI8`f(z}XpWb>z5!G9-;6r5}QZg=`qVvK1Yzh0lhTY3Jzd&Z9sT{0VF zDCAO4`iWo6k!X4Ig#q!~W^DHhIJeY`ok>Z#jUs&&jf~ITx1uXbTuz>uAIY6;YZy_GB=*Ac#BroN|`kftO!&y1mX-~p7rvK z!b7d(+(i|t@0MKhVIp1{gOw+#57dvXlotQwP`j*EJth=)7$q6mJ5oh!ig&C7y~M?7 zG^wi|7+J@JT<`B5b>+-&xs~g>S&ojG=>_uyN#xgm|510zjwFvj2V-VXo?oUeWt~DbdRG^ALW%sg@ZRWx3~ta$HMixFAUxk=jqazD z$^$vm2PnR?owW&O&68tZo>i>zn$pk9GuSxj{rQe#mSa)so5jRPF8d>rQBwKm5^`po zpQ(FJ=5MMTA+_XuAl9o$4?5K5`hxp!I<#N3KqgQyf1gnpwM;6dD%0!|VxNrLJ74%SVvElE%7=n8)Y9(h_x zYB4AN@to-S2#HNaVrBiZIr}jI7SSNCOtP2#DIJeD>I!4Q-1_%Jo$H>}!&U9h4TF)7*4rBtTcOu+!euOkZ z>rD2~U=~4BEl&WOnaT6fOGv5am1nKe2|viNt$JE7%L>ewn)KG#SPlAW^^di%QQJNI zMuu&YyVU#clgXLOMXwGv#a8>ADY{C7ods=9Dl=JLE*Rapd_ zkEe-UGa0$Mk>ff14!idnN{L(c$V8#?LPhcG7pAe(i+6Jl6%-#liXuAWAew7A@7XIC zK}vBzGjZDZOC%v$#NBl4x1leGNpxH-ZxCH~5|;bEPRCnHsAT(uLv^7sAVz*a;(jLS-3u-55{lA>Rp zy3-ZRJIqv)1C?!l!zD_A#n0(16J0u%58CK9Mh|pK$n^52+#+q@u~})2U#<^J@M@O& zdiVX4$1`GFyXwoexn@KiEHE4s`NM2= zVLVK~i^nzmtyvqP1u3&a(S0kKui)P++)qms@q92nPQU@imf-pa(H#`LjfIi_^{y>F zH?f~cR)&U~N_yee6RKQ`n94he_zA3lmHFsu^X=@k+tS`Y`PqpTP)X$;r88W6THz7_ z_Qf*lH!r%(as22ds73h*e`dH*TvNcaz15biRnw}T?Ef|Tju)fMwP}}@BczO>7f108 zkMJ{UzJ81Ai}#_J{MKE-(H^c;oI;=j$LH?&XqRszC%~cmQK*gMNPFXj zPSk_k(-1T#U3Dyruvx{Y?ubl!eZ zF19^=OU_kRq>2C161Z25GIv7lUy&cKWZ;JleOM2;i$K{Ar}+}aH6jz;5x-RT-zC6=NRcA30_Ma5zlJuA7Z~1A->e@gyLlshW9BRfUcx?vq zb(>kr-M?EEQ=UCqu%7?ZfT!?encj-XmoiH({Z1vt@3^nrT~FPl134wrxalxVb?Z!7 z?Q_}7Loo??9|5sw7qTd4@xy-%j?Nq#H*{?U96LAOR?{o!#l?5c=jLc)Ue!;pV7zJz z%URD3@^MKS?cO4%w048M*KZ#ez5UQCPu2wS-16{ORO@ERN%C8_%1kL`^}fG4_#j+N zd%*Y`tl5IeVFllsri_UX$4;tKds&Endzs9e^x({yG{$cPT2GSY#CfYT3?GL^Acj zRHRpFrT7zQ!;QBAaTGzr598B z!{w^#>(5#lZocUMW}R!ZYO39m)Vutmw!m zX?z%OOP9}LR!mdkS)00}XjUffD%PTTrzv7%{jeNQjTe)*-tTXnGnqWe=?O;7=``1L zJg+{L?CQqiO43MlHdlIo*NvKaezZCHXj`1(|0C_Y!>R87|Irabod}6?tkkhewrsMp 
za*jR9Ob8hvo1{?4$R1@KA~SoGkQrrVWGj1S>-Rj}-JkD$f4;vze%JN8E}#2$yYO*d z@7L@3dOjcP+}8t^ACvjM7S0vf3qYgD+}6z;ew>u~%F)Z@OxZme0FI@84byjsq=Oc|q>GCl&`KFRsF(U4S0SdA<3t;tuRQu}6 zxGeVtQd1_UQhLOX61yR_T)|_KNQ}GaWZ)wc*Ha5bl8P;*;52vzBDE} zADK;eYC}$LiiU+DT#AST3K&rx!G5(Tf><^i*eu$`(Tf(X{cP@Qry{xZZn*DE$Cf@h zNjfK=ab*8}%_*V`W{1U3X)o9G_#59P2a4*x%S!1CAAkR7&t1TImX}D}VH5qEs!=4HeHHPbT`^IiDb? zm)nu<-!)KZ#;6nN81~3Xy+40sYOXgo=3=6#%Z*45^}E}Q{->*4MDzl9?HnWuRn)S! zjzo}m4&%&8h36+ipA}bfJ|^m15wahR3LrVr>PvVe`T$mjJ%m;-H*bzufckuBKbqqQ z<;T-Dy?ox5#>FE$2GO<}n@iXa$o`|9X4L1emw1NVuSXurX}6b}H6OR`PS3bEzv6vn zIY)dj*2McFHn;XA*7Adxhr$^g~=j3CcBm_MQ1# z3n0qV2y@Gukgzs(KV4Mnu{*2xVd}coR9Hxd0zS z5uLo8+-Zq2a#Q6b(J-?)Q22)TWQ5U&oO~065>vONyPe&oHtj}L8AN>SRCsT$j$UPF zqKJyb;kKDUtefIfTU|>AagWgJai)}C6C{0-QBH41>(BAN#5O#v%_n9-Usdd9C~{zZ zP?W^38jpb=wc9x}-?VFuZ-|lf@WSMR&g^X~1DzIn!m4_7Qb(G^a! zvdwE--*9cQBz<^%zA5`J3VS_73JD)?8+K^2tQVx$e@$p_=uP;7v zhAiCo3aps$WTW;^1hRaz>b^ZUbM*6tUDssHJ15ctO}ozL%=L`D2Sh)N+0%^Fae)>* zN6W3=)li2ah#nJ+K1}0Nj_0>7c`*KKUt#KN>AZjOF#NOne5&k(7@N_${8h)*33={u zf;(I zVy@SzIJ~9ze!1~zrOC**iRsc@s_uUn-d(Exf;vzG=X5rbI^Aw{;=?p>0n~{`M)TXN z%}k}Z2EcUr$8(d$>8TK6I9!3N7oPrLer;3Zy6CZWsG)+kuAM=lpyHH`Cvm-CUX_Q9 zCvL1T=I7|>+OH%2q&tN7_=#(UYK)_dYGMDr@_Z5dN+>~j(R>$;yveS!8PH=AULm}? 
z@*%*bIEQcL+CyVGxpMZ;y#O7Tkff?zkB&axzz=Lk)Y3?mfgzy7%79l;ro?g^Tm#jl zz4d&EuGRXCYNUA8WvACdnq)`!H$MNkQHm2_C^__dOMQGs&{eO1md7x=hD#}qFQfX< zLwRvr;@ok`y@g_Z!4gc)XcT_p50XPs6UDc?{^;qCc{H?3N9!hnuUXPa>Za=v6EsT6 zSL4Qrj~E-jyw~0Rve}kc?!nelcai2?$K#s|05HZ>d?n7158~YeB7*HI9^R-He-=ig zl}F9+MZ^+CrGd>}J(c*>_f(>tA>rA4f#spM>SqLPr>-H-RBG{A@*nfA#=2}g{FbT8 zQ0jZWW~AnUPFvAk!79gyi`xfVVKn0Q-)gzt*L0W1H3?^1PTEXI0%pefkR)GZfcRlw zj+TywMa%QohC3Cf(3qA_js0_ZxGPOCI@*4`9V)$mt78;?V>0S7&;9-?#!8z14Yl=n z6(D$RfQ5Mu&Iot+sa>DsJ^2WIyRFm#OUxR5)AC?EKh&s>B2 z{bF*}-uQ866xyppv6D@p94J z^K#$auuegz%WP|v-k|}SFBAv`_9eg2CVJ+bBj_JvMl1+5B;8Ybvs2+Lnut-y39oOk zhfCz1s8Q|(Dk}*uPYzFG80&c5(bXQO7w6PZZRGC~A(pL8lHUZQ{=BDH2B4LQns=?G8`xx$ENBGhZjht-hB0 zD2a`mx&CgOM_XL#A==NJw03LY`0?_^|O zA)r8Lq3uEsyQl4i+gd4ZD}zOwloU zNGe|8dMJJVR`54RCePVUBkmSJy4hnYzjHFJk9?*dnERJq#8 z8dR|n{u%Lf_gM@UK?Gp-fEi`Ri+&XU8k^f!WH%)Ej4oVxWdn#i#RmUtgwHrQ(1o3A z)10pWRQyUWYcKp3U}1G~+wO!p&oYZPZD3cz(#{fvXvE3Upn__o1(?-?=2@V>Cc5>- zrl2M>Da_H1O(iI_`SH|{y?qNbTW76AfA*`-!e>)qLZffWA8d`>Id?@t?Cdu!*X_Ap zo7;pA9$JeRiKbBm#aA%B(k^GSwClXRq-ofSbCN|~aPl#~Ip)NlVzN4n0t&2I+6-55 z4Sx=&nB0>6zuEIcJ+(qi`0FZcc3R(LGAm-X%Dt8Q;{2FW{oK5vL(0QF?$}c6&-zCx zPTzD4m@R>(XtJ;}u)wU?YlEu(Kx}EUBZ$BS6TZb>F79Eayv&ANN32L>$vix!PgOy3 z;@ll8ET&4@#b8~8cgCvuq%^_o%(iX~r4sR~C&}^^$JgWc`M58UYUEL$-fPs0{5LHd zE)_+Jx{xoQFiOjG`RoOSLba%}h$H~xs*%~+uj7Gy|IjlB;HZ8XGvcK?l_}~r`oT{| zeOeYVm3aP~L=iKJ326fpIT~i>fDCu1$u^r{gPyD|R6|1p-@Pf)wiT@`wP?+k*Qz8H zF75&EPXDz6A*se21su*DX%CO$w&6Vc0Fs96b7od_v9PPJL+$Hh6)@yI@c<*my=e=>vaxNw( z-6A6$PkKn1lQz;69`9t{wQbWS4=86Te(6|z;)AH8lZkL{xx&qZU6)9)`*+_rF5r06 zD_PO2*|`4iob6K`2|9iGrIv4+#V>UVc3oW$HACVMgx^EWNfn=@3X#e{O99KUP?lJL ze1*6bHhNw)o(-gMmpm;y-!1L--&8g-F#e0=R16%r)rPQCmD7C zJWuyza6|`U&C;lguBj(q73pbD7i)N^92NAD7QIpC`RiaSB3Vc*Qc8vunP7U>i{lgj(slWBmSe#wnX<1>Qco06z|r0PByE+&Dp_w&kDIIS)MI6pSHpFRbSmS;&n zR2+Uk;YI$Rj-*}$(DuY)Qx!Nf|N0=kO(d((>C`2e35|;1#Xhp{D#6b={Djq0{}Ow2 zUFaxQPmusJ_l5xzTr{W5V+Ds`()(O=LaY#Wpi;omO-j|x^vHtR%9|e}J(owtKSr=- zRjGr1!F+8>=_nIV!?WiisbT0;JlDi`(|7B7J-Q*{&hUJHfep(`n5&1>Y*xmk!D>UE 
z_4#LoQFr=>VM3dhglm$r;BHOQOHspJSKuUq3UI zUT%5n4Gsj7M>dj>jp0**@|S>RtX)!~6SQX>07(dhj+d-u{Bh!`pjZG#>u|$ehdU#Ro2&&AFsY+8CnSFJa^1?sUP# z&Tgaq^1`Qnamxo(inkG_vU!tAuZ|oRZgyTwTda}nBah%(T5V6qh<$Ur=6<;Et{N3k zXQOy?V`r9=lIYfk7Z?Sm-VxjMFgtCY^~v*-U{N5Tp+82;g|g~Qr2Kw|A{(a%Ad&PM z5gUW-H1Es8%`Twx(7a(#i@L$Ow=dd|7jeV3D6{3QKs}bj@eGdx*Dg zp9QvVk3>;h)Nq9ponDpe>%L6r8u&o&5^$oL>tXr(oE&sL;A|MnuH?lQ`Q=e2WngyB zmdc|{62~lF(d;}~R(L6XEmXnWg~y>vR+;$?J~K_K9~SBqogoW7C$2x&Uto3mv}vU5 zqvK>nkyh<5H2JJL^N2;h4z7&9pZ=_FRbH2T#i|UGt!96ETuV+rWvjGseo!F;rYdZB%!6@3Raj(yE8-T zhdyVi%j>_I)L$<&Yo4mvniG~sX1M-ooI*~JnxVoU<2sVW6!Zp<_KTxQEkMWywU#sc z&gMi2?;XiU+Xc$s15*qToIXG#eC_fZ1yC76Z>Ojdq@@IGVlEFJ*-gKJ{TQh(XP@mI zf-bitKh}CTn<$T}NBY%~LNyFmCADF8vg{Z=Me>JF04pX^2P+jmeY<|vQvCi*wyxLm zR3f9n9GTR2f?Ig{mP_(r=$d6-k3(3Y z)_3G2O@?z_=Dz4T@RG+MSA04CF3_gW0ZJ@XN-+b{_+JVyDI!)p_BG_d6N@l?K|o}J zB%szCDzScEV$(lrb|0H*=&YC^m^A(&nS;a4`f+Tm$7ho71H?K%iq%;^ognn_*aNQ^ zyH;lV(-?al!b|`gsm@fkMV#Q5=^MHAULpq1U23KP9H+eZ^DKqkNyE2*0e=iP3Y&6N%bL;KyCW8xbG^p1tw-?y z&P@eW<_FPH|MGBtg25QGEA9jZw7;lRb(?=(|8d!yO3K&p*Wm$cP5H`-!$qwoU6E_- zuO7D(l-Df{7RQQR_8onsEL8-gkoCZuh(6n{~H^pL-Iuq&XFpf0f^cmDc|@1+H~eys{{CzyAq%YW6A zXFtzZF#jdzx&QSo4_S6~FW(sjCdyN!GVMV(KZCBtnrV-d-ajkw$kPU4+j7u6-KchR zl(oTbGCx}4>ZsjcOFUn@`ZkX97f>AvU*8HzXno@76?V8;SH0P15MvFAC)djEj^Mlw z=W+|~U}M@Xy=MrdPXvm4IFbEGFY5e$-NA3n3d6;h@cCGsrXJm1q?9KsQp@zLdhd;f zs9xuZp=fsf3^7dg=LD_7ry~bERI<`h$Hx)LN-zD{k{`;LKf#HaH9rk05vJSH13tO- z6sI-@G?JV!Yn75-Sz+{^Xe4W$?cc3y$nGKa<3)%ey`C{iHkf5NU+w=;Y`HJqbg&HQ zb%w&TY|nfpe;*f z7jL`@kLHw&-T1MXQUxI-g022Z-;G&m18vq1w&r)a6q}t6kDnFQMF%(4r5NW2|Go`S zJn-l_c$SaP{^qan8|3eg)lfJntG2Qgj8ws9h(ya6SBrP@KZQvpI+ONpw58vNxwwE+ z#`mo8)j3LDPo(hVmoocrnHEdEL#va`836eWGrcx=tGzb0BXp%`6sJ>=)=>;?44-HD z<=o`Ap`Se@;nIxlND$K7XC*-T`PQD{qyD)xqJUSf+4Mi>P`4&DAIp5Zvp!2^{3L!6 zFntiS-ip__7MmFN6uQ#PFv@pT~LqN{)6C(en)MTH- zJn>O%x!UH({f+M@UspaJ*M z3~fTr&BqTReds=D?_vStww3@DJLhz(UjHYMoC^KAVx?w-b=(kioM59Dx|_K7vP;5% zi(NVDO*?tCUN`~KPcYHlUe~-_Xn^#0=w|Bw2yiKXS|obVmlYLpy95l87xhbm)3QFd 
zD!RVM8}0R7d_`U6+}dl;J^oQy)t*u(C9(URb?a9OpG6U-z<6ZrhKc-JOwJ7uJIJ(! zZ^yMK2&Me2NJ{kq6G8Xa+?QC~I;EB|iSa{FNP+q|Rb;J_IjfjW>jEB@$d+7ry`O4KAi z*X8LYaPUpqWKS(Y5DOZskK*i03FI^$LkAQh!os?_rUkf|M}??24aHV3c8n>uJ^|%V zgSmu>Q@-?cE;5@h=PisON-)w>rc~f4$LRs@nCE(&-9Gtad9(f7!-o&)(KDfE#k65m z;wo1V2XJGIzJ#O7aWeDNMbqjN>n=>`55mwV<9MC+#~(4hy4fI8>wL@LPmi_22~)xT zljoiPqUwIrc2`fPg4~$oH$jJacXbJjI26 z1d8xE$iAx%SaL7%&Bam5oF#PyJt$X=2mO`J9jbN=Kvg>%pVhe!i!I1|{I^+bfbEZj zm!}N>zIybLYVuXRrqIhrH*<&*E}e+}BrhpxPQrz0*)F2w)pVLIdy+)UYs|*362s5c zrBfKqA&9%)iW9&Ol^VCmcvU-sY|1b>Jxe#MT9%0SevhfwgI%hzpT~LFt`ohbW`Dja zeuu67gHtctO<4l^VokDUoq%j6|Kq*ll{jURB|8{%Sn-0Z&a)s8eB9$@frn?^9DsS| z0}5{I28!bHw|<$82oWb*_xEpeWZ!a}krB%6IzIJ8nc-Q%nw({k8|p&|V9=<|abg zM^|QdQ4XzfS7OJlw4xPdOysw|*=QLVaj=;Ox5r&k0DPUDK`>9~I*paw`nlX>&hlhB zIg6z3xG@Kv*S)D+0tXyQ2I<_%_e?p%iS;(~Td< zyPnq9OLUMuLMTcp5MU($2y|#i(C16;Oi}NEO4V*pYM=0A@aWRFI$mk%CIXe_fcP^L z5{KYih*^T$ECT*lDaj=6QGz1>B)X15YVAKVNms$u&#(##m_ib>jgyF|6I0VXz-5U5=1G7dN$;Nu{&! ztxlc(5{z>4s)o=_(Q(Z;R1nM_iOmT-r)$5vL0{cTLq8I+X=qW%-KS|WK_Y9|7NL4R ztum%s=H9>x}ru`@B=+% z3bU3s4wFNXGE(WH0)zUp1I?Tz?ALcZ7oj_qwfMO(s2MLL_8gEB=#OQF9GB-eV*v@= zBKB7%<*XTmAIVTWX}0NP@3kVHoFp^#8kfEF`WLVc>jV$M+|U zkN7;$93M7JYZkcs7fhqlYMrWo4ma~-5ezF)IPAB}codYD>{zSiHbqyu!c#`;9Ng`< z#-u?gMm~mQfyuba(R{;-^v5@Wb(^D1*?UmsB6CXe&KJc+y`KPq63g7f@bVcrzh1?M zJHXUEsD198F+8;E)Dj@hHU9e6-abJp4`e?@j+56Eu2*AExlT>}V)&5Js#n{dU5x4xW0h-lSJ~)nh|3 zyb!c1Cv)f;L!eThcVX3S6tRY?l7UN6-pH^81C zTf%<2pziP#sWa*+VADB1WM{FtqV?}2+ahreCx1z_R=JfV{Ei$;~9v|qmS4Ns<{ z+adCTN!Ear;@(ut#VXf_t&lx-Yx^#Y+iTqleN%+Gb$ScStxu1WkPa|hy6FoYMTa2L ze&Ee5KC`Cl3T+C$>_prhV7v8rjSi<~knnurT2LT_NU&_bed0^yQ9@<2Mf&eyIqpsw398E z^Ze_TEyl-V*)`MUf(kG;17&u(KNA~l@lt>2$P_<3x%MmjQ=qb{NAcdyBhurh!sw8}hHFKQ)6^Bwt7xe}{8J6us5GaH+hme!LOw`ER| zt5XmO6;$|LP+i^ji1*eKuQBmlGGqGMRrIHMN*2ap@&r!WxgjacX8>)a*Ek2DWUyJZ zMje&J?TL5={OK7lZTP(*c}G{1B;lnD>Qy+M@3ufqaq-vrbUxFj&}$J((5!>rK6g7) zK7^JJ#8ClE!~HdfyYn%762Z1`*WSq)d(;VdWemty&+Lz#BVQJTF}~rTBI%NFW#4wU z!jTLtr&$I|zd2a=&a*0<$yK7^G1S!<|Kam%FXiEDUaLS_JC6`aVzH;*7t+%-ZI3<7 
zzh)R3M3ZA=np9ZPciP|r2@4?=K&_|d5gR`p*Zwoj{P_f_3ul)s?_k1K3H6C|fOvcC z!_e1Ko3p$e_M_E&E_0{2Q?FxbnhIG@iug!ehq5@aWUA8*o2t4s&zX+bn;l6ZQV5MF zUP=P`!uBAFR;csOqb{1nPJMiCeHi{)f0~Y0c{3UBe61Rnh4;ux~9UWc-0 z9Y@#k0C~}BK@LCAu~1Q)CY7IRq%q)~G2aJkjor+>NDeId5!6b*tr-p<6v?52dz z1*m*-7n@#)IK_DGuQo@#I}#z>IAK|wt3Nr9%4X~-%`Hr)^vn+A0w;ZRx_pR!5D4JH zfgS!tb*aDZ7lB?$i@uSZOF=ClV|s-XPCvfYrA)O(x0wYwn^W;(aV3j4G{-Lh$Qmi) zWT_OvQsHX+W1#S5#sNTu#8wdi8Eb*Y!e07aAdm{g=|qVF8)c4@M?c10$r`^BT~0)a zpYf~l!r#?5*VsfaF?90pu4byIoL{asYwvq15kkis<9(24N|$IAQEG(UQIw3qt>qN- zt6;veUf67p`%Fuxn2BU$wB`Kd`yCX(+A3>PA4j~PSzw2Os`sb+_R4sSIm2yIX1!P* zBhUo0`lo7%X<@~RR4zoIdl~M5Mm2;J+j|<*C}Y8um5IAeKuFv9!}5Kpeyt~QLo$xS zuc7>@p>*C%u@ju&H@STe-C0h%eXBhn>R|d=su;l%2~xB7R4aRk-UoO!P5Ow>q{OgA zo#E9BR?Ygqp2|Ipep`I5leo%N*k^RkoTu%+@3RTPll$PdpP_30q76DI@#5@ zS7jNO*~U#vyyxm`_$Gnr=MVYX@L@l0-dl}bqmX(Y6t8+PrZ2hdAm>i;8p}LbdX1CV zKHS?ZX%+D(&P~=!m6Cq%bY|U+ zxt~nA24cII_Abqa^`Aoz1(?Zg!7da(AZwr9{#?LDa2na-Is|mG!h6}P#Wx$C=Y2xP zj!j4Hd`Mxn%DmRH!f@);)ZrKF7}KgkY{eui#0I>nwemEVX!#<1(`oL=`-Yai~J`qsi+2t$La~KKcNN; zu6V2CkCnv9V6wvF=L1nZFS6R(Jgt{L_Yq^vp12U*sS+O4J)eq8Ot0j16;(wPrCfEF z88pOJd>%$_Rp6)I=4Zd4>yFX&KaV90o^kSP%PN=%0IU{2eseqNSWIgZU%}Ca9TSM= z)H%t<_=#buo4FRO4Y7P}Wm==RAFa!j*-9Vv^U@t~S4tEvZ0G=MLk39lY5Re!dJeJ{ zu`@T=k4ZaRjJ*7UG8E1T4=AyW4J6T(EZtJi&xf65_gRTrsP8sE`1BnRx1-%*qwKg( z6L}9nYVHm+0|J|Jr!ViKz#Yun#QPonhk!0{70advKB0L%xX?ifm?FzykzUtaLj$D+ z;Dtdmxc0-_zE6aa_Hm&7ktezpNQcrHYBy_Svn#6^s~vE6a6%ql_pp+ zbzmOIUNASwWz>J??@jEOl+GRk-c)6Ry(HiTga4W0u)n4|Q?X502-}b#Ro7i^!WV;;&Ocae z`mUawlb=0<-C~1!7YiDFjmM|_O{(8&490L9l%l=VLQT(dL`@hc_<2?95QID^o(YA% zk?I%>K~$}3PD%gV?D)@-BzOv&M9@zQ<>#g*wMj(cr$QPoeN=?QlJ3RCs{))VFiRtY z9@w}j@yCzD7ikG?a0_iSg&UJ&RS~QTxAlS#EOzH4alZUnlVilUfSa5K7tdv95!Z!m z?PvviSNfbyJLE!8s!(x;YJyr^#${;B?!o+gfn@I1)!x%U13rgJEB20%78HY#&m?wR zG#?1Ub%`Q0A#IG(Z5H4->bO%RwwlHLy9KO>{LYbjj4d$Od}St-rXUp z4~15*v{k?oj%~4d*CCsdFRK*`k>mA%qm)rwzn-60jf@va;;>&4oB=k{9Be`QS1`_g zEseIU3Qr?J6rm^4yt6c4z$+>m0=YZ9kyosE0!}P2@r6E6ZW+T2n(uncoM~|D6W2(b&U>g4;aG6!kzz?-|p*|PlhgwpvhCeH+4zd 
z=$?yz@WNQbk@)SF1b`I{Wn^W!J7(JBxM|RsmA;Y5E!-3{3`XhBKYEHB#{CsLM9>pW zij*yCY{*Asjjwc@A2tlR+b^|i#-I}&Pcvs_q6k(mC-20+{=)@u7Q^nnFT7Q*S6i>CRyNbda&5}RTq(N^-)v#eYoe)NaDJ7KxY05R8P_=`&oWn{&Lc41{u1WlQU`wZppi_^m!` z5r6*+X`;7j69tM={{u>$P)~AyCEyg7Zp+wvo??4*!ObE2I%pqkGUOWMsd@jgD;-kYUvB8@!vn*vG?kh&h?Vra8M+xUU*dw1bb z;p}tMqye+8h$GMHK`T2|_v@hG)&$a7YT%?^i1JvK_jM-=<>AlClD{%6^e3C2^`l+0 zDQFZSlVG8=C@X6&&UJ!R-LrPv6wgZamOXwRI$68IblLQk_J^ZEQpyNSJf#&!Vj89| zQ`Lt4QoXJPruVpXIB5{S0S0kqU>5cU`OM8S&=j=ntlz3lU3p+Y8o_7Yg0!xic_QK( zw+4c7mIptxi}UA66h6%Y1Rj%O5M3{ir8(AnU*lnOC#v>qtycn`>nRIUR-C;9hmG<9h7P4C(6A2djUqLzTH z(E=L$#_5sq;XkO%wdhS7eH} zsF5*5-feA>^1LJO0Wzd@Wt^2!$K&3+*x!Z^DS1>`qiBAG8`a{a`Fr~nmKXaTOFsA* zEhkBcy=e$jx5bd){LY8iK~y$XLo~dV+loGPPZmqV9Q5TzwKYjbP!8Cbya(DQF@m-O zsI#KZC3a5RE6nbblzUQQItu2^c)pMCr0-~jB9U z$QtmM!E$zMpou3zED!@n+!d3gAW6{a3dlsv`fL~H5hfilj;@BP%BFI6u4n5`<-!m;Ey8eieEf_kgOw>wg!wadjWS5fuUP8w|VI zhz}u+1nzu^0?38Fw$^Qx(+{O*k8?$z|8ADwbUMkV+WR2h!rJYZ|D6=E z`$2b>#qm-OkkD>cHqT5UjK$s%{$Z%M@nJ09fl506-BL&qC6htm2xlO4+UH5LjVU}@ zugzPlA^lO|2tu91H!!J^7MWDp0{`0sjv9sh2y6l1=s~D+)#mY+2nPv!2VakugV!6R zR+Q2P!M5jsfc8GvOol!|2XqN9$@HO@tsV;GBGSyo*!-1-%lCPptU- zjy^h5_U%fqy)whP!!QV@ks~Ksvm_OjExQ_9Ps}?y>OO14{h~cVdg@5wv$jOcl=Kk5 zGA$5f;d$(+Y-utGDoBlgdR!sRMBWaPIIIk+4*R%_N8##Wmwym_LBeM}VG_3{K*8`H zmA~zj1fD~mpOKu8y@^OOWi^u89DI@i5K`S{q6xSy&hMO+p`5`h!J(GO2H6Kr_cblF zb+FW$VTE@`^cOu-`4Z{YW;$jyG0$4jiWI3hK_W{yeWe?7n`t^H18qDLXMuHR2Ij3) zwaM50$MeflEq*@vuP7uBoK?aVZ-U+ZXv8D*x`;msSVoTG;`LBcRC;5M#u{;N*KK|- zNEf-Snl{g#PE-i@9S3Q~R|nj}s_?Jr&ex+tCixlJWgj*H96r-N zC=!*->-42kmivZd?(YD=L%+Ryz?F@m$jr-}bk-TD!5bQ}hi6SXI4hB{=7dry~d!5@)3 zoeRe7*4Y#vu<2|eva_vdx3c?o0nnZTYrtD0?LRV}pydafa~DAeEa;h8S=ab&Rl>_- zXt#_Soih_?u2iNBz>5et~7U(8>a|Hhv7FiDxS2FxbW%TmhDb_Bux0K<*>7Fe_heNr&6fKDc#TPyQ=1f0z~TPvWVe0C3emX37Wxb#lO%e)>BH&Ro^tHN?{L0jE=f|B!!?gX0W zBT$w#MUM!Gw>oC-u#vvAAE^vm7_Aw~J%L!fKlep5ck1!_@aiN-$s7w$;bR>VK}v^F z0#1`o`s5}LK$-C@(OqT|gd|YfoADb25&%81!KiQwnyX)a>tlzO<-C!z0`3p#%Gj&-{7d16h+TF*XZo^mGX-s?3v 
zh@Sjq=5}t4)KrV>``mD0CyYB`@%)Jju)(&my?-Qa(|d2;TvAFLxNy3;rlG|8Gldb#O zWAq1Mvce1gP{#|4@ATrJ4}NR`1JJqL@$6mMOTM`D?CW^C0WUz-7GSNVM>jxbmKHU^ zYon@84fp1c`pn9B7zEz4;8!h6G__!bKG&u`g z=;H0Sx85K90rtPHNrn3)hHZd_Qe(`#Hoivy8(_3&f1RPx21z#P81`ew?aT=x9?ZLk zw`#Nd0`F)>2%Iku<`VhI>i3-)+21JIU99X8#zRytWYeGj`sZ5#upF|xubJz+T-jf& z_vx_o&R@c{9Csu~msVV7ck>&7cFDQB^i;<(5I!ZXu9g9(y*{NjJZ+Y6nR{i|D>E$i zvXXUasGY{e@JQ-CCiM1?MR3%VO`6?BH+cRX(&9b#0vP6iv0=kft#1ul@~p)urUMNu zNw3R>>&_Z`2#S#rp9#^_7wo5&D9nS8ikS3j6gTZkt}%yIjFDKM`>RG2N#>E?8wdq|l@f~KEM;5Zi!;%;+qVcJ=v%o+{aOsz!xfW6Rn3RR zmYmk6$N-jonBfKaJ*BN1Y8`hE)plha@NBKhXLvAZJ=yx?i&~Z$^PoDyPm^5V(%!M7 zjVf(hqdw|Cl#d2!qE2pdP7&~%D)5UaO{6IhtL4UxOeas?&G|w{>ne@b$+!K^kYv*+ zg^Xox@WsVPxO*X0O5u)C;d7W)4z$?G{zA_<{OMy%5#`_APc**!`JiO=f9ZZ;+1}3w z{A)Zl4T5jK{9YvOnGDdCbwuUf_{0JBR9wE(j~@1mI{DSNl<*K7$k7eR{b4|@h*3TK zKxYy~XY3~3R-1A`lSLY9@FJ_;1mTy?qL!tWG!1y5-;=QQ^YA$Rgr(si3l~xniaZ8T zM$y6H&9|9k4t3)SXn$r-Uk~tNg zCf@sX=#}vc^mZzcD98Qn;UfjbI6if`A@s>ka1gw@_5NB=E!>RpOmFJ$FDElfc~ARyiWxtyF)x<+^y8@Juel;Hwp{upFRC8kBXauk;%M*V z<;3VDx!AK3o{FPxlav7uf(7q<$)?X?UX;6pwnBz{DflL(p{HOWc7G`xAZ+^DcQqP7 z4~z2w_40BGppJ=qV5aIRvfx}?2iT_#`h0G?Sc zd^c$u9!E-DWr4-I`t>><|NhTh`umd!QNRW#c8@!H|8py^K7?gowcwciDGHTiXz%^t zZ%GY?^!kfS(s%mvX=vzLYyV`8?T+xC#T#IxWbjE7GK1E?3aF!3k24PNf>9}?#3@)6 zT$QEAI3X81X-K{FPA|St(L&t8jFxuN;|;|1v$KEUS(iLPva1`%Z+-98quYIXIe~$J zZh2?i=KHvw7H}r`E-8h_b=GdJ|t3Z!(;t zYTiE|c2!k)uw{#uK8S44ezP z=S^+TfT>OT-b}moHtjH^dr|AfE+`OPmKg&=Ztbg0K}GW}Kx-KqX>YyG8DP(n*ZJ@m zSAQ?&b%i@E7{86vJHyZY?&w}w7h~QC=v=ffNWu|1yf&8 zf!5IDYRAD_>Bz_5ZUyt7#piuc7c^v{_Tfr9hL8pZI~L&PV`%hft=G_)1c(H0y#87R zrT2*45^lAdxw8KSn-W!=kbVC7+J*x*bQt4Rw@<81fF~S7D0o!en|=Zq=GTkfWvqU> z%np!ow6pB<%jR?yuzz?gI%Vc1)qMMl%Ua+?o_WqB2zScL33yfe?<0&~_c6x)tp)hM zEG@K&&4g@%|JGee;=g(V`Qsg4U|7v!`unyIrb8gP;(=s^UHWZ0C+jfFqD7(4%Y67P z`EV#5`78_mx1)~ZpUUyO$FkTUzm3=k9|cn63c891TX!KOBd1yZ zr6?OtZf@~U9o(i2$t8Cq0w3d812f&gFmUH@ON+hTm{Q$w|RCWE|nn|BJR{ z8%u(sGfH1b{HHzn>zdpSfUh2=Dl(1Ye=rPL$MLV;?-%}D)Ls42A&H1&Q(^&QssJe) 
z3fB@Zkl=GPH5Q>n@Gb$Qx(0nu=bplohz9)e4DKq(6mLNA8V^`2k7d<d3Iv|pO0gL z9Jb9wn_APNOLn8^Uqm$ALDaJg|NJgRlobA|Pi2TD$P@=xB-!!ft_98z#?b@A58}-| zp39}LaePzi)dt(R$mWwv0r)=Nw@OZgiclAQ=Hlo))xA=AO$ ziEwC&cBTxlX(YYw8X1p37YWu-xCPy37$g4s_N3y1zm;mC&uRRhw`ZXgsw@6UkY=3; zEzhTXH1SF=-HuutDtExEohh(8?CXUtHKrJ3i(ta6|8-ak)D6%>c38IxYP=dWO>Voix zPv|9l%1@94u)5~ZInhcE77>30;)xB2M7W+j;Wm65*$l9g#mX2mMR)q+bA?LRMHc%y zBi&@5LlJzX6kxd5t#gW9;#-NKr!N~GqvPSasFnEz zp$6~RR0AQkGJIpQTu+ z=4+42`c3Hn*;hyhSaY&+?#V!CXwwx@I{AicgH|B|5&pdROl?JY>Qjx6pVvB@8Bhh& z;EUxK^z$84WFyGsl_B+QdZBE^4ypwY1f5x`>y_L>L(X{bV+gR@Fx-z97Yk06^`D*6ckk5H;r*a1J0O_t<2EWs9WBbQQ zSfY9|+FC%WHwAC~u_4;@C+o@0HWqX>dXP{4bN%}l$c01!O?Ut6aM?(qnZh)L(L4Fn z{jn_V{ZjwS9bhzc2pcgP8afr2v8O>C6yz%TYrhyo?p!u4NyU#lFeYOWAnF$1fU;#g|vK_i?p=-(IC)Jb1zr z=3}PXV!xHy!QEos7SkcoMfWZSs<=peKo&kcFZvgNKSAf0YjhNHgNsc++ym0}P4i_9 zM@~ow{>xZpZ-0BEE8J&XAk}T2^oH5C$r40(D>&P2}|K8?Kfnrp2uKm*C}V#{#nr{ z@V(%~aEyb@-=~(HDEkH;!btJjht^e}I1$e~?tv)g!wvU4Aa1?^d1B3nHDuZ_{EZdL zoWQm0L&4H|ssn9uh4(?_WJUHFW0=3sOY|@xl<)%=a32pgy#5%i`OtCO<>}>8!tfK52Du@=&LD!YJw#2Gb`+Oy?v=KX0&fpPvSJh4>mr3 zl`N*YprL`EWZGt7nurF=fS$yo(-y${_Eg^F%88MPj|1tyn|D4r)2{G`Sj zdMftFaVTt!Wu}w}MVR!#lJEe3Ta@JEj)~89JyL?xYnuf;k)HdHj97|qT>uUpJ#pO8 z=V%}D)Q?2~?$)3Pv>DjfSZp&6$MBjk1M~MB5Q5Q_5xAEu+E2eEy}(yJ!0{7b-1i>T z)Bmq4(;q(t`w!mXiTcN?qK@P1)p1*yt0_baO!QowJkx9o|?KNo`BV_#*0 zBPW|br7^WZIi7-X@9DmfO&=CzR)|Dnbb!Zy}ZwMZTvvD~h4!sNe#ww+JHp zL!N<2p%uU%TdzkClVagaGt>%&xIj|>1qg29D`~h20Op%!(0>0P+TH}3s`d*Uen~}A z%1{ZBN~TIFb8d5zONN9ZR6^!?b}NKJi3TpWC_~1OGKZ36DDymKp6A)O&*ctEzyJSR z>-*Mv*Lro%xzDiYy`TN;{S3}>3gLKh59H9e9fx{SwXH6}gy6+LQOg%H+B9`hNXKQ$?kmts-4Z;qUV1LiC<{ zo>Cz3EmK_g-^{sBgB&v!UWH>rz7*r839)l>ba1dL07nc%J#C;M6w)T_&Bl9_Wssa) zJ#(SJYiBrB(jGi8@%k^)d65FCpPTXys!H&!mG4N9Kf3-dE2MKBu^ZbCEAgzde1D=I z5i#{mT*ba*^nmBGRyssn6kRssB{Eo? 
zA&1C~AJ}w!bx4R5Iffg7%Rk+Qv(*WAIl928G}1&kudhauHU@5l?S>UAikQ zBpXEer%SdyTs9W688BeUUjQAmQFb?1rRZ|$AFe(yn4w6WC7q#Cg*K$^?}NneV@Z&& zV_*yQe0L|m01v@ODMn4XIApVzlCya@B&pTRe7-`T6Lj3M_Jb@GvbYJCJp%>__nYS7 zFq!SeamSax(?BbJhvDl(++AIzTk|d_JbCvS>_UNv=6jM>HBjtxuR&$>Vt!-Bf2x=L zCn+^0=nifHvZL+G9h1AAG{n9%mz2h?w$-)4RMDcRfFdZs~l5PC!|SShP) zZm^FVk7-KLzLi?C7_FzHY187WRrIpvXb0tO-;>ZJaKrCoJCJ@z_+F{-M=jEZd0;_Z zlJF3f0IeCn*rk{r)50FJYIsR(*!-Olx|u09)#HE5;f#U4M{QaV1S(tS&7I3?-)9=R z56&~~bD;~61{wglS!eVf43|7o6?q2fWc6`j zMy})5E!+sSnEak7e1x!Lqz#f9_aT)lx^g(Zb&2DS|K^faV3$tA+8+g0_7DO9@2Y1C~~u= zb%SNg!G+c*s%?h#Q`n(sT}gx5D)7-3hQzbXJKC$9dQ+VE9H8rTPinySjh?S?lbMsh zOv+@1E$6SM2%|6Fp@FNpqtSJ{oact)ExKvbSZpxaFpHvp$?!L>(rnbtt zzsgC#VKgwMWCL}PL}ZuCnoZQ!jnnRCC*coQDt)PO=Ndd@Mt+fu2gtrFuweDNJ# zC=YnoF17Ns!+@?XvODC9Uv`&sj7oF4xG^^Gze0zq4~C07yy4m|{VAa%apD8j z^qVAxvh_5BZQqxG<+N)cb2e7BBy+?+YnQe!_wmKtUB_7lpa?i*WwNy-A|v3q?DF7@QVH`&)E&r5V+B^FtHj^HX1iGmj5mg)*RLKK4Yt>zG@(8lIZB(?{G$ z51Nf}?+dYop4>Tbee$^BL+CLWW%7kd<%g)zMtW$9Y1@)R+q4z>be0s@H_i`OyZbne zy*zV`Ikn}L`@UOPGloIun!@5uko?!D-?#lxZG4Mn2dgerO3}J=hhJj4-49D&^uscr z2G$f}jV^d$H}exJHs`LFn)+(9AhYj0 z|9^mPJC$uSX0Uh%i^t~tG81i%8z~J|fvg6QxX==0t|2xw(5MVM#``o1*EIac;_^YnI z3I*Z7g-D5F(fW+Z#+@27znZY4_RvCaXYQG88J3lktvnLLLhLFTAfe- z2F^;>f2K{8#`*_5EsOcUl#T6m)6M z1=bO^U4+Zw>h%xZrk zlLNUjRhEy;8@A_4nqr5`pGT`mB2~0tuSvg7;-yY@7)m1HmW4$_Iw1>V3kz;r0dgY;dS>>wfnW^UWaTg>fjzClN zA?WqHb=~^)X?7Ek?z$P;6Hv=Z4Ch79wp|&18up?JXocBZYT*p6_pT7Hl9F`jFepU} zh5F1@l0QYhL?mjTR}@h#!yl#RVP9uIRJ4bm#6%QBfyu?zMkR9lwGhHU1> zTKtNxJ3byM=W3mV!&!Khg&spLIGN{-LUiWrA&;@*8ioZklg_ILB2BtXG4{H%|Ba!dcTrTHU|e za-iXK)u;Ia?vRaOD{q2kpPBTzVqVvvkMJ;dd?2X|jYXmuENIc2sw$3G5<8A)!MTv1 z;^`~~w1{Z}k7HIVkO-5p?+6j;2#M^0eebg@QzJf%Ld3%Ori!fNTG*M%(hbBHJ&0uxfGW{vKJ2c0#m%}XS6A?%j}eNfpTKzMZUDwrtd+*jVi(TE8&S$5N$ z_Sd9gU|?wR7SCjZ?wQ7xClAc0dqYFrVrh}4>0;{~!<6}JQsKZt$7lN(G=@E!Xg5m? zp%4~SDuL{&&n=IP(>WeKeAsTa^->gNeNY;gRYN`WYm5E?aQd! 
zQ2FA%Wo@l(?>@rE`(;4T@Y{~!8<}IQsQbWPZ3&l}*Nei*iQkBRf;tA^ykiu+oA3j~ z2E=Rx!0#xPI7yt5BNP^KR>a2=F`bY4?h|3n(SEd{Cx}p|W`rk-)QQrhCtyNy zY@qI~UQd+RRGtiASr@0q9HHq=N*=A0CCns`v?Ep>>DWtLkFN&m3U|f}?v=wWuLp06 z;$`SJ4p;|85mzJtM#1#c3xa{>f+~T{NHVYxW`Kt4TJXg6Qs4vPH^?6VPcmWre+5tc z_va77#Em}~NPB~o`d)%Xa|Pv-7JUQQy}+g8u@cN3`LX^YaXC0$z(6n!9dW}b+#z_9 z6Crz=_zm($j@Sfgy_i;5uV}g0 zC{tMP7u6KYeOlNLR*k8|<$MQK%_Gy8PMY`^Ep>00@F*1-@TsV8fa6s_-6=>~^bXkY zVVvzAf0fG}~x^j)ez;kqZ+Ec+>RD-&pD5b8G8Y9kH)9At~mx#iy~Z+XxX^!pT_I43r;>dQ8)rvKK86F3V1*e`Y5 zf25^SQ74^)HA!ZCP#`?SjDUsneegsXA6P1O?lC?DRO0jjP|Cdz?C_L=nx*QX?cTGs z>YDkBGRxW&foL0c0Id2w!smDhZ$NnGVeM%W41>T=8@mQ`4(9D6;pvbp>Po)b^1r{+ zU*X63l_%YFP<)`pwvB=^3qYK}Kq64sblM|^;{$+Mwiq(xIv3ta$AUg}TUPI!p%r2Vn6_fA@;$53+*9aEf| zttGHgq+&|>1p>>JQo;QDEPk>q(a}+=%Nt`htdUuFYGW_I1xyyI85M!-T5|TSK4Dmd zp(*Y z#YYA#Nxcp9=NW$OVB4{2SO_833=k4yMz^U5U*lKXg{)m>e+rorI00gJ{pnNuAl`KD zkyw6Z?9FrUc9{|vP$@{j0F+&&Msrx&ZDk zfF#DYJIth?08*(rL4~g)VfX(|g;#9tIu^!-Q;f-seCr5H?C<@=M3S!{bA(_szq(|J zoK$)e9xxxvXx>ZP%ec{q$QhVQSap&U?SFWl51@_SuEw-3V58giO4Y8dzx1ZhCd_?@ zWh=d9+Q3N?RW7Jepy!uOI($TqjPM4ySlaz8zW^_4ReOnQEFvoVN7lW?{OXzbVNaFI z$yY6xDNB?(O!SPdwgZ124n1eGg+o3QMwphArywv5!Xs*2Ah2FuR^Lx}h*}F$kGJ%` z+cS7Ev`(F!_~JKJ$Bg(?mF8rx|2xGKOAtj3QtF))3+2kC6wC12i9rnjtUmd%_!8I; zPb?iEb`pe1LXr{j7=tX;b+=aP8Fa=35;TOnw;{K}<)&(RGEd;|a@i(U=}@P-g}upBQ?5%6NFjJ#RLNHQsPU>7XHa@}}9 zH!sp!>@4+2G2YR{c=nu|c*r#nf^sl{lcb{HiV6Od`#3j%+0i>{MHiSWBUN*sE&&&i zA#V>8)TWeDB8b6#K_ontg<85ae+FU&Ju-+~OOjCVK`K0~=zsK`h{l(Qs!I>PY@aJqp4f2i85dL{qOL3C-mO668$cs(1N#@^UW6_J z)U@bgn$6%vHRE2=3pIH0vp*Rx+ao@qHg@ItVD{d_me5`keC~JOLO=7)Zw8Kd7ce6F zmyL+!Rl6cy%vnP+6@KDM{ysu9nkX<)*HhuQY)ik5;`Z%eWa9*1Sndqf4A^>Qa@Tao zC5%h95N0|@Y;$ZT7pPEEH6j?OsM`@UnCtDU4#XA`BterA!F_Z}4gN{C1En;I+o_31 zY=VY@auk+Wy_0MV!2e(NaPti|d)y%4*^&DgNl0My;K{KiLwCPQHV>WwQ4Ij>o=U-S zyoa_-fXjiO;9o?$S)aT42DUai+G;J>7qK@tkaWZ+;F+SG5e^7?Bi^{UpatsQ-lp_N zq6?>YGLpj=+&_?@p(U3|(FHo7T>Y_E1h-;xvCe;9gj$0$h@`+wt(p~}|Yw|Lp8)ypN1NO;kKTO=nECgcP9RjaY` zA}t3ID|@DxW%;7ZUf)vZU5332YA+(Uf3CwDI;c~yPL^Ni3pwPqV4oX=&*>5O34h!M 
zOBh`yrY}i+sEowM&8%Nu3XDe|nYqnGd@&BjXrx@665eF1U;?d}2)6k1?Yn4)%@L)7 z;)sE<)C4D2&@Sp0%=*KEohW)ae1W^TkG@#108kF2G8ZQ-`#%gj?9h&COayt7bQxFY zf~NBP9Q5+dvE=jiLnIvHLxQVp~#NCVC|%1zH<}R0v49nulDvj zvX=cPfIs%)0-yYc5ru8t*+^O-@)i+2hx8eUB8Q$DIO{|fd^sa<2hR#i#u1Q&Wgj*E zw}(?R=_}ze*c2neGBED} zY~P;G+rbWgRLc!)KWRr--Fs7DTL^b?aOqAg(h?pBUW?Sfg6}bN7 zfE9t*Q{?y%Nm>!Gu@#`@!{C3tM11T=%gax%ZfAAk0Ns;avEg|d;DD!h!3%R z-$Gx?liv^xvJ~teKcCP`C!mn5-jU@f4K&I==!*@ekm)0;DlaRc$=n;@*8?lsmx zR=)ov3KdR$(;RRb22g!m+d<9;s6wC=wM+_rl06|0gP#1PF8U%K%~xmilQ`&Jpqr-; zjPY(%S0E~d6Wqo?v7-;YH@df%IP!hZhA;ot#t7i70dNhXpS%briwlY!p4jeZ(c#2= z(%3&WB%tEvo@ud{=oA6SXCSUswqEHJq8?B@lnv`Fv`qz{gF$^E@Ztp*ln)FbNolvj zIi*SkM(l3JNClvFH;Vf8UUjh;Zd$Y4>3 zZf|Iecu2qk&X&@BzQRLmFmy8TYV`;EVCv1Wjiu?XYL^gw{pM#N={)8QJP85 zw~tbfQp{8u{WDTp$6wmzZ$XVnza+|0SIZBvHLR`fB&`33nWMVpW;vH3E~U;j4_uay zUXoTzH~`{IXuO43%a|`~pSYmYk1W_G$e?ruIh(jqIB8gdY|qCxf;nRTcf3gFDJ=Z(qPatj1t6in$M?Qjz~&gn(#T6qPl1?*VeBt~fsYB0!8-*^ z)w5mmxmwPaInm?x?t_&^G_*t!(G`jhYBuV=;=FC$4_LHR97$CqBjywJj~S83Yvp<&eK$TkDzHwgQmHP$9b zPYm-0h@(&!rkF5XK_=e7e4e4yDTb2p?FjXrMau+F^%Ex^i3^w#ASmFIZi1UF`AyZ z=x|wH;|ljK%~1Tv&hAT;kiZFYz}F8TkM?z2X_5_4b%5gS_TlQZ~?sYO5XVa>a^ifGWUs0iOJ=S{C`vi7- zwJAYu_v}728kU_}9#gzPD`7D7bV2TY_QqNE;lk<8x=?g7f3oC0AT<;h)e}XFRLK+E z2WojB0+j)VnLIcTXo_!dNqmD~she^X8|zn}+#k%nfphd^^~vY5L?t2r-bLP_fbRC^ zF>`Dz!!x#~-?RBTDV21VgmqjTBeDnsh zc5P79Qa}lH86!l270C}6mZcO^O^`h2f_jIjjs_<$B3mG$T9lF&8>@&uLuQ1QUd5!T z%sZNI)4l0W|Gd`%dx7!ol5GNur=X-`GI@FP)J_9cp!cp&)YeiDMT8<4EM}_dPcd+m zZU6~*IjLqhQM)4&TNZP#SW+l&GX!mjY%)!o$H3PXDONXNE3x?b(fR9c*`pc@9VXvu zj5~<%fF*`NS-#F!>Wv|@Gy#qW>V}X3cBd z1>+gy>S{nXm~AhjC(cbpo#%_hNcO9tZinAC9VMWeI_V6!uL9T)h$%C6!1oxALD#|) zJveX{q3est>A_ucu|vhnO@B{(cxz+iRRcy)4VbgEdlnbNe?prFkn4Q(awQ(z-Fb!5 zVvh{XW0;vwiO;f|UZ}P;P-*{$T9NY$`Js9M-3G(2fcH_*0}c3)W^w85f^Bd_lp^zx^I_@+m_!VOmA<47 zbL>pKp^6+WXhxTZ<$ou+6n_j%eS_&dgh)ogqM4OcxNK5_I4A3Y`dH}+L4N{VkcD%+ z%huGG8IJZ@$K8A5?I;HsC6bW^vcq3^mC0GaBZe-Gv`}XP-thu6TCMz!3`vX=784PG zn%i4wpt)EX>vh5DQ!BPKb)PxczXv}?Ic$AySm%1d_0Cc4XD-FsCo>^nfLSf`Z3(TI 
z(*6Q5L0qEI4FR%qrj<*E0oKv%d4-(%D6X~GjZw5OZ7Lyhoi_T0A3xZO`jMyrYksT_#_g1EN}oYhfjPwmTOs4F*!Ewk(}+fuJrXOw{VdkCu~M*uE_l2UBG{OB4D z??^<;(y)K;8}pr(x=ShZ%m!r3{lFOdO($trfwiv>s5}aNb#9;luq^$2fK*FX6_%mfpFbr$APiUC}2zWwPgPEYF z9!LtWx=cKtB!3IA?AgtMQ+$Sw^n)MwBZstdaWPN-9tagnc~|_C84J->BdvVt17yGks~K^`a7@em;OyTan;Gf&}hASQVwO?BAk!=^K`Ph?Vp# z3!;SE*>(&#Sm7tcya%v#Nw{MW3$sGaKcZe4AoN5SKg=y3d`0oA4;M{Ud@OY)|If+5fsu8*y)2n=lpuirej&Kli zodS%yx|Nv=OYp}itbI>qt^FAeyY7m=u_|s~eKZsY7CAa7GjIJ(g|)C6KokSU4)kvM zJI$!S?FEj~+n6b@Pg8>NHx5-C?AA8n*62N3d8{_S^p6$F|a?r1u_bfQ*{ zoQqWUD@y!<7Gz%&)OpS5ba2M|?0%x_yI+2P^e>OE>T(7C63`U0)&v0FCaNR;qb&sRb1MF~_EW_qsp{IJ$I%J z!g3En=6@yp>Q{!S84(L)e^_urfURKX)xF`}{1aD3Mu(?vsW@HEd>$}}_-?2`5FEAj zjt1mzDgpT%H)sY^RtMD?TpSU5@zv%OT>$qDI}q(u!9( zl3ktx69jB|Ziy|k_CG2Ux8x4rw|&K>8RBB-ww_0qc*uN5Xk4%KRy-pzcm$HT7gPJ@ z%4($Bv!}TnGGJuziu_LjLcd>=qqkc$VPyLc595t{af(j6l$X9)nX*Zo9}YM;e2;!) zkTLH;Q_vk=OWZm69~ZWE{T>Ty*ZoEbxA#ht@ISu3O0pe+!}n6(pc~B7{D^+&W7)Xl z=Rr-G+d(|5F)T-ED2g4V=+?Q1AM?-JEJfzWARjc`=cC)-&JDs@!j!7_#7*yn4_I*Z zv9AUn>Zg3oV?k!~MR;*RXQFT6ACu{r)Ei+(vd=GE3!m+@okHY zHTUd&wscaAk^<2)6KQZ~)j`|&eUGNMm1Bc;YLOp&5Lc{r5cWN*}&LUC%eFK9{ocVjvz!N!;_9ZUyAJi+pR)4mU-JZmPXT_;?@rXOP z%s4Ei;8O+wDUiE#qX^FeYvppZ^{*sQa(t-mK`7KCD7EbMBkjo55P6ZIYkVxoV>p5S zo@kEfu_bKYMlR;1%^Hud_KT!t%nQ_T1gsWd`a)?N6wDP=)=GC{DJ*||6JnyRjJpxQ zF2I1m@*=uHujW?5)m0Ej$7o`QUK6MhG?^i5PJ_%Jhb zmPqV8a;|!|Fk=Nki8AsK4@bJr5fb;4U?3lc+WHM4bwv^GN|rTZjq(xSVqYwsLM17J zIKn!ntVjhV4${tBS}1&c*WKLhcN#GDY~oG3^I&A4y=Bt%?HGJK2r=iN)7?FZ26 zR~;t1MoL-?3O7Sp5GfAPO~tI-n^zmkHF5UO3VSBiU02!G2>my%Aip_>pCwGAT@RMi zaxl?AV5+|*{t+R#M!Tm43sJf>r&~Rbq-}H?zSUkVv0s_tzXW+q56wlo&b*!{ivu5i z?eT3J3o`mbu+QN;=#X5RftRR7N zBR0h)!TRUVSoW97*l;hHIcYmYhqkt3*BNb+`CFz&B!dgIhubJzQk46^RT1zWRF=Gl z+eosAfx2zi3>NZ|%Mo8rSs?&`_^odUioGSs#_U5#)9)$n5nmH9{O^ zKw=OpUYDpF{dZgeXHQ`qT$bT4P7o`Qr0ktR+{4O09&J6JBVOo!`Ief>)<#v~_s7ac z+5g59hrBLEB1hD8ez^0_Fr684e2F0Zf4L&?mYIfnFe#!Y;O{VruNoo|n40GHO`E?w z@dV{>cAKVgj#op`*`e*MULV);x z#n%8nxHwAS)awP!Hy&{J--l=b_FStMbGivMr^O@qKaO$(|qB(0Q4kxo3mRXcxBcUE5_Ec$;L?vR6Hf|1bJzQyBh 
z=Aqqgwge&~C-&E1T6W}_1_kLz5w_$B2^>6?b$3>o9~M%29kVB$;n_!UXgmBj46b%Q zgR7)e%oQyNN#x%z+92vQ(Q!zX3OTF7&%5gBAHntk(pb9|2_QOTJsU!gw0epZgWk;h z8+RDPcw_~^Zx`IAG%<-#s{)IK)}LSWSk7Np(u?DxHIjU;+&Xt)SM5K1+32+d}V zSteZ~aDde=N*?8o&3YC>_odU-y!NA4^ztRJRkNrFvqPRi7|P+&*B{&TZ>V;aZ`0i~ z`1a*Z))LZtg5+-J@|~*>emOJpm?b%CVaCyJi-wtjx?om`5} zmlcH9lk#`RkeHeChqHv~$S{XjE&fdBHJ)yzy$Yt8IaX1wX-jj#kFFDcOo5Z5v<-gA zAj4SPtr)tIaDw!IAPf|T7fPcAiY)yn7qH3ZPv^W#nS-Vyuf#U!C0yI-uxQZ;Y_(mU z6(s}0bVaW2A7tl61J^AoKE0{hmXaszh7(ey-+B`jhzL z*PDw!axT>G)<73Y^K~u-MG$rJ=iSwnz{T-lB;=Yrp!iSll&I~ciz==kiPbiiXS zX?zCXed}gW1`9V_szH!c!NwY+x0eYGtZxO3Adg7^2c!ixZ$?z%v&8A4~6Zt zoFrnc9gGgY@V5yoMk3ob2GIuRwBX$N!NJ(>@vY5dN{YaUsgwmJn~>lxV@+?#)+fS? zCi-w78B1_4P&>hNt}VTZBqoq#D;ag}rETBQEZs;ae1K=6hDKtqaMjo!K$D^P9JF-g zyY#aacM?_^6g5xP>Sg|9TUWa&rO$tI^Aj#Boh)1px#1$+5dSW6RRIPp@r0y!;q;JkG3H_g!4>rT}bd>j#q%DRgPphk{R@0}-$h;Wfa z0(g0yS}VRL<2hUu&>|Ew5g-0?X4s5UPFXy5r4Z`6j5-vq&O;WF{013genmx!#5w-> z162f8+~X(PxPDtw`q0Z29mALrVTT7OroAa=*qnWl-FuPQ`x%3|+Q%3C#B0>-41(aTOtk>ROA6{)xII zd7LIEJ4G0*u7aQt4J0I6$cOA15gw z>3a~oF{tq7DP}mmBXhK$*~BvNh>ayE=U z7<2z0L`Q%70V?aH1f>KWguKYkQBiu7XMzK`o+_#Zk}eja{=RAdCoy+rp|U6?63jS* z7bZ(K5|;rPnliX=^5~Jl_jWYPUZ|BTrewf46fQkloKHLaq^BmWuh){va*c_X4hoA~DiML))m9dJ^*@-Kj_PgZ zl>-6d@i*cRat4chD6`Rz=X^?HE!;Uwr11v&Mb^bfTjHEDR8g$z`@{3!+L9&cW6pp8 zVcu)DhHPVGq=uIj!jwnk+Pd?-dbQb(oRTQ0N{##}N%qZCE_vuCq`2tF@l+8VS40*+ zK*2>AtCjap2m%<7N|ff>w*a8i^J0&<;r~pUzT4uanhL6BQc?HFGCWJS(0@zVLWX>f zAsO(A#WDY4MDXT?Fz?ZATUvQyeiJQi`r9t3ORs_=osD;~?FfE4Iyt@W&TbhJA(Wbc z1aT9tUQywNTwiAet#cadm<7C<~!I5O0j!4kj?q0-1?@Dizu^sk{X_gGVQ5)64#k= zyp5<#lT>hBhtN-%zv&G+C7O{Kfhra5)^ryaAh|~WS~(r#^Bt`oGJRFz59NLb?&y$S zfDS#Fz#qgYw&aGVTfTaLeL7Yx;zL9Y*k}3x`^za!tz_K&=Oi5r&so|%Cc%K>-}8;3 z)AQiNpzXT?`DLHyA14H&gj8_RhtRuuK`&(7BZeEwL2$;4|1rEi_#!y79$LP_ zO`{_I;co+_YnFD3YFit%^{YpzApHUA#wh_^3>g5Fkjqt+vhx23E8&?l5oC-l*pM2E znh`Z|OmdF8V>i5~j%o!vc|fIpJmPMrp_Fq)5)LkGs-9@jnI1O8aXEy3CT~P;Sp0{9yD@Bm|ARGMP+y=##wg>Kc8`st z*dIs6?U~vXHZ4;a4lZN`1y4z=%$6_*B~i^xw(h@H3AabmpIdYkNStxC-OBqan8lFg 
z4L;0_VTNiIt*1R9T+H_P^}@o_xeT6SQNiQKc6fU)N{-tL82P;+qZAZwwOi7EPAXwoj$p`To zU#w-`(2{q;YeQ?^r~9UdQ-0#JWeDtlI~wG@j=F)?|Jvs*+PrPC0gk*SKcep2^(j#n zF^Y6N*U33r&}jqi<3E2e$#Fzp!+b#5=;y#|%vTI$Ug5A`8Mxp2j6w_T-&s$YX@dY2~8EVk@g zIPno0e}@Y|^GmiCxy3POcXgvrZw3krlNVf1irl0!xwXK?H9a_G{V38oZpixK)fCKU zvbAxPvVbtCOsYNkKe!d{4<1s%aeK3#l2o=$=r5P|LCGC=6uw}-c9U2a4qti@DxLDP zIt-cTQmkjC51#MSb%%nYS@BTh&aJ_^NwV)loe{RV=s;TE)`i}z1wM(9Z(J>{UW>*b zX9v=BKl5Kp723EtuAk~sN7@JDCU$R$k+m(*G(!JCp9G(m_vYhMsA}6zEcf74ft=H? zVUr3OjtS_6Fi5kal-;cv60$Qb>)U1}}ms&je4h-Hm*QVEfC0}%MuaA$9dl_2& zcyB=3Kx!hx#EU^s3are5PkXL9&vWzbR=Bg{=RTIA^Z)^S>8TXw*%WOrL3KGwU99D0 zsD&37Z2s_L$uoY@r10T)b3>l5l(ityQ$dC?hsVEZW!BxFS1MOoH7b%)dDHmm=btn- z2DykPhr@xv$VN)RAoIofNlWzi?(teE`)|`Yy6!DqqTP7U&`ed9QSmG1iNp|Bd|lGX zjAw3BogXyT?{YAKYG)z30*m$LnoN~(noXAj)MCYhcaOh0NLQjd3oV}50}eQ92~<_c z{+x#kTP+r46r_L*0zp&-*icc8oa00-w;FTMN3nOksCnJ)`c+YGz+>{FBgOkN`#XJy zsfxu1kG5PJv{N~tC^b>7(=yejDP$N8WOA@CV^SmWYZlb#%MF<;2yqLCme9{W^-6pb z%x7glD(@x+m=ue~ZQOb6PQ>UhHPu-*>pRrz*=?GmlQk`V*5y1BZmhoI$FIf48T8d? 
zJ?%<4Eeec$hn7qOlQYSZP-5YdPmwQCvY`n1dMEe7!FH&rNf#R0YZogR{LsAP#X&kK zr5DWF3kCc(YEI?ICVq8-TBo_5OyQFfx{B*W`jg6XxTS38qkpH*{pQOBwrF^I^TRDR z`~GAV;NeMED6PlYmp)p|8608$+kbGg=6sLJPQyb-qq**vbzN3pnOXLC6|PkTWe=Zx zr@Y+j<5gavoio7<4n1jdv&z#@NIZ8qKg*%T+nq3gwboEVg`8b>q8OrXwA2Qd@-GW)q zPTgD8#D07Tv-y5_upO^YW6LK;s=mAu`N&sRY+oPXfh{rc3@{^++wHdy{x{Qy6%s5 z3Lg;b7S0?Vr_aaQ%{Q-?qUH{^`SrSFV%81%uDE6`^lrYtX0wFJVPD338$zb?xO37P z$DhpyJJn7-)0k--I_ErgF0tckVn5;zrHs?}jbEWDe3n zDbCYPz4g$WE4Qv`;e*NCaK8J!O=A1+)KA5=UOU3Zx~*v&h_wc&)L~H zL zQ39gzOZWqI?s}}BeQQIoqI6rb*4sL0Ty&%FVs&Q^!k#oEN$a#p$OU zK(x^s+ZL)b$8fIOrTu)5_xR2%Q>6#)^o9+oN9UT?`?$#l4s32(n80d5qXN!Y+gH7` znjd+BAh73EEIjU36e#Qtt);^3ZuJ!Xgo^AXy$;=@Az9!&TSF<xdxiEQrk9e{bKU#oyK?+`h3q~vh|>;X36AYc1UsP z%TpgcUMYx)o|gVz@{9lVm6-A%w~dKM(sSa$bxBo&!m(F%>v+2)Eq9e%B1 z_m7um+h;sS(lU;#t5kK)IK;-5D2Kdr>`cuvN-_E{**R{P*%&uFTj4BeR8p?R)) zAJ)2E-0F@+m&uiyHbI@QH`Ye3qhn#5pA88Sc6oAAydiDZu1H#?%!J5+^UAv-_LV%J zfQCetVVhJU-=R4}9fV_`DF3-<2mBW$dBTGKJn+gE167*xzbsp@@fL4kzNb!xRn5q4 zo>Dr?H#KUUX<_?8syl4gO|g$ktcG>$TcNo1yAjtWI|FIes+YXn&jNk#9_Bfe_^R#m z`dtrmHyLPtkXY-bkiGv=)BFIYt2&|0P9{z@ai3apt#QURZRjKMnC5UroMzVdDpsk5 zpC7wkduc;!ciT>M$Z>(PmT)ny44quJM^TAkcLu!02ev~ykRKa9fsWum`zZCu5pGL+ zq_?3qv{U+_F11!{qA^XYBgMhG{e^C@htqs<)K~-a0sbNj8Z~i+^+8K+S|(8cPOGd{Z%xI5A(NUzn4fi z*eGyw|Ct{nvh1UDSHW_`TX!(6XoSC-kGCQ+=tLc>VKSwN;&AVP%|JGVe}`QwXK6 zt;iVp&esM~uta<^$h_FQ@IYF$+KDje@4;_3Ie<+mD{TGZrClhH!rzrqiN3(5ryv|c zw*wkLdV{{;Ht2>r-#Y}CQ4DZ=`pY7f!(7; z1|ts@!_gif07HFMA}xxcD^IJtRA2I?qx@viP`sL0fU}^seg9ia-~y-_w+^YwTvES# z+~0BLDF?d&gXs*X@4` zRbx-EI+z4TTTc~RXEG?hO;;MS-@9ntbV`n48KQ>|VH%{r>u*`xzOp>gN!&<%P_(_RnsAgGm- z7i1k#a3tXA)6*rv(`g&tWNjYpDDr})RQZ7SQ0K+bP-(w8=WxaKG3C;Qf5r%oLG@SN zHag`$pRcr>P%-oo+nXc#OUvR-6Zw5=+2?q^{*K@+HJ`jzWjuBa>g(1)cY?l6Jbag< zA5_L{j(Tqe0$B$d-6|K$FerovFrJ(S@!JO0+XE1+QpXpj+U{0f4pM^p!~ha>Wm~x_ zcSRhKN#$r<<_s@G>-2Pqdmd{6(zzYCsUxLg(7liA*%(!^eMEoGq20$LcI z^-dQseo-!|3*AZuO$%kHTc=NE_m>Hdxig{F>oYiG9eZ z&C4^be5)7;mo4;MlmC&d)*Xd%->PKy%{$ebHXLqp>OX(&fl+Gr*XUaFjvpS-we7bB 
z=Y=Ng@(A&^99r`dDIjtt*-){9x-eGjx676JF`8?S-z?NSa6+uy%~Y~Ynx-;=7quNn>^2- z^r#$Dh;KmzDfEZsN6IW$o945bzkN&pYhiYm{Dg#NJeTVOZ?T?PQBFVY9dE=U4P<-z z6uwRMC6?^A)ZZa#cX3ZjvQ8=PgzcKc-46b)F$&l98`D3D)?MD%TW?k<@M1vd_qkee z%MKnXh3wwhropYywe^t$g+CObf0@qJJ^#yI$)O}tQPUfG3f;)Cm&Fc4No`N)+98xL zVp%3#-0h^y$|h#*;jPx#A%4Vq9 zrWxbx#<1_A&0)17@`})x|tE47Hugo!V!e(K*^Yx|Bv%QX4H(pSx zK-;C_ABEd0_39>kk;?s-5!gakEHCJ$Akvv3HPw7iWUBd?YKcM5L+EYrND(c%*C38D zRkYb<4VUZhkw?RA`R4+Ut?!&SjLVpa9SMF;GTWC7d?l#utC;+F2N^V1ljHJ*R2#aR z6~6^JG+=lYm=(f#7qstNb`)b7plNPVtv*0Q@OpN3)CV8v)WV}-Z+N*lJ@%TKcC92ipWUuC&#)QGwdI-+Nu}ZA5?w62Rb~Y;o-}(r!q|1FFFHa9~rlsj?b#c zV8N}@&4<2g8j+SgSCg)PzWXxWTc13J2PaKxNI`zS& zte^RX+nlzGn%#h4#?W2;toaFb(NVPIcoIS#v}#$Lej{riey@`L?0d|x_A4}dxiT?T zqsyHSh130#uYHLJ^Naf{op8PH=mj=$c6MOQqS>grJBhMS0~SO(Q_AF|xS$(okgd~T z#^h<}>zLR94Vfjt1#*7oM<-a2@>aKm{gQ%$g5IZc&d%&L>Ye=&iB>)Z+b0TeDj zf*ua1bKU%{^FFnC*R_3Y^Da~yh5GS%P}p8`A{rXD2||;HarUE{I6Y{!o=|aF{bo#I z71P_d1Ltc+Ecq^6^Wyb}7RX;{xaY3>1qo_~M_3F7f@#z}`*hcu^_w12tZ`xqH2k3Y zrty^9oCLIg8PL8aY5AvWgZ#e8<@Q6@Q4etW0wo;|w8{kVmx+Lh-%QKL* zm>HDlG-!YBjGmre$21hXXUbymo@mi zM=j6&+UNg50@~u4R;o(X?9pXCs;>n7mBdPt1K+xowHK6}-OvJ^xOhR3M?|Qnp#7_Q zH#<+A3UYQ@xj$@>Lx8B9Fy4?R#(tsBl4>p%UH(=i$~bKO@T?i)J0XdW8k7M$5|4dz+0I z5EHQ`5rnFqAJFZW3Y9oo+n9!r+EefLmDqr|&zEzy)ADbME2RND(C5Il_iJvt#vUb> zDPIl;UV&4WwqySU=(23Zs2zy^!mNHn%;e68np>({CGEx@KwmWGFoKhi*b#fMUP1on z_(e6%|Mm(5iVR1J%=RUk^Gbl6H?7v{PYf=3F34p!)}=bZZ+TC;vj&7%V#k~7pq+FM zH`rB1tNKkFLerHJ3%BmlmiVBIMu>7;^u5^+!-V89sa$=0#CU3GXDdX6>#jVxP!(4a zr4rXT4C0YFX>zCWkK&+|RvT!J*xK$V$8!XFlRNHXeOhwR(smlmg6|ig!>z>3M89J9 zg8$mDBbNQrY+4Gs&yz#<4W6$BgLT2Q`%Ujj4TH%Hhy}F9rkyo@as;Yzb89%yCkHhv zoY*B)5(Z6i4sDW%A9p*DLTn4nU`PB)Vze3*;HLg_XNnIX<%=f@ay#pL`@nOp*+$4j9ju$848_?vHeLX}en6q$s_=G$4M9?JA=&t6m+z-*u~1}{No0HD#Kp`qJm5OpLk zdY&v4QbVZ=x#=T@o@ZMX_0x28I9j;u6R^I=?{h)p&lSf1823NYuW!@A!(LwoBQI(q zkKf#c=!Pt?UrgGXJlbFrZl0xuf2p?a5S4Vo7q`u?Tvkh4yW6W>QIvrWH+MIE1kgA6 zVsr1$v6sMXs*J%~R!hEBimpQ>z9Sp}UEP$Uyc;$3`bNS-cU4VQmTpy|0l+W^O;f9T 
zdaP27Sgn|mbr^8ZBV9mwOEw+?fT)ey*?L5>SdG4hk4h~Oq-cle4SQxV#J4mczp!$4 zGZW4=i8{Dcn>UQRN1I9ksV(`|Vd~Bq*J|-@Sk$S!JmxlD@tCmZZLa%Bo!-3jnJbgM zksYm5WFT*LJxfWixLyt*ZN4&*W@(fs_!riIA+4V!^|2TEZ@7EG!alQRSqmEm@YMQM zO+@sL&B~*eHM&z^;YyH$gba<*&;tRCsQ^`v?Ytqd5PVHBz3VYx9t9y3Wza*e6#2b- zPDVtGv*63*>uY(T``qad#otz;FBJiRmRmqCHJP3*hKBP*w2y%$z6%IX9fcin*7ptFuy-FnsBl6b$`pGSLD1|okx89Z|b9;8YMrDMM=dD z=K64P@t)FDSN-LD)L5JJWik_okEPt*X@XKaSum7iX8Q5TwnEKU`R%~|V!4pvtDPxI zBUM#1fpHEWtgR`J4pTe`YrJd7?#BT)@c%uz%<)Dl1wtr7Jg|;^&3ps#aFVUe)Ujo zf_9LQqyoyclz!sIdmZzez|zTNRb^Fepc`-ZneN-XFX6d1GfnOpts=DTTy4BcZ)>rB z961w~g^~soCEWU@ooUH<+ZRWt3NpvrHWFg$dxX|caXmZ<6T}c4YajVy5$w#`&!Ctc zPo8&RS3XD5Sm(@+nQ$#5G8cYD<@crD+Jg~pjL7t!WafU#s&rBK0A(F0l{Vq2wLrxV z1VEXqb&eUM31X}iiguJV3VLYkhUe!7%{>4cM`}dP??zETV+s=6JRK+_EY;5kzZxzT*fo4^zm_oM+=g7OVb5H zbwyu-lNiYG_Ky7hZlfe#Y>@v(!nOUq>k~V)Xnig<#{^9B^=LdJA63N5J&972!zJ?; zg|lCpp--ZA;*Z|kWa?TQGfi(kVBiraWzW3hMsJa)zZj26omH6~CD;K#={+Ip=WhQi zBuFV`kjlddg&H9u;LKKSJm>(&8?yC3TfT>;bD7;je~fU_F>dhR0>G&ZLi-GwMXID2 zrv+kXy6Yx}OZZssn#~+5fk)0KzAOQ=j?d!Cq)q%CX-3hsQ!2Qq8Bnez<5+m(Vix!q z6kgtw2rFkI)Ekj26Hl^YHP%)A?%-k9chV?0F#0OMvQq47p4ym}$7PLyD$tVxlBxmz z>A{=^B`s|M@~1`utB@HvMvi&WHn@UkLsDIX_+=n}8x?LMI^LtX0qH$4+}}*pGmQk= zk)r>G$w_ip_4VSZamJ3|?b3nym_b{*#yYnI3P9>4rD|o9?ZF~B3>L{LI7ByH!C`MA zMIKDFPz*yfJ#9eI0D0IA0;~*lMWky_l#;bk(ZksySavSGEnakY#C6@xgaQ<29TA4J z+X{3INs59YmS8pQ~*QQOI4qyfPD+ETo?AG4&^{8I2WGYe=bLN>G z#5e-HFnfJQj1~Kbjm=>_W|kvPFJ%AuqAvO>*TwRzs#TfNo1ZA2nE+UyAbf}`8$Tt& zxmUfbY4s@r=%UHvrs2t>W|d)S5{OJuVV1!{6-w%oo15U3h`#sLZp4L4S(dpzTCfG>LrV_aOKMXc#!{!+m+G&U#98G3|pCDe}&F6->gK{N>nW$DJz*|`SDq* h%&$b%|4mfQVDq+S?Q4FknZyNOn#Xk2)732f{tXhG&KLjy literal 0 HcmV?d00001 From d4130b7f039e828f7454ba7088c6af19ab6ff628 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 21 Oct 2024 15:05:33 +0100 Subject: [PATCH 050/485] Reworked some of the statuses --- .../database/prisma/schema.prisma | 13 +- .../run-engine/src/engine/index.test.ts | 4 +- .../run-engine/src/engine/index.ts | 128 +++++++++--------- 
.../run-engine/src/engine/messages.ts | 4 +- 4 files changed, 81 insertions(+), 68 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 5f71b7da6f..4f5a799b49 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1904,12 +1904,21 @@ model TaskRunExecutionSnapshot { } enum TaskRunExecutionStatus { + /// Run has been created RUN_CREATED + /// Run is in the RunQueue QUEUED - DEQUEUED_FOR_EXECUTION + /// Run has been pulled from the queue, but isn't executing yet + PENDING_EXECUTING + /// Run is executing on a worker EXECUTING + /// Run is executing on a worker but is waiting for waitpoints to complete EXECUTING_WITH_WAITPOINTS - QUEUED_WITH_WAITPOINTS + /// Run is not executing and is waiting for waitpoints to complete + BLOCKED_BY_WAITPOINTS + /// Run has been scheduled for cancellation + PENDING_CANCEL + /// Run is finished (success of failure) FINISHED } diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 3605d3b558..247f31684c 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -130,7 +130,7 @@ describe("RunEngine", () => { expect(dequeued.payload.run.id).toBe(run.id); expect(dequeued.payload.run.attemptNumber).toBe(1); - expect(dequeued.payload.execution.status).toBe("DEQUEUED_FOR_EXECUTION"); + expect(dequeued.payload.execution.status).toBe("PENDING_EXECUTING"); const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( authenticatedEnvironment @@ -251,7 +251,7 @@ describe("RunEngine", () => { }, }); assertNonNullable(parentSnapshot); - expect(parentSnapshot.executionStatus).toBe("QUEUED_WITH_WAITPOINTS"); + expect(parentSnapshot.executionStatus).toBe("BLOCKED_BY_WAITPOINTS"); //check the waitpoint blocking the parent run const runWaitpoint = 
await prisma.taskRunWaitpoint.findFirst({ diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 3113c2722b..b0924c82a0 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -582,56 +582,53 @@ export class RunEngine { //todo figure out if it's a continuation or a new run const isNewRun = true; - if (isNewRun) { - const newSnapshot = await this.#createExecutionSnapshot(prisma, { + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: { + id: runId, + status: snapshot.runStatus, + }, + snapshot: { + executionStatus: "PENDING_EXECUTING", + description: "Run was dequeued for execution", + }, + checkpointId: snapshot.checkpointId ?? undefined, + }); + + return { + action: "SCHEDULE_RUN", + payload: { + version: "1", + execution: { + id: newSnapshot.id, + status: "PENDING_EXECUTING", + }, + image: result.deployment?.imageReference ?? undefined, + checkpoint: newSnapshot.checkpoint ?? undefined, + backgroundWorker: { + id: result.worker.id, + version: result.worker.version, + }, run: { - id: runId, - status: snapshot.runStatus, + id: lockedTaskRun.id, + friendlyId: lockedTaskRun.friendlyId, + isTest: lockedTaskRun.isTest, + machine: machinePreset, + attemptNumber: nextAttemptNumber, + masterQueue: lockedTaskRun.masterQueue, + traceContext: lockedTaskRun.traceContext as Record, }, - snapshot: { - executionStatus: "DEQUEUED_FOR_EXECUTION", - description: "Run was dequeued for execution", + environment: { + id: lockedTaskRun.runtimeEnvironment.id, + type: lockedTaskRun.runtimeEnvironment.type, }, - }); - - return { - action: "SCHEDULE_RUN", - payload: { - version: "1", - execution: { - id: newSnapshot.id, - status: "DEQUEUED_FOR_EXECUTION", - }, - image: result.deployment?.imageReference ?? 
undefined, - checkpoint: undefined, - backgroundWorker: { - id: result.worker.id, - version: result.worker.version, - }, - run: { - id: lockedTaskRun.id, - friendlyId: lockedTaskRun.friendlyId, - isTest: lockedTaskRun.isTest, - machine: machinePreset, - attemptNumber: nextAttemptNumber, - masterQueue: lockedTaskRun.masterQueue, - traceContext: lockedTaskRun.traceContext as Record, - }, - environment: { - id: lockedTaskRun.runtimeEnvironment.id, - type: lockedTaskRun.runtimeEnvironment.type, - }, - organization: { - id: orgId, - }, - project: { - id: lockedTaskRun.projectId, - }, + organization: { + id: orgId, }, - }; - } else { - throw new NotImplementedError("Continuations are not implemented yet"); - } + project: { + id: lockedTaskRun.projectId, + }, + }, + }; }); }); } @@ -1278,20 +1275,16 @@ export class RunEngine { const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, runId); if (latestSnapshot) { - //if the run is QUEUE or EXECUTING, we create a new snapshot - let newStatus: TaskRunExecutionStatus | undefined = undefined; - switch (latestSnapshot.executionStatus) { - case "QUEUED": { - newStatus = "QUEUED_WITH_WAITPOINTS"; - break; - } - case "EXECUTING": { - newStatus = "EXECUTING_WITH_WAITPOINTS"; - break; - } + let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; + if ( + latestSnapshot.executionStatus === "EXECUTING" || + latestSnapshot.executionStatus === "EXECUTING_WITH_WAITPOINTS" + ) { + newStatus = "EXECUTING_WITH_WAITPOINTS"; } - if (newStatus) { + //if the state has changed, create a new snapshot + if (newStatus !== latestSnapshot.executionStatus) { await this.#createExecutionSnapshot(tx, { run: { id: latestSnapshot.runId, @@ -1314,12 +1307,14 @@ export class RunEngine { { run, snapshot, + checkpointId, }: { run: { id: string; status: TaskRunStatus; attemptNumber?: number | null }; snapshot: { executionStatus: TaskRunExecutionStatus; description: string; }; + checkpointId?: string; } ) { const newSnapshot = await 
prisma.taskRunExecutionSnapshot.create({ @@ -1330,6 +1325,10 @@ export class RunEngine { runId: run.id, runStatus: run.status, attemptNumber: run.attemptNumber ?? undefined, + checkpointId: checkpointId ?? undefined, + }, + include: { + checkpoint: true, }, }); @@ -1355,12 +1354,13 @@ export class RunEngine { switch (status) { case "RUN_CREATED": case "FINISHED": + case "BLOCKED_BY_WAITPOINTS": case "QUEUED": { //we don't need to heartbeat these statuses break; } - case "DEQUEUED_FOR_EXECUTION": - case "QUEUED_WITH_WAITPOINTS": { + case "PENDING_EXECUTING": + case "PENDING_CANCEL": { await this.#startHeartbeating({ runId, snapshotId, @@ -1513,7 +1513,7 @@ export class RunEngine { //we need to check if the run is still QUEUED throw new NotImplementedError("Not implemented QUEUED"); } - case "DEQUEUED_FOR_EXECUTION": { + case "PENDING_EXECUTING": { //we need to check if the run is still dequeued throw new NotImplementedError("Not implemented DEQUEUED_FOR_EXECUTION"); } @@ -1525,10 +1525,14 @@ export class RunEngine { //we need to check if the run is still executing throw new NotImplementedError("Not implemented EXECUTING_WITH_WAITPOINTS"); } - case "QUEUED_WITH_WAITPOINTS": { + case "BLOCKED_BY_WAITPOINTS": { //we need to check if the waitpoints are still blocking the run throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); } + case "PENDING_CANCEL": { + //we need to check if the run is still pending cancel + throw new NotImplementedError("Not implemented PENDING_CANCEL"); + } case "FINISHED": { //we need to check if the run is still finished throw new NotImplementedError("Not implemented FINISHED"); diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 0e110d9e32..7576406133 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -11,7 +11,7 @@ const ScheduleRunMessage = z.object({ version: z.literal("1"), 
execution: z.object({ id: z.string(), - status: z.literal("DEQUEUED_FOR_EXECUTION"), + status: z.literal("PENDING_EXECUTING"), }), image: z.string().optional(), checkpoint: z @@ -19,7 +19,7 @@ const ScheduleRunMessage = z.object({ id: z.string(), type: z.string(), location: z.string(), - reason: z.string().optional(), + reason: z.string().nullish(), }) .optional(), backgroundWorker: z.object({ From 07b97e4cb39bc867737a061175fc108a99e672e1 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 21 Oct 2024 16:53:50 +0100 Subject: [PATCH 051/485] WIP on run execution data --- .../database/prisma/schema.prisma | 1 + .../run-engine/src/engine/index.test.ts | 300 +++++++++--------- .../run-engine/src/engine/index.ts | 114 +++++-- .../run-engine/src/engine/messages.ts | 61 +++- .../run-engine/src/engine/statuses.ts | 5 +- 5 files changed, 290 insertions(+), 191 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 4f5a799b49..75e2b75a32 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1965,6 +1965,7 @@ model Waitpoint { userProvidedIdempotencyKey Boolean /// If an idempotencyKey is no longer active, we store it here and generate a new one for the idempotencyKey field. + /// Clearing an idempotencyKey is useful for debounce or cancelling child runs. /// This is a workaround because Prisma doesn't support partial indexes. inactiveIdempotencyKey String? 
diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 247f31684c..13f293a76d 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -150,166 +150,170 @@ describe("RunEngine", () => { } }); - containerTest("Complete a waitpoint", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", + containerTest( + "triggerAndWait (not executing)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, }, + baseCostInCents: 0.0001, }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; + tracer: trace.getTracer("test", "0.0.0"), + }); - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + try { + const taskIdentifier = 
"test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + const childRun = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + }, + prisma + ); - //trigger the run - const parentRun = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); + const childSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: childRun.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + assertNonNullable(childSnapshot); + expect(childSnapshot.executionStatus).toBe("QUEUED"); - const childRun = await engine.trigger( - { - number: 1, - friendlyId: "run_c1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - resumeParentOnCompletion: true, - 
parentTaskRunId: parentRun.id, - }, - prisma - ); + const parentSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: parentRun.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + assertNonNullable(parentSnapshot); + expect(parentSnapshot.executionStatus).toBe("BLOCKED_BY_WAITPOINTS"); + + //check the waitpoint blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + assertNonNullable(runWaitpoint); + expect(runWaitpoint.waitpoint.type).toBe("RUN"); + expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); - const childSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { + await engine.completeRunAttempt({ runId: childRun.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - assertNonNullable(childSnapshot); - expect(childSnapshot.executionStatus).toBe("QUEUED"); - - const parentSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: parentRun.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - assertNonNullable(parentSnapshot); - expect(parentSnapshot.executionStatus).toBe("BLOCKED_BY_WAITPOINTS"); - - //check the waitpoint blocking the parent run - const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, - }, - }); - assertNonNullable(runWaitpoint); - expect(runWaitpoint.waitpoint.type).toBe("RUN"); - expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); - - await engine.completeRunAttempt({ - runId: childRun.id, - snapshotId: childSnapshot.id, - completion: { - id: childRun.id, - ok: true, - output: '{"foo":"bar"}', - outputType: "application/json", - }, - }); + snapshotId: childSnapshot.id, + completion: { + id: childRun.id, + ok: true, + output: '{"foo":"bar"}', + outputType: "application/json", + }, + }); - const waitpointAfter = await 
prisma.waitpoint.findFirst({ - where: { - id: runWaitpoint.waitpointId, - }, - }); - expect(waitpointAfter?.completedAt).not.toBeNull(); - expect(waitpointAfter?.status).toBe("COMPLETED"); + const waitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: runWaitpoint.waitpointId, + }, + }); + expect(waitpointAfter?.completedAt).not.toBeNull(); + expect(waitpointAfter?.status).toBe("COMPLETED"); - const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpointAfter).toBeNull(); + const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointAfter).toBeNull(); - //parent snapshot - const parentSnapshotAfter = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: parentRun.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - assertNonNullable(parentSnapshotAfter); - expect(parentSnapshotAfter.executionStatus).toBe("QUEUED"); - } finally { - engine.quit(); + //parent snapshot + const parentSnapshotAfter = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { + runId: parentRun.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + assertNonNullable(parentSnapshotAfter); + expect(parentSnapshotAfter.executionStatus).toBe("QUEUED"); + } finally { + engine.quit(); + } } - }); + ); //todo triggerAndWait diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index b0924c82a0..970a3d9fea 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -23,6 +23,7 @@ import { PrismaClient, PrismaClientOrTransaction, TaskRun, + TaskRunExecutionSnapshot, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, @@ -38,7 +39,7 @@ import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { 
getRunWithBackgroundWorkerTasks } from "./db/worker"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; -import { ScheduleRunMessage } from "./messages"; +import { CreatedAttemptMessage, RunExecutionData } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; type Options = { @@ -401,7 +402,7 @@ export class RunEngine { consumerId: string; masterQueue: string; tx?: PrismaClientOrTransaction; - }): Promise { + }): Promise { const prisma = tx ?? this.prisma; return this.#trace("createRunAttempt", { consumerId, masterQueue }, async (span) => { //gets a fair run from this shared queue @@ -1035,7 +1036,7 @@ export class RunEngine { }, }); - // 4. Add the completed snapshots to the snapshots + // 4. Add the completed waitpoints to the snapshots for (const run of affectedTaskRuns) { await this.runLock.lock([run.taskRunId], 5_000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, run.taskRunId); @@ -1086,6 +1087,44 @@ export class RunEngine { ); } + /** Get required data to execute the run */ + async getRunExecutionData({ + runId, + tx, + }: { + runId: string; + tx?: PrismaClientOrTransaction; + }): Promise { + const prisma = tx ?? this.prisma; + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (!snapshot) { + return null; + } + + const executionData: RunExecutionData = { + snapshot: { + id: snapshot.id, + executionStatus: snapshot.executionStatus, + description: snapshot.description, + }, + run: { + id: snapshot.runId, + status: snapshot.runStatus, + attemptNumber: snapshot.attemptNumber ?? undefined, + }, + checkpoint: snapshot.checkpoint + ? { + id: snapshot.checkpoint.id, + type: snapshot.checkpoint.type, + location: snapshot.checkpoint.location, + imageRef: snapshot.checkpoint.imageRef, + reason: snapshot.checkpoint.reason ?? 
undefined, + } + : undefined, + completedWaitpoints: {}, + }; + } + async quit() { //stop the run queue this.runQueue.quit(); @@ -1172,43 +1211,42 @@ export class RunEngine { const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: run, snapshot: { - executionStatus: "EXECUTING", + executionStatus: "PENDING_EXECUTING", description: "Run was continued, whilst still executing.", }, + completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), }); - //todo send a message to the worker somehow - // await this.#sendMessageToWorker(); - throw new NotImplementedError( - "RunEngine.#continueRun(): continue executing run, not implemented yet" - ); - } - - const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run: run, - snapshot: { - executionStatus: "QUEUED", - description: "Run was QUEUED, because it needs to be continued.", - }, - }); + //todo publish a notification in Redis that the Workers listen to + //this will cause the Worker to check for new execution snapshots for its runs + } else { + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: run, + snapshot: { + executionStatus: "QUEUED", + description: "Run was QUEUED, because it needs to be continued.", + }, + completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), + }); - //todo instead this should be a call to unblock the run - //we don't want to free up all the concurrency, so this isn't good - // await this.runQueue.enqueueMessage({ - // env, - // masterQueue: run.masterQueue, - // message: { - // runId: run.id, - // taskIdentifier: run.taskIdentifier, - // orgId: env.organization.id, - // projectId: env.project.id, - // environmentId: env.id, - // environmentType: env.type, - // queue: run.queue, - // concurrencyKey: run.concurrencyKey ?? 
undefined, - // timestamp: Date.now(), - // }, - // }); + //todo instead this should be a call to unblock the run + //we don't want to free up all the concurrency, so this isn't good + // await this.runQueue.enqueueMessage({ + // env, + // masterQueue: run.masterQueue, + // message: { + // runId: run.id, + // taskIdentifier: run.taskIdentifier, + // orgId: env.organization.id, + // projectId: env.project.id, + // environmentId: env.id, + // environmentType: env.type, + // queue: run.queue, + // concurrencyKey: run.concurrencyKey ?? undefined, + // timestamp: Date.now(), + // }, + // }); + } }); } @@ -1308,6 +1346,7 @@ export class RunEngine { run, snapshot, checkpointId, + completedWaitpointIds, }: { run: { id: string; status: TaskRunStatus; attemptNumber?: number | null }; snapshot: { @@ -1315,6 +1354,7 @@ export class RunEngine { description: string; }; checkpointId?: string; + completedWaitpointIds?: string[]; } ) { const newSnapshot = await prisma.taskRunExecutionSnapshot.create({ @@ -1386,6 +1426,10 @@ export class RunEngine { async #getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { return prisma.taskRunExecutionSnapshot.findFirst({ where: { runId }, + include: { + completedWaitpoints: true, + checkpoint: true, + }, orderBy: { createdAt: "desc" }, }); } diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 7576406133..acd290bd1f 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -1,10 +1,11 @@ -import { MachinePreset, EnvironmentType } from "@trigger.dev/core/v3"; +import { EnvironmentType, MachinePreset } from "@trigger.dev/core/v3"; +import { TaskRunExecutionStatus, TaskRunStatus } from "@trigger.dev/database"; import { z } from "zod"; //todo it will need to move into core because the Worker will need to use these /** This is sent to a Worker when a run is dequeued (a new run or 
continuing run) */ -const ScheduleRunMessage = z.object({ +const CreatedAttemptMessage = z.object({ action: z.literal("SCHEDULE_RUN"), // The payload allows us to a discriminated union with the version payload: z.object({ @@ -47,6 +48,58 @@ const ScheduleRunMessage = z.object({ }), }), }); -export type ScheduleRunMessage = z.infer; +export type CreatedAttemptMessage = z.infer; -export const Messages = z.discriminatedUnion("action", [ScheduleRunMessage]); +const CompletedWaitpoint = z.discriminatedUnion("type", [ + z.object({ + id: z.string(), + type: z.literal("RUN"), + completedAt: z.coerce.date(), + idempotencyKey: z.string().optional(), + completedByTaskRunId: z.string(), + output: z.string().optional(), + outputType: z.string().optional(), + }), + z.object({ + id: z.string(), + type: z.literal("DATETIME"), + completedAt: z.coerce.date(), + idempotencyKey: z.string().optional(), + completedAfter: z.coerce.date(), + output: z.string().optional(), + outputType: z.string().optional(), + }), + z.object({ + id: z.string(), + type: z.literal("EVENT"), + completedAt: z.coerce.date(), + idempotencyKey: z.string().optional(), + output: z.string().optional(), + outputType: z.string().optional(), + }), +]); + +export const RunExecutionData = z.object({ + snapshot: z.object({ + id: z.string(), + executionStatus: z.enum(Object.values(TaskRunExecutionStatus) as [TaskRunExecutionStatus]), + description: z.string(), + }), + run: z.object({ + id: z.string(), + status: z.enum(Object.values(TaskRunStatus) as [TaskRunStatus]), + attemptNumber: z.number().optional(), + }), + checkpoint: z + .object({ + id: z.string(), + type: z.string(), + location: z.string(), + imageRef: z.string(), + reason: z.string().optional(), + }) + .optional(), + completedWaitpoints: z.array(CompletedWaitpoint).optional(), +}); + +export type RunExecutionData = z.infer; diff --git a/internal-packages/run-engine/src/engine/statuses.ts b/internal-packages/run-engine/src/engine/statuses.ts index 
be7e7d0a39..6b5f78a5be 100644 --- a/internal-packages/run-engine/src/engine/statuses.ts +++ b/internal-packages/run-engine/src/engine/statuses.ts @@ -1,10 +1,7 @@ import { TaskRunExecutionStatus } from "@trigger.dev/database"; export function isDequeueableExecutionStatus(status: TaskRunExecutionStatus): boolean { - const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = [ - "QUEUED", - "QUEUED_WITH_WAITPOINTS", - ]; + const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = ["QUEUED"]; return dequeuableExecutionStatuses.includes(status); } From 5fb05d96e3aebc222fed290eb8dc6be2a5d2a163 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 22 Oct 2024 11:35:03 +0100 Subject: [PATCH 052/485] triggerAndWait with completed waitpoint test working --- .../run-engine/src/engine/index.test.ts | 73 ++++++++----------- .../run-engine/src/engine/index.ts | 22 +++++- .../run-engine/src/engine/messages.ts | 42 ++++------- 3 files changed, 64 insertions(+), 73 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 13f293a76d..53247c51ba 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -87,16 +87,9 @@ describe("RunEngine", () => { expect(runFromDb).toBeDefined(); expect(runFromDb?.id).toBe(run.id); - const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: run.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - assertNonNullable(snapshot); - expect(snapshot?.executionStatus).toBe("QUEUED"); + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("QUEUED"); //check the waitpoint is created const runWaitpoint = await prisma.waitpoint.findMany({ @@ -145,6 +138,12 @@ describe("RunEngine", () => { expect(attemptResult.run.id).toBe(run.id); 
expect(attemptResult.run.status).toBe("EXECUTING"); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData2.run.attemptNumber).toBe(1); + expect(executionData2.run.status).toBe("EXECUTING"); } finally { engine.quit(); } @@ -234,27 +233,13 @@ describe("RunEngine", () => { prisma ); - const childSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: childRun.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - assertNonNullable(childSnapshot); - expect(childSnapshot.executionStatus).toBe("QUEUED"); + const childExecutionData = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionData); + expect(childExecutionData.snapshot.executionStatus).toBe("QUEUED"); - const parentSnapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: parentRun.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - assertNonNullable(parentSnapshot); - expect(parentSnapshot.executionStatus).toBe("BLOCKED_BY_WAITPOINTS"); + const parentExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentExecutionData); + expect(parentExecutionData.snapshot.executionStatus).toBe("BLOCKED_BY_WAITPOINTS"); //check the waitpoint blocking the parent run const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ @@ -271,7 +256,7 @@ describe("RunEngine", () => { await engine.completeRunAttempt({ runId: childRun.id, - snapshotId: childSnapshot.id, + snapshotId: childExecutionData.snapshot.id, completion: { id: childRun.id, ok: true, @@ -280,6 +265,11 @@ describe("RunEngine", () => { }, }); + //child snapshot + const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionDataAfter); + 
expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); + const waitpointAfter = await prisma.waitpoint.findFirst({ where: { id: runWaitpoint.waitpointId, @@ -299,24 +289,21 @@ describe("RunEngine", () => { expect(runWaitpointAfter).toBeNull(); //parent snapshot - const parentSnapshotAfter = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { - runId: parentRun.id, - }, - orderBy: { - createdAt: "desc", - }, - }); - assertNonNullable(parentSnapshotAfter); - expect(parentSnapshotAfter.executionStatus).toBe("QUEUED"); + const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentExecutionDataAfter); + expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("QUEUED"); + expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); + expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + childRun.id + ); + expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); } finally { engine.quit(); } } ); - //todo triggerAndWait - //todo batchTriggerAndWait //todo checkpoints diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 970a3d9fea..6a5bce9283 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1121,8 +1121,25 @@ export class RunEngine { reason: snapshot.checkpoint.reason ?? undefined, } : undefined, - completedWaitpoints: {}, + completedWaitpoints: + snapshot.completedWaitpoints.length === 0 + ? undefined + : snapshot.completedWaitpoints.map((w) => ({ + id: w.id, + type: w.type, + completedAt: w.completedAt ?? new Date(), + idempotencyKey: + w.userProvidedIdempotencyKey && !w.inactiveIdempotencyKey + ? w.idempotencyKey + : undefined, + completedByTaskRunId: w.completedByTaskRunId ?? 
undefined, + completedAfter: w.completedAfter ?? undefined, + output: w.output ?? undefined, + outputType: w.outputType, + })), }; + + return executionData; } async quit() { @@ -1366,6 +1383,9 @@ export class RunEngine { runStatus: run.status, attemptNumber: run.attemptNumber ?? undefined, checkpointId: checkpointId ?? undefined, + completedWaitpoints: { + connect: completedWaitpointIds?.map((id) => ({ id })), + }, }, include: { checkpoint: true, diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index acd290bd1f..7ebe63ba0e 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -1,5 +1,5 @@ import { EnvironmentType, MachinePreset } from "@trigger.dev/core/v3"; -import { TaskRunExecutionStatus, TaskRunStatus } from "@trigger.dev/database"; +import { TaskRunExecutionStatus, TaskRunStatus, WaitpointType } from "@trigger.dev/database"; import { z } from "zod"; //todo it will need to move into core because the Worker will need to use these @@ -50,34 +50,18 @@ const CreatedAttemptMessage = z.object({ }); export type CreatedAttemptMessage = z.infer; -const CompletedWaitpoint = z.discriminatedUnion("type", [ - z.object({ - id: z.string(), - type: z.literal("RUN"), - completedAt: z.coerce.date(), - idempotencyKey: z.string().optional(), - completedByTaskRunId: z.string(), - output: z.string().optional(), - outputType: z.string().optional(), - }), - z.object({ - id: z.string(), - type: z.literal("DATETIME"), - completedAt: z.coerce.date(), - idempotencyKey: z.string().optional(), - completedAfter: z.coerce.date(), - output: z.string().optional(), - outputType: z.string().optional(), - }), - z.object({ - id: z.string(), - type: z.literal("EVENT"), - completedAt: z.coerce.date(), - idempotencyKey: z.string().optional(), - output: z.string().optional(), - outputType: z.string().optional(), - }), -]); +const CompletedWaitpoint = z.object({ + 
id: z.string(), + type: z.enum(Object.values(WaitpointType) as [WaitpointType]), + completedAt: z.coerce.date(), + idempotencyKey: z.string().optional(), + /** For type === "RUN" */ + completedByTaskRunId: z.string().optional(), + /** For type === "DATETIME" */ + completedAfter: z.coerce.date().optional(), + output: z.string().optional(), + outputType: z.string().optional(), +}); export const RunExecutionData = z.object({ snapshot: z.object({ From c0cfc98c526eba6e6d0736265186a94144e5d5d6 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 22 Oct 2024 12:17:38 +0100 Subject: [PATCH 053/485] Expiring runs with event emitting working --- .../run-engine/src/engine/eventBus.ts | 18 ++++ .../run-engine/src/engine/index.test.ts | 93 +++++++++++++++++++ .../run-engine/src/engine/index.ts | 56 ++++++++++- 3 files changed, 166 insertions(+), 1 deletion(-) create mode 100644 internal-packages/run-engine/src/engine/eventBus.ts diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts new file mode 100644 index 0000000000..9919b4cc5a --- /dev/null +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -0,0 +1,18 @@ +import { EventEmitter } from "node:events"; + +export type EventBusEvents = { + runExpired: [ + { + time: Date; + run: { + id: string; + spanId: string; + ttl: string | null; + }; + }, + ]; +}; + +export type EventBusEventArgs = EventBusEvents[T]; + +export const eventBus = new EventEmitter(); diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 53247c51ba..24ec93a7ba 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -6,6 +6,8 @@ import { expect } from "vitest"; import { AuthenticatedEnvironment } from "../shared/index.js"; import { CURRENT_DEPLOYMENT_LABEL } from "./consts.js"; import { RunEngine } from "./index.js"; +import { setTimeout } from 
"timers/promises"; +import { eventBus, EventBusEventArgs } from "./eventBus.js"; function assertNonNullable(value: T): asserts value is NonNullable { expect(value).toBeDefined(); @@ -315,6 +317,97 @@ describe("RunEngine", () => { //todo cancelling a run //todo expiring a run + containerTest("Run expiring", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + ttl: "1s", + }, + prisma + ); + expect(run).toBeDefined(); + expect(run.friendlyId).toBe("run_1234"); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("QUEUED"); + + let expiredEventData: EventBusEventArgs<"runExpired">[0] | undefined = undefined; + eventBus.on("runExpired", (result) => { + 
expiredEventData = result; + }); + + //wait for 1 seconds + await setTimeout(1_000); + + assertNonNullable(expiredEventData); + const assertedExpiredEventData = expiredEventData as EventBusEventArgs<"runExpired">[0]; + expect(assertedExpiredEventData.run.spanId).toBe(run.spanId); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData2.run.attemptNumber).toBe(undefined); + expect(executionData2.run.status).toBe("EXPIRED"); + } finally { + engine.quit(); + } + }); //todo delaying a run }); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 6a5bce9283..c684f13a49 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -41,6 +41,7 @@ import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { CreatedAttemptMessage, RunExecutionData } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; +import { eventBus } from "./eventBus"; type Options = { redis: RedisOptions; @@ -981,7 +982,60 @@ export class RunEngine { async waitForDuration() {} - async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} + async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { + const prisma = tx ?? 
this.prisma; + await this.runLock.lock([runId], 5_000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (!snapshot) { + throw new Error(`No execution snapshot found for TaskRun ${runId}`); + } + + //if we're executing then we won't expire the run + if (isExecuting(snapshot.executionStatus)) { + return; + } + + //only expire "PENDING" runs + const run = await prisma.taskRun.findUnique({ where: { id: runId } }); + + if (!run) { + this.logger.debug("Could not find enqueued run to expire", { + runId, + }); + return; + } + + if (run.status !== "PENDING") { + this.logger.debug("Run cannot be expired because it's not in PENDING status", { + run, + }); + return; + } + + const updatedRun = await prisma.taskRun.update({ + where: { id: runId }, + data: { + status: "EXPIRED", + completedAt: new Date(), + expiredAt: new Date(), + error: { + type: "STRING_ERROR", + raw: `Run expired because the TTL (${run.ttl}) was reached`, + }, + executionSnapshots: { + create: { + engine: "V2", + executionStatus: "FINISHED", + description: "Run was expired because the TTL was reached", + runStatus: "EXPIRED", + }, + }, + }, + }); + + eventBus.emit("runExpired", { run: updatedRun, time: new Date() }); + }); + } /** This completes a waitpoint and updates all entries so the run isn't blocked, * if they're no longer blocked. This doesn't suffer from race conditions. 
*/ From 4127996beb98d138e42fdb3780a2a0669e3eacfd Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 22 Oct 2024 12:22:36 +0100 Subject: [PATCH 054/485] Move the eventBus into the engine --- internal-packages/run-engine/src/engine/eventBus.ts | 2 -- internal-packages/run-engine/src/engine/index.test.ts | 4 ++-- internal-packages/run-engine/src/engine/index.ts | 5 +++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 9919b4cc5a..c284c94443 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -14,5 +14,3 @@ export type EventBusEvents = { }; export type EventBusEventArgs = EventBusEvents[T]; - -export const eventBus = new EventEmitter(); diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 24ec93a7ba..34deeb84f3 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -7,7 +7,7 @@ import { AuthenticatedEnvironment } from "../shared/index.js"; import { CURRENT_DEPLOYMENT_LABEL } from "./consts.js"; import { RunEngine } from "./index.js"; import { setTimeout } from "timers/promises"; -import { eventBus, EventBusEventArgs } from "./eventBus.js"; +import { EventBusEventArgs } from "./eventBus.js"; function assertNonNullable(value: T): asserts value is NonNullable { expect(value).toBeDefined(); @@ -388,7 +388,7 @@ describe("RunEngine", () => { expect(executionData.snapshot.executionStatus).toBe("QUEUED"); let expiredEventData: EventBusEventArgs<"runExpired">[0] | undefined = undefined; - eventBus.on("runExpired", (result) => { + engine.eventBus.on("runExpired", (result) => { expiredEventData = result; }); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 
c684f13a49..275649431f 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -23,7 +23,6 @@ import { PrismaClient, PrismaClientOrTransaction, TaskRun, - TaskRunExecutionSnapshot, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, @@ -31,17 +30,18 @@ import { import assertNever from "assert-never"; import { Redis, type RedisOptions } from "ioredis"; import { nanoid } from "nanoid"; +import { EventEmitter } from "node:events"; import { z } from "zod"; import { RunQueue } from "../run-queue"; import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; import { MinimalAuthenticatedEnvironment } from "../shared"; import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; +import { eventBus, EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { CreatedAttemptMessage, RunExecutionData } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; -import { eventBus } from "./eventBus"; type Options = { redis: RedisOptions; @@ -124,6 +124,7 @@ export class RunEngine { private worker: EngineWorker; private logger = new Logger("RunEngine", "debug"); private tracer: Tracer; + eventBus = new EventEmitter(); constructor(private readonly options: Options) { this.prisma = options.prisma; From 4b935d2e3ef80c8f53a27616f89c8e92369ef704 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 22 Oct 2024 12:24:43 +0100 Subject: [PATCH 055/485] Fix for moving the eventBus into the engine --- internal-packages/run-engine/src/engine/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 275649431f..62822dace4 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ 
b/internal-packages/run-engine/src/engine/index.ts @@ -37,7 +37,7 @@ import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorit import { MinimalAuthenticatedEnvironment } from "../shared"; import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; -import { eventBus, EventBusEvents } from "./eventBus"; +import { EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { CreatedAttemptMessage, RunExecutionData } from "./messages"; @@ -1034,7 +1034,7 @@ export class RunEngine { }, }); - eventBus.emit("runExpired", { run: updatedRun, time: new Date() }); + this.eventBus.emit("runExpired", { run: updatedRun, time: new Date() }); }); } From fda3bbaac67cb8e4cf10b62892d25fa35bb7c875 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 22 Oct 2024 12:49:18 +0100 Subject: [PATCH 056/485] Run completed successfully event (with test) --- .../run-engine/src/engine/eventBus.ts | 11 +++++++++++ .../run-engine/src/engine/index.test.ts | 10 ++++++++++ internal-packages/run-engine/src/engine/index.ts | 14 +++++++++++++- 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index c284c94443..295ececf6e 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -11,6 +11,17 @@ export type EventBusEvents = { }; }, ]; + runCompletedSuccessfully: [ + { + time: Date; + run: { + id: string; + spanId: string; + output: string | undefined; + outputType: string; + }; + }, + ]; }; export type EventBusEventArgs = EventBusEvents[T]; diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 34deeb84f3..730b48d5ce 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ 
b/internal-packages/run-engine/src/engine/index.test.ts @@ -256,6 +256,11 @@ describe("RunEngine", () => { expect(runWaitpoint.waitpoint.type).toBe("RUN"); expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + let event: EventBusEventArgs<"runCompletedSuccessfully">[0] | undefined = undefined; + engine.eventBus.on("runCompletedSuccessfully", (result) => { + event = result; + }); + await engine.completeRunAttempt({ runId: childRun.id, snapshotId: childExecutionData.snapshot.id, @@ -267,6 +272,11 @@ describe("RunEngine", () => { }, }); + //event + assertNonNullable(event); + const completedEvent = event as EventBusEventArgs<"runCompletedSuccessfully">[0]; + expect(completedEvent.run.spanId).toBe(childRun.spanId); + //child snapshot const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); assertNonNullable(childExecutionDataAfter); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 62822dace4..0e96ab75e5 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -920,11 +920,12 @@ export class RunEngine { span.setAttribute("completionStatus", completion.ok); if (completion.ok) { + const completedAt = new Date(); const run = await this.prisma.taskRun.update({ where: { id: runId }, data: { status: "COMPLETED_SUCCESSFULLY", - completedAt: new Date(), + completedAt, output: completion.output, outputType: completion.outputType, executionSnapshots: { @@ -937,6 +938,7 @@ export class RunEngine { }, }, select: { + spanId: true, associatedWaitpoint: { select: { id: true, @@ -961,6 +963,16 @@ export class RunEngine { ? 
{ value: completion.output, type: completion.outputType } : undefined, }); + + this.eventBus.emit("runCompletedSuccessfully", { + time: completedAt, + run: { + id: runId, + spanId: run.spanId, + output: completion.output, + outputType: completion.outputType, + }, + }); } else { const error = sanitizeError(completion.error); //todo look at CompleteAttemptService From f03b3ad150330ab2cccbeee68ebf3f5a0fd76632 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 22 Oct 2024 12:50:15 +0100 Subject: [PATCH 057/485] Added a few more checks to the test --- internal-packages/run-engine/src/engine/index.test.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 730b48d5ce..26318d7723 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -276,6 +276,8 @@ describe("RunEngine", () => { assertNonNullable(event); const completedEvent = event as EventBusEventArgs<"runCompletedSuccessfully">[0]; expect(completedEvent.run.spanId).toBe(childRun.spanId); + expect(completedEvent.run.output).toBe('{"foo":"bar"}'); + expect(completedEvent.run.outputType).toBe("application/json"); //child snapshot const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); From 68a20e9a824e6b5a6244053ea07cfa8d07d4f24a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 22 Oct 2024 17:44:54 +0100 Subject: [PATCH 058/485] Added attempts and a dead letter queue to the RunQueue --- .../run-engine/src/engine/index.ts | 42 +++- .../run-engine/src/run-queue/index.test.ts | 96 ++++++++- .../run-engine/src/run-queue/index.ts | 186 +++++++++++++++++- .../run-engine/src/run-queue/types.ts | 1 + packages/core/src/v3/schemas/common.ts | 1 + 5 files changed, 300 insertions(+), 26 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts 
b/internal-packages/run-engine/src/engine/index.ts index 0e96ab75e5..23970c0d76 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -408,7 +408,7 @@ export class RunEngine { const prisma = tx ?? this.prisma; return this.#trace("createRunAttempt", { consumerId, masterQueue }, async (span) => { //gets a fair run from this shared queue - const message = await this.runQueue.dequeueMessageInSharedQueue(consumerId, masterQueue); + const message = await this.runQueue.dequeueMessageFromMasterQueue(consumerId, masterQueue); if (!message) { return null; } @@ -467,13 +467,20 @@ export class RunEngine { }); if (result.run.runtimeEnvironment.type === "DEVELOPMENT") { - //requeue for 10s in the future, so we can try again - //todo when do we stop doing this, the run.ttl should deal with this. - await this.runQueue.nackMessage( - orgId, - runId, - new Date(Date.now() + 10_000).getTime() - ); + //it will automatically be requeued X times depending on the queue retry settings + const gotRequeued = await this.runQueue.nackMessage(orgId, runId); + + if (!gotRequeued) { + await this.#systemFailure({ + runId: result.run.id, + error: { + type: "INTERNAL_ERROR", + code: "COULD_NOT_FIND_TASK", + message: `We tried to dequeue this DEV run multiple times but could not find the task to run: ${result.run.taskIdentifier}`, + }, + tx: prisma, + }); + } } else { //not deployed yet, so we'll wait for the deploy await this.#waitingForDeploy({ @@ -574,8 +581,20 @@ export class RunEngine { } ); - //try again in 1 second - await this.runQueue.nackMessage(orgId, runId, new Date(Date.now() + 1000).getTime()); + //will auto-retry + const gotRequeued = await this.runQueue.nackMessage(orgId, runId); + if (!gotRequeued) { + await this.#systemFailure({ + runId, + error: { + type: "INTERNAL_ERROR", + code: "TASK_DEQUEUED_QUEUE_NOT_FOUND", + message: `Tried to dequeue the run but the queue doesn't exist: ${lockedTaskRun.queue}`, + }, + tx: 
prisma, + }); + } + return null; } @@ -1257,6 +1276,8 @@ export class RunEngine { await this.runQueue.enqueueMessage({ env, + //todo if the run is locked, use the BackgroundWorker ID + //if not locked then the environmentId master queue masterQueue: run.masterQueue, message: { runId: run.id, @@ -1268,6 +1289,7 @@ export class RunEngine { queue: run.queue, concurrencyKey: run.concurrencyKey ?? undefined, timestamp: Date.now(), + attempt: 0, }, }); } diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 8a3f40ada9..d84f2acb0d 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -7,6 +7,7 @@ import { RunQueueShortKeyProducer } from "./keyProducer.js"; import { SimpleWeightedChoiceStrategy } from "./simpleWeightedPriorityStrategy.js"; import { InputPayload } from "./types.js"; import { abort } from "node:process"; +import { setTimeout } from "node:timers/promises"; const testOptions = { name: "rq", @@ -17,6 +18,13 @@ const testOptions = { defaultEnvConcurrency: 10, enableRebalancing: false, logger: new Logger("RunQueue", "warn"), + retryOptions: { + maxAttempts: 5, + factor: 1.1, + minTimeoutInMs: 100, + maxTimeoutInMs: 1_000, + randomize: true, + }, }; const authenticatedEnvProd = { @@ -44,6 +52,7 @@ const messageProd: InputPayload = { environmentType: "PRODUCTION", queue: "task/my-task", timestamp: Date.now(), + attempt: 0, }; const messageDev: InputPayload = { @@ -55,6 +64,7 @@ const messageDev: InputPayload = { environmentType: "DEVELOPMENT", queue: "task/my-task", timestamp: Date.now(), + attempt: 0, }; describe("RunQueue", () => { @@ -183,7 +193,7 @@ describe("RunQueue", () => { ); expect(taskConcurrency).toBe(0); - const dequeued = await queue.dequeueMessageInSharedQueue("test_12345", envMasterQueue); + const dequeued = await queue.dequeueMessageFromMasterQueue("test_12345", envMasterQueue); 
expect(dequeued?.messageId).toEqual(messageDev.runId); expect(dequeued?.message.orgId).toEqual(messageDev.orgId); expect(dequeued?.message.version).toEqual("1"); @@ -205,7 +215,7 @@ describe("RunQueue", () => { ); expect(taskConcurrency2).toBe(1); - const dequeued2 = await queue.dequeueMessageInSharedQueue("test_12345", envMasterQueue); + const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", envMasterQueue); expect(dequeued2).toBe(undefined); } finally { await queue.quit(); @@ -269,7 +279,7 @@ describe("RunQueue", () => { expect(taskConcurrency).toBe(0); //dequeue - const dequeued = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + const dequeued = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); expect(dequeued?.messageId).toEqual(messageProd.runId); expect(dequeued?.message.orgId).toEqual(messageProd.orgId); expect(dequeued?.message.version).toEqual("1"); @@ -295,7 +305,7 @@ describe("RunQueue", () => { const length2 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); expect(length2).toBe(0); - const dequeued2 = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); expect(dequeued2).toBe(undefined); } finally { await queue.quit(); @@ -346,7 +356,7 @@ describe("RunQueue", () => { masterQueue: "main", }); - const message = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); expect(message).toBeDefined(); //check the message is gone @@ -377,14 +387,14 @@ describe("RunQueue", () => { expect(exists2).toBe(0); //dequeue - const message2 = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + const message2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); expect(message2).toBeUndefined(); } finally { await queue.quit(); } }); - redisTest("Nacking", { timeout: 5_000 }, async ({ 
redisContainer, redis }) => { + redisTest("Nacking", { timeout: 15_000 }, async ({ redisContainer, redis }) => { const queue = new RunQueue({ ...testOptions, redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, @@ -394,10 +404,10 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvProd, message: messageProd, - masterQueue: "main", + masterQueue: "main2", }); - const message = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main2"); expect(message).toBeDefined(); //check the message is there @@ -423,6 +433,9 @@ describe("RunQueue", () => { await queue.nackMessage(message!.message.orgId, message!.messageId); + //we need to wait because the default wait is 1 second + await setTimeout(300); + //concurrencies const queueConcurrency2 = await queue.currentConcurrencyOfQueue( authenticatedEnvProd, @@ -444,10 +457,73 @@ describe("RunQueue", () => { expect(exists2).toBe(1); //dequeue - const message2 = await queue.dequeueMessageInSharedQueue("test_12345", "main"); + const message2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main2"); expect(message2?.messageId).toBe(messageProd.runId); } finally { await queue.quit(); } }); + + redisTest("Dead Letter Queue", { timeout: 5_000 }, async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + retryOptions: { + maxAttempts: 1, + }, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message: messageProd, + masterQueue: "main", + }); + + const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); + expect(message).toBeDefined(); + + //check the message is there + const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const exists = await redis.exists(key); + expect(exists).toBe(1); + + //nack (we only 
have attempts set to 1) + await queue.nackMessage(message!.message.orgId, message!.messageId); + + //dequeue + const message2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); + expect(message2?.messageId).toBeUndefined(); + + //concurrencies + const queueConcurrency2 = await queue.currentConcurrencyOfQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(queueConcurrency2).toBe(0); + const envConcurrency2 = await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd); + expect(envConcurrency2).toBe(0); + const projectConcurrency2 = await queue.currentConcurrencyOfProject(authenticatedEnvProd); + expect(projectConcurrency2).toBe(0); + const taskConcurrency2 = await queue.currentConcurrencyOfTask( + authenticatedEnvProd, + messageProd.taskIdentifier + ); + expect(taskConcurrency2).toBe(0); + + //check the message is still there + const exists2 = await redis.exists(key); + expect(exists2).toBe(1); + + //check it's in the dlq + const dlqKey = "dlq"; + const dlqExists = await redis.exists(dlqKey); + expect(dlqExists).toBe(1); + + const dlqMembers = await redis.zrange(dlqKey, 0, -1); + expect(dlqMembers).toContain(messageProd.runId); + } finally { + await queue.quit(); + } + }); }); diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 83241d16ab..8920099d92 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -42,12 +42,34 @@ export type RunQueueOptions = { enableRebalancing?: boolean; verbose?: boolean; logger: Logger; + retryOptions?: RetryOptions; }; +//todo +//1. Track the number of attempts in the payload +//2. When nacking, pass in `maxAttempts`, bump the `attempt` number each time. +//3. Check if > maxAttempts, if so, send to a dead letter queue +//4. Add redrive using Redis pubsub to send back to the queue +//5. 
For everything else, like enqueue reset the `attempt` to zero + /** * RunQueue – the queue that's used to process runs */ + +import { type RetryOptions } from "@trigger.dev/core/v3/schemas"; +import { calculateNextRetryDelay } from "@trigger.dev/core/v3"; + +const defaultRetrySettings = { + maxAttempts: 12, + factor: 2, + minTimeoutInMs: 1_000, + maxTimeoutInMs: 3_600_000, + randomize: true, +}; + export class RunQueue { + private retryOptions: RetryOptions; + private subscriber: Redis; private logger: Logger; private redis: Redis; public keys: RunQueueKeyProducer; @@ -55,12 +77,16 @@ export class RunQueue { #rebalanceWorkers: Array = []; constructor(private readonly options: RunQueueOptions) { + this.retryOptions = options.retryOptions ?? defaultRetrySettings; this.redis = new Redis(options.redis); this.logger = options.logger; this.keys = new RunQueueShortKeyProducer("rq:"); this.queuePriorityStrategy = options.queuePriorityStrategy; + this.subscriber = new Redis(options.redis); + this.#setupSubscriber(); + this.#registerCommands(); } @@ -184,6 +210,7 @@ export class RunQueue { version: "1", queue, masterQueue, + attempt: 0, }; await this.#callEnqueueMessage(messagePayload, masterQueue); @@ -232,7 +259,7 @@ export class RunQueue { /** * Dequeue a message from the shared queue (this should be used in production environments) */ - public async dequeueMessageInSharedQueue(consumerId: string, masterQueue: string) { + public async dequeueMessageFromMasterQueue(consumerId: string, masterQueue: string) { return this.#trace( "dequeueMessageInSharedQueue", async (span) => { @@ -343,15 +370,18 @@ export class RunQueue { /** * Negative acknowledge a message, which will requeue the message (with an optional future date) */ - public async nackMessage(orgId: string, messageId: string, retryAt: number = Date.now()) { + public async nackMessage(orgId: string, messageId: string, retryAt?: number) { return this.#trace( "nackMessage", async (span) => { + const maxAttempts = 
this.retryOptions.maxAttempts ?? defaultRetrySettings.maxAttempts; + const message = await this.#readMessage(orgId, messageId); if (!message) { this.logger.log(`[${this.name}].nackMessage() message not found`, { orgId, messageId, + maxAttempts, retryAt, service: this.name, }); @@ -378,7 +408,24 @@ export class RunQueue { message.queue ); - const messageScore = retryAt; + message.attempt = message.attempt + 1; + if (message.attempt >= maxAttempts) { + await this.redis.moveToDeadLetterQueue( + parentQueue, + messageKey, + messageQueue, + concurrencyKey, + envConcurrencyKey, + projectConcurrencyKey, + taskConcurrencyKey, + "dlq", + messageId + ); + return false; + } + + const nextRetryDelay = calculateNextRetryDelay(this.retryOptions, message.attempt); + const messageScore = retryAt ?? (nextRetryDelay ? Date.now() + nextRetryDelay : Date.now()); this.logger.debug("Calling nackMessage", { messageKey, @@ -390,6 +437,7 @@ export class RunQueue { taskConcurrencyKey, messageId, messageScore, + attempt: message.attempt, service: this.name, }); @@ -404,8 +452,10 @@ export class RunQueue { taskConcurrencyKey, //args messageId, + JSON.stringify(message), String(messageScore) ); + return true; }, { kind: SpanKind.CONSUMER, @@ -509,9 +559,28 @@ export class RunQueue { async quit() { await Promise.all(this.#rebalanceWorkers.map((worker) => worker.stop())); + await this.subscriber.unsubscribe(); + await this.subscriber.quit(); await this.redis.quit(); } + // private async handleRedriveMessage(channel: string, message: string) { + // try { + // const { id } = JSON.parse(message); + // if (typeof id !== "string") { + // throw new Error("Invalid message format: id must be a string"); + // } + // await this.enqueueMessage({ + // env: { orgId: "", id: "" }, // You might need to adjust this based on your actual implementation + // message: (await this.#readMessage("", id)) as InputPayload, + // masterQueue: this.keys.deadLetterQueueKey(""), + // }); + // this.logger.log(`Redrived item 
${id} from Dead Letter Queue`); + // } catch (error) { + // this.logger.error("Error processing redrive message", { error, message }); + // } + // } + async #trace( name: string, fn: (span: Span) => Promise, @@ -543,6 +612,20 @@ export class RunQueue { ); } + async #setupSubscriber() { + const channel = `${this.options.name}:redrive`; + this.subscriber.subscribe(channel, (err) => { + if (err) { + this.logger.error(`Failed to subscribe to ${channel}`, { error: err }); + } else { + this.logger.log(`Subscribed to ${channel}`); + } + }); + + //todo + // this.subscriber.on("message", this.handleRedriveMessage.bind(this)); + } + async #readMessage(orgId: string, messageId: string) { return this.#trace( "readMessage", @@ -1103,7 +1186,11 @@ local taskConcurrencyKey = KEYS[7] -- Args: local messageId = ARGV[1] -local messageScore = tonumber(ARGV[2]) +local messageData = ARGV[2] +local messageScore = tonumber(ARGV[3]) + +-- Update the message data +redis.call('SET', messageKey, messageData) -- Update the concurrency keys redis.call('SREM', concurrencyKey, messageId) @@ -1124,6 +1211,44 @@ end `, }); + this.redis.defineCommand("moveToDeadLetterQueue", { + numberOfKeys: 8, + lua: ` +-- Keys: +local parentQueue = KEYS[1] +local messageKey = KEYS[2] +local messageQueue = KEYS[3] +local concurrencyKey = KEYS[4] +local envCurrentConcurrencyKey = KEYS[5] +local projectCurrentConcurrencyKey = KEYS[6] +local taskCurrentConcurrencyKey = KEYS[7] +local deadLetterQueueKey = KEYS[8] + +-- Args: +local messageId = ARGV[1] + +-- Remove the message from the queue +redis.call('ZREM', messageQueue, messageId) + +-- Rebalance the parent queue +local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') +if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, messageQueue) +else + redis.call('ZADD', parentQueue, earliestMessage[2], messageQueue) +end + +-- Add the message to the dead letter queue +redis.call('ZADD', deadLetterQueueKey, 
tonumber(redis.call('TIME')[1]), messageId) + +-- Update the concurrency keys +redis.call('SREM', concurrencyKey, messageId) +redis.call('SREM', envCurrentConcurrencyKey, messageId) +redis.call('SREM', projectCurrentConcurrencyKey, messageId) +redis.call('SREM', taskCurrentConcurrencyKey, messageId) +`, + }); + this.redis.defineCommand("releaseConcurrency", { numberOfKeys: 3, lua: ` @@ -1197,13 +1322,48 @@ return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurr `, }); + this.redis.defineCommand("redriveFromDeadLetterQueue", { + numberOfKeys: 3, + lua: ` + -- Keys: + local deadLetterQueueKey = KEYS[1] + local messageQueueKey = KEYS[2] + local parentQueueKey = KEYS[3] + + -- Args: + local messageId = ARGV[1] + + -- Get the message data from the dead letter queue + local messageData = redis.call('GET', messageId) + + if not messageData then + return redis.error_reply("Message not found in dead letter queue") + end + + -- Remove the message from the dead letter queue + redis.call('ZREM', deadLetterQueueKey, messageId) + + -- Add the message back to the original queue + local currentTime = redis.call('TIME')[1] + redis.call('ZADD', messageQueueKey, currentTime, messageId) + + -- Rebalance the parent queue + local earliestMessage = redis.call('ZRANGE', messageQueueKey, 0, 0, 'WITHSCORES') + if #earliestMessage > 0 then + redis.call('ZADD', parentQueueKey, earliestMessage[2], messageQueueKey) + end + + return redis.status_reply("OK") + `, + }); + this.redis.defineCommand("updateGlobalConcurrencyLimits", { numberOfKeys: 1, lua: ` --- Keys: envConcurrencyLimitKey, orgConcurrencyLimitKey +-- Keys: envConcurrencyLimitKey local envConcurrencyLimitKey = KEYS[1] --- Args: envConcurrencyLimit, orgConcurrencyLimit +-- Args: envConcurrencyLimit local envConcurrencyLimit = ARGV[1] redis.call('SET', envConcurrencyLimitKey, envConcurrencyLimit) @@ -1301,10 +1461,24 @@ declare module "ioredis" { projectConcurrencyKey: string, taskConcurrencyKey: string, 
messageId: string, + messageData: string, messageScore: string, callback?: Callback ): Result; + moveToDeadLetterQueue( + parentQueue: string, + messageKey: string, + messageQueue: string, + concurrencyKey: string, + envConcurrencyKey: string, + projectConcurrencyKey: string, + taskConcurrencyKey: string, + deadLetterQueueKey: string, + messageId: string, + callback?: Callback + ): Result; + releaseConcurrency( concurrencyKey: string, envConcurrencyKey: string, diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index 914193fb85..e6c7125b78 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -12,6 +12,7 @@ export const InputPayload = z.object({ queue: z.string(), concurrencyKey: z.string().optional(), timestamp: z.number(), + attempt: z.number(), }); export type InputPayload = z.infer; diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index eb57873d3d..7c88533173 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -127,6 +127,7 @@ export const TaskRunInternalError = z.object({ "POD_UNKNOWN_ERROR", "TASK_HAS_N0_EXECUTION_SNAPSHOT", "TASK_DEQUEUED_INVALID_STATE", + "TASK_DEQUEUED_QUEUE_NOT_FOUND", ]), message: z.string().optional(), stackTrace: z.string().optional(), From b175d2b195d15fd34e8bb41a40d910c99094aedc Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 11:23:45 +0100 Subject: [PATCH 059/485] Redriving is working --- .../run-engine/src/run-queue/index.test.ts | 34 +++++- .../run-engine/src/run-queue/index.ts | 108 +++++++++--------- 2 files changed, 84 insertions(+), 58 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index d84f2acb0d..733e833875 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ 
b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -1,13 +1,12 @@ +import { redisTest } from "@internal/testcontainers"; import { trace } from "@opentelemetry/api"; import { Logger } from "@trigger.dev/core/logger"; +import Redis from "ioredis"; import { describe } from "node:test"; -import { redisTest } from "@internal/testcontainers"; +import { setTimeout } from "node:timers/promises"; import { RunQueue } from "./index.js"; -import { RunQueueShortKeyProducer } from "./keyProducer.js"; import { SimpleWeightedChoiceStrategy } from "./simpleWeightedPriorityStrategy.js"; import { InputPayload } from "./types.js"; -import { abort } from "node:process"; -import { setTimeout } from "node:timers/promises"; const testOptions = { name: "rq", @@ -464,7 +463,7 @@ describe("RunQueue", () => { } }); - redisTest("Dead Letter Queue", { timeout: 5_000 }, async ({ redisContainer, redis }) => { + redisTest("Dead Letter Queue", { timeout: 8_000 }, async ({ redisContainer, redis }) => { const queue = new RunQueue({ ...testOptions, retryOptions: { @@ -519,9 +518,32 @@ describe("RunQueue", () => { const dlqKey = "dlq"; const dlqExists = await redis.exists(dlqKey); expect(dlqExists).toBe(1); - const dlqMembers = await redis.zrange(dlqKey, 0, -1); expect(dlqMembers).toContain(messageProd.runId); + + //redrive + const redisClient = new Redis({ + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + }); + + // Publish redrive message + await redisClient.publish( + "rq:redrive", + JSON.stringify({ runId: messageProd.runId, orgId: messageProd.orgId }) + ); + + // Wait for the item to be redrived and processed + await setTimeout(5_000); + + //shouldn't be in the dlq now + const dlqMembersAfter = await redis.zrange(dlqKey, 0, -1); + expect(dlqMembersAfter).not.toContain(messageProd.runId); + + //dequeue + const message3 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); + 
expect(message3?.messageId).toBe(messageProd.runId); } finally { await queue.quit(); } diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 8920099d92..ae2d611937 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -564,22 +564,61 @@ export class RunQueue { await this.redis.quit(); } - // private async handleRedriveMessage(channel: string, message: string) { - // try { - // const { id } = JSON.parse(message); - // if (typeof id !== "string") { - // throw new Error("Invalid message format: id must be a string"); - // } - // await this.enqueueMessage({ - // env: { orgId: "", id: "" }, // You might need to adjust this based on your actual implementation - // message: (await this.#readMessage("", id)) as InputPayload, - // masterQueue: this.keys.deadLetterQueueKey(""), - // }); - // this.logger.log(`Redrived item ${id} from Dead Letter Queue`); - // } catch (error) { - // this.logger.error("Error processing redrive message", { error, message }); - // } - // } + private async handleRedriveMessage(channel: string, message: string) { + try { + const { runId, orgId } = JSON.parse(message); + if (typeof orgId !== "string" || typeof runId !== "string") { + this.logger.error( + "handleRedriveMessage: invalid message format: runId and orgId must be strings", + { message, channel } + ); + return; + } + + const data = await this.#readMessage(orgId, runId); + + if (!data) { + this.logger.error(`handleRedriveMessage: couldn't read message`, { orgId, runId, channel }); + return; + } + + await this.enqueueMessage({ + env: { + id: data.environmentId, + type: data.environmentType, + //this isn't used in enqueueMessage + maximumConcurrencyLimit: -1, + project: { + id: data.projectId, + }, + organization: { + id: data.orgId, + }, + }, + message: { + ...data, + attempt: 0, + }, + masterQueue: data.masterQueue, + }); + + //remove from the dlq + 
const result = await this.redis.zrem("dlq", runId); + + if (result === 0) { + this.logger.error(`handleRedriveMessage: couldn't remove message from dlq`, { + orgId, + runId, + channel, + }); + return; + } + + this.logger.log(`handleRedriveMessage: redrived item ${runId} from Dead Letter Queue`); + } catch (error) { + this.logger.error("Error processing redrive message", { error, message }); + } + } async #trace( name: string, @@ -623,7 +662,7 @@ export class RunQueue { }); //todo - // this.subscriber.on("message", this.handleRedriveMessage.bind(this)); + this.subscriber.on("message", this.handleRedriveMessage.bind(this)); } async #readMessage(orgId: string, messageId: string) { @@ -1322,41 +1361,6 @@ return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurr `, }); - this.redis.defineCommand("redriveFromDeadLetterQueue", { - numberOfKeys: 3, - lua: ` - -- Keys: - local deadLetterQueueKey = KEYS[1] - local messageQueueKey = KEYS[2] - local parentQueueKey = KEYS[3] - - -- Args: - local messageId = ARGV[1] - - -- Get the message data from the dead letter queue - local messageData = redis.call('GET', messageId) - - if not messageData then - return redis.error_reply("Message not found in dead letter queue") - end - - -- Remove the message from the dead letter queue - redis.call('ZREM', deadLetterQueueKey, messageId) - - -- Add the message back to the original queue - local currentTime = redis.call('TIME')[1] - redis.call('ZADD', messageQueueKey, currentTime, messageId) - - -- Rebalance the parent queue - local earliestMessage = redis.call('ZRANGE', messageQueueKey, 0, 0, 'WITHSCORES') - if #earliestMessage > 0 then - redis.call('ZADD', parentQueueKey, earliestMessage[2], messageQueueKey) - end - - return redis.status_reply("OK") - `, - }); - this.redis.defineCommand("updateGlobalConcurrencyLimits", { numberOfKeys: 1, lua: ` From d0a7ead5d3e173ea5688f64e4beac5ba131ee08b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 11:34:43 +0100 
Subject: [PATCH 060/485] Remove todos --- internal-packages/run-engine/src/run-queue/index.ts | 8 -------- 1 file changed, 8 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index ae2d611937..5f2cb94776 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -45,13 +45,6 @@ export type RunQueueOptions = { retryOptions?: RetryOptions; }; -//todo -//1. Track the number of attempts in the payload -//2. When nacking, pass in `maxAttempts`, bump the `attempt` number each time. -//3. Check if > maxAttempts, if so, send to a dead letter queue -//4. Add redrive using Redis pubsub to send back to the queue -//5. For everything else, like enqueue reset the `attempt` to zero - /** * RunQueue – the queue that's used to process runs */ @@ -661,7 +654,6 @@ export class RunQueue { } }); - //todo this.subscriber.on("message", this.handleRedriveMessage.bind(this)); } From 64ee9deb4348f8ed75553a0cdce0bb0f8a08f56f Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 12:05:10 +0100 Subject: [PATCH 061/485] Updated the comment --- internal-packages/run-engine/src/run-queue/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 5f2cb94776..2fc35e3cd1 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -250,7 +250,7 @@ export class RunQueue { } /** - * Dequeue a message from the shared queue (this should be used in production environments) + * Dequeue a message */ public async dequeueMessageFromMasterQueue(consumerId: string, masterQueue: string) { return this.#trace( From 5632abd231007c687c4e180c9a5b4eb7f4e85886 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 13:21:02 +0100 Subject: [PATCH 062/485] Added 
support for multiple master queues --- .../run-engine/src/run-queue/index.test.ts | 16 +- .../run-engine/src/run-queue/index.ts | 256 ++++++++---------- .../run-engine/src/run-queue/types.ts | 2 +- 3 files changed, 121 insertions(+), 153 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 733e833875..fa1f2423d0 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -162,7 +162,7 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvDev, message: messageDev, - masterQueue: `env:${authenticatedEnvDev.id}`, + masterQueues: `env:${authenticatedEnvDev.id}`, }); //queue length @@ -196,7 +196,7 @@ describe("RunQueue", () => { expect(dequeued?.messageId).toEqual(messageDev.runId); expect(dequeued?.message.orgId).toEqual(messageDev.orgId); expect(dequeued?.message.version).toEqual("1"); - expect(dequeued?.message.masterQueue).toEqual(envMasterQueue); + expect(dequeued?.message.masterQueues).toEqual([envMasterQueue]); //concurrencies const queueConcurrency2 = await queue.currentConcurrencyOfQueue( @@ -247,7 +247,7 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvProd, message: messageProd, - masterQueue: "main", + masterQueues: "main", }); //queue length @@ -282,7 +282,7 @@ describe("RunQueue", () => { expect(dequeued?.messageId).toEqual(messageProd.runId); expect(dequeued?.message.orgId).toEqual(messageProd.orgId); expect(dequeued?.message.version).toEqual("1"); - expect(dequeued?.message.masterQueue).toEqual("main"); + expect(dequeued?.message.masterQueues).toEqual(["main"]); //concurrencies const queueConcurrency2 = await queue.currentConcurrencyOfQueue( @@ -327,7 +327,7 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvProd, message: messageProd, - masterQueue: "main", + masterQueues: "main", }); const 
result2 = await queue.getSharedQueueDetails("main"); @@ -352,7 +352,7 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvProd, message: messageProd, - masterQueue: "main", + masterQueues: "main", }); const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); @@ -403,7 +403,7 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvProd, message: messageProd, - masterQueue: "main2", + masterQueues: "main2", }); const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main2"); @@ -476,7 +476,7 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvProd, message: messageProd, - masterQueue: "main", + masterQueues: "main", }); const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 2fc35e3cd1..a603373a7f 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -24,7 +24,7 @@ import { RunQueueShortKeyProducer } from "./keyProducer.js"; const SemanticAttributes = { QUEUE: "runqueue.queue", - MASTER_QUEUE: "runqueue.masterQueue", + MASTER_QUEUES: "runqueue.masterQueues", RUN_ID: "runqueue.runId", CONCURRENCY_KEY: "runqueue.concurrencyKey", ORG_ID: "runqueue.orgId", @@ -176,11 +176,11 @@ export class RunQueue { public async enqueueMessage({ env, message, - masterQueue, + masterQueues, }: { env: MinimalAuthenticatedEnvironment; message: InputPayload; - masterQueue: string; + masterQueues: string | string[]; }) { return await this.#trace( "enqueueMessage", @@ -191,22 +191,24 @@ export class RunQueue { propagation.inject(context.active(), message); + const parentQueues = typeof masterQueues === "string" ? 
[masterQueues] : masterQueues; + span.setAttributes({ [SemanticAttributes.QUEUE]: queue, [SemanticAttributes.RUN_ID]: runId, [SemanticAttributes.CONCURRENCY_KEY]: concurrencyKey, - [SemanticAttributes.MASTER_QUEUE]: masterQueue, + [SemanticAttributes.MASTER_QUEUES]: parentQueues.join(","), }); const messagePayload: OutputPayload = { ...message, version: "1", queue, - masterQueue, + masterQueues: parentQueues, attempt: 0, }; - await this.#callEnqueueMessage(messagePayload, masterQueue); + await this.#callEnqueueMessage(messagePayload, parentQueues); }, { kind: SpanKind.PRODUCER, @@ -271,7 +273,6 @@ export class RunQueue { // If the queue includes a concurrency key, we need to remove the ck:concurrencyKey from the queue name const message = await this.#callDequeueMessage({ messageQueue, - masterQueue, concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(messageQueue), currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(messageQueue), envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(messageQueue), @@ -292,7 +293,7 @@ export class RunQueue { [SemanticAttributes.QUEUE]: message.message.queue, [SemanticAttributes.RUN_ID]: message.message.runId, [SemanticAttributes.CONCURRENCY_KEY]: message.message.concurrencyKey, - [SemanticAttributes.MASTER_QUEUE]: masterQueue, + [SemanticAttributes.MASTER_QUEUES]: masterQueue, }); return message; @@ -338,7 +339,7 @@ export class RunQueue { await this.#callAcknowledgeMessage({ messageId, messageQueue: message.queue, - masterQueue: message.masterQueue, + masterQueues: message.masterQueues, messageKey: this.keys.messageKey(orgId, messageId), concurrencyKey: this.keys.currentConcurrencyKeyFromQueue(message.queue), envConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(message.queue), @@ -385,12 +386,11 @@ export class RunQueue { [SemanticAttributes.QUEUE]: message.queue, [SemanticAttributes.RUN_ID]: messageId, [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey, - 
[SemanticAttributes.MASTER_QUEUE]: message.masterQueue, + [SemanticAttributes.MASTER_QUEUES]: message.masterQueues.join(","), }); const messageKey = this.keys.messageKey(orgId, messageId); const messageQueue = message.queue; - const parentQueue = message.masterQueue; const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue); const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue); const taskConcurrencyKey = this.keys.taskIdentifierCurrentConcurrencyKeyFromQueue( @@ -404,7 +404,6 @@ export class RunQueue { message.attempt = message.attempt + 1; if (message.attempt >= maxAttempts) { await this.redis.moveToDeadLetterQueue( - parentQueue, messageKey, messageQueue, concurrencyKey, @@ -412,7 +411,8 @@ export class RunQueue { projectConcurrencyKey, taskConcurrencyKey, "dlq", - messageId + messageId, + JSON.stringify(message.masterQueues) ); return false; } @@ -423,7 +423,7 @@ export class RunQueue { this.logger.debug("Calling nackMessage", { messageKey, messageQueue, - parentQueue, + masterQueues: message.masterQueues, concurrencyKey, envConcurrencyKey, projectConcurrencyKey, @@ -438,7 +438,6 @@ export class RunQueue { //keys messageKey, messageQueue, - parentQueue, concurrencyKey, envConcurrencyKey, projectConcurrencyKey, @@ -446,7 +445,8 @@ export class RunQueue { //args messageId, JSON.stringify(message), - String(messageScore) + String(messageScore), + JSON.stringify(message.masterQueues) ); return true; }, @@ -592,7 +592,7 @@ export class RunQueue { ...data, attempt: 0, }, - masterQueue: data.masterQueue, + masterQueues: data.masterQueues, }); //remove from the dlq @@ -774,7 +774,7 @@ export class RunQueue { attributes: { [SEMATTRS_MESSAGING_OPERATION]: "receive", [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", - [SemanticAttributes.MASTER_QUEUE]: parentQueue, + [SemanticAttributes.MASTER_QUEUES]: parentQueue, }, } ); @@ -845,7 +845,7 @@ export class RunQueue { return result; } - async #callEnqueueMessage(message: 
OutputPayload, parentQueue: string) { + async #callEnqueueMessage(message: OutputPayload, masterQueues: string[]) { const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue); const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue); const taskConcurrencyKey = this.keys.taskIdentifierCurrentConcurrencyKeyFromQueue( @@ -858,12 +858,12 @@ export class RunQueue { messagePayload: message, concurrencyKey, envConcurrencyKey, + masterQueues, service: this.name, }); return this.redis.enqueueMessage( message.queue, - parentQueue, this.keys.messageKey(message.orgId, message.runId), concurrencyKey, envConcurrencyKey, @@ -872,13 +872,13 @@ export class RunQueue { message.queue, message.runId, JSON.stringify(message), - String(message.timestamp) + String(message.timestamp), + JSON.stringify(masterQueues) ); } async #callDequeueMessage({ messageQueue, - masterQueue, concurrencyLimitKey, envConcurrencyLimitKey, currentConcurrencyKey, @@ -888,7 +888,6 @@ export class RunQueue { taskCurrentConcurrentKeyPrefix, }: { messageQueue: string; - masterQueue: string; concurrencyLimitKey: string; envConcurrencyLimitKey: string; currentConcurrencyKey: string; @@ -900,7 +899,6 @@ export class RunQueue { const result = await this.redis.dequeueMessage( //keys messageQueue, - masterQueue, concurrencyLimitKey, envConcurrencyLimitKey, currentConcurrencyKey, @@ -956,7 +954,7 @@ export class RunQueue { async #callAcknowledgeMessage({ messageId, - masterQueue, + masterQueues, messageKey, messageQueue, concurrencyKey, @@ -964,7 +962,7 @@ export class RunQueue { taskConcurrencyKey, projectConcurrencyKey, }: { - masterQueue: string; + masterQueues: string[]; messageKey: string; messageQueue: string; concurrencyKey: string; @@ -981,19 +979,19 @@ export class RunQueue { projectConcurrencyKey, taskConcurrencyKey, messageId, - masterQueue, + masterQueues, service: this.name, }); return this.redis.acknowledgeMessage( - masterQueue, messageKey, messageQueue, 
concurrencyKey, envConcurrencyKey, projectConcurrencyKey, taskConcurrencyKey, - messageId + messageId, + JSON.stringify(masterQueues) ); } @@ -1056,20 +1054,20 @@ export class RunQueue { #registerCommands() { this.redis.defineCommand("enqueueMessage", { - numberOfKeys: 7, + numberOfKeys: 6, lua: ` local queue = KEYS[1] -local parentQueue = KEYS[2] -local messageKey = KEYS[3] -local concurrencyKey = KEYS[4] -local envConcurrencyKey = KEYS[5] -local taskConcurrencyKey = KEYS[6] -local projectConcurrencyKey = KEYS[7] +local messageKey = KEYS[2] +local concurrencyKey = KEYS[3] +local envConcurrencyKey = KEYS[4] +local taskConcurrencyKey = KEYS[5] +local projectConcurrencyKey = KEYS[6] local queueName = ARGV[1] local messageId = ARGV[2] local messageData = ARGV[3] local messageScore = ARGV[4] +local parentQueues = cjson.decode(ARGV[5]) -- Write the message to the message key redis.call('SET', messageKey, messageData) @@ -1077,12 +1075,14 @@ redis.call('SET', messageKey, messageData) -- Add the message to the queue redis.call('ZADD', queue, messageScore, messageId) --- Rebalance the parent queue -local earliestMessage = redis.call('ZRANGE', queue, 0, 0, 'WITHSCORES') -if #earliestMessage == 0 then - redis.call('ZREM', parentQueue, queueName) -else - redis.call('ZADD', parentQueue, earliestMessage[2], queueName) +-- Rebalance the parent queues +for _, parentQueue in ipairs(parentQueues) do + local earliestMessage = redis.call('ZRANGE', queue, 0, 0, 'WITHSCORES') + if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, queueName) + else + redis.call('ZADD', parentQueue, earliestMessage[2], queueName) + end end -- Update the concurrency keys @@ -1094,17 +1094,16 @@ redis.call('SREM', projectConcurrencyKey, messageId) }); this.redis.defineCommand("dequeueMessage", { - numberOfKeys: 9, + numberOfKeys: 8, lua: ` local childQueue = KEYS[1] -local parentQueue = KEYS[2] -local concurrencyLimitKey = KEYS[3] -local envConcurrencyLimitKey = KEYS[4] -local 
currentConcurrencyKey = KEYS[5] -local envCurrentConcurrencyKey = KEYS[6] -local projectConcurrencyKey = KEYS[7] -local messageKeyPrefix = KEYS[8] -local taskCurrentConcurrentKeyPrefix = KEYS[9] +local concurrencyLimitKey = KEYS[2] +local envConcurrencyLimitKey = KEYS[3] +local currentConcurrencyKey = KEYS[4] +local envCurrentConcurrencyKey = KEYS[5] +local projectConcurrencyKey = KEYS[6] +local messageKeyPrefix = KEYS[7] +local taskCurrentConcurrentKeyPrefix = KEYS[8] local childQueueName = ARGV[1] local currentTime = tonumber(ARGV[2]) @@ -1140,9 +1139,10 @@ local messageScore = tonumber(messages[2]) -- Get the message payload local messageKey = messageKeyPrefix .. messageId local messagePayload = redis.call('GET', messageKey) +local decodedPayload = cjson.decode(messagePayload); --- Parse JSON payload and extract taskIdentifier -local taskIdentifier = cjson.decode(messagePayload).taskIdentifier +-- Extract taskIdentifier +local taskIdentifier = decodedPayload.taskIdentifier -- Perform SADD with taskIdentifier and messageId local taskConcurrencyKey = taskCurrentConcurrentKeyPrefix .. 
taskIdentifier @@ -1154,12 +1154,14 @@ redis.call('SADD', envCurrentConcurrencyKey, messageId) redis.call('SADD', projectConcurrencyKey, messageId) redis.call('SADD', taskConcurrencyKey, messageId) --- Rebalance the parent queue -local earliestMessage = redis.call('ZRANGE', childQueue, 0, 0, 'WITHSCORES') -if #earliestMessage == 0 then - redis.call('ZREM', parentQueue, childQueueName) -else - redis.call('ZADD', parentQueue, earliestMessage[2], childQueueName) +-- Rebalance the parent queues +for _, parentQueue in ipairs(decodedPayload.masterQueues) do + local earliestMessage = redis.call('ZRANGE', childQueue, 0, 0, 'WITHSCORES') + if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, childQueue) + else + redis.call('ZADD', parentQueue, earliestMessage[2], childQueue) + end end return {messageId, messageScore, messagePayload} -- Return message details @@ -1167,19 +1169,19 @@ return {messageId, messageScore, messagePayload} -- Return message details }); this.redis.defineCommand("acknowledgeMessage", { - numberOfKeys: 7, + numberOfKeys: 6, lua: ` -- Keys: -local parentQueue = KEYS[1] -local messageKey = KEYS[2] -local messageQueue = KEYS[3] -local concurrencyKey = KEYS[4] -local envCurrentConcurrencyKey = KEYS[5] -local projectCurrentConcurrencyKey = KEYS[6] -local taskCurrentConcurrencyKey = KEYS[7] +local messageKey = KEYS[1] +local messageQueue = KEYS[2] +local concurrencyKey = KEYS[3] +local envCurrentConcurrencyKey = KEYS[4] +local projectCurrentConcurrencyKey = KEYS[5] +local taskCurrentConcurrencyKey = KEYS[6] -- Args: local messageId = ARGV[1] +local parentQueues = cjson.decode(ARGV[2]) -- Remove the message from the message key redis.call('DEL', messageKey) @@ -1187,12 +1189,14 @@ redis.call('DEL', messageKey) -- Remove the message from the queue redis.call('ZREM', messageQueue, messageId) --- Rebalance the parent queue -local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') -if #earliestMessage == 0 then - redis.call('ZREM', 
parentQueue, messageQueue) -else - redis.call('ZADD', parentQueue, earliestMessage[2], messageQueue) +-- Rebalance the parent queues +for _, parentQueue in ipairs(parentQueues) do + local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') + if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, messageQueue) + else + redis.call('ZADD', parentQueue, earliestMessage[2], messageQueue) + end end -- Update the concurrency keys @@ -1204,21 +1208,21 @@ redis.call('SREM', taskCurrentConcurrencyKey, messageId) }); this.redis.defineCommand("nackMessage", { - numberOfKeys: 7, + numberOfKeys: 6, lua: ` -- Keys: local messageKey = KEYS[1] local messageQueueKey = KEYS[2] -local parentQueueKey = KEYS[3] -local concurrencyKey = KEYS[4] -local envConcurrencyKey = KEYS[5] -local projectConcurrencyKey = KEYS[6] -local taskConcurrencyKey = KEYS[7] +local concurrencyKey = KEYS[3] +local envConcurrencyKey = KEYS[4] +local projectConcurrencyKey = KEYS[5] +local taskConcurrencyKey = KEYS[6] -- Args: local messageId = ARGV[1] local messageData = ARGV[2] local messageScore = tonumber(ARGV[3]) +local parentQueues = cjson.decode(ARGV[4]) -- Update the message data redis.call('SET', messageKey, messageData) @@ -1232,41 +1236,45 @@ redis.call('SREM', taskConcurrencyKey, messageId) -- Enqueue the message into the queue redis.call('ZADD', messageQueueKey, messageScore, messageId) --- Rebalance the parent queue -local earliestMessage = redis.call('ZRANGE', messageQueueKey, 0, 0, 'WITHSCORES') -if #earliestMessage == 0 then - redis.call('ZREM', parentQueueKey, messageQueueKey) -else - redis.call('ZADD', parentQueueKey, earliestMessage[2], messageQueueKey) +-- Rebalance the parent queues +for _, parentQueue in ipairs(parentQueues) do + local earliestMessage = redis.call('ZRANGE', messageQueueKey, 0, 0, 'WITHSCORES') + if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, messageQueueKey) + else + redis.call('ZADD', parentQueue, earliestMessage[2], 
messageQueueKey) + end end `, }); this.redis.defineCommand("moveToDeadLetterQueue", { - numberOfKeys: 8, + numberOfKeys: 7, lua: ` -- Keys: -local parentQueue = KEYS[1] -local messageKey = KEYS[2] -local messageQueue = KEYS[3] -local concurrencyKey = KEYS[4] -local envCurrentConcurrencyKey = KEYS[5] -local projectCurrentConcurrencyKey = KEYS[6] -local taskCurrentConcurrencyKey = KEYS[7] -local deadLetterQueueKey = KEYS[8] +local messageKey = KEYS[1] +local messageQueue = KEYS[2] +local concurrencyKey = KEYS[3] +local envCurrentConcurrencyKey = KEYS[4] +local projectCurrentConcurrencyKey = KEYS[5] +local taskCurrentConcurrencyKey = KEYS[6] +local deadLetterQueueKey = KEYS[7] -- Args: local messageId = ARGV[1] +local parentQueues = cjson.decode(ARGV[2]) -- Remove the message from the queue redis.call('ZREM', messageQueue, messageId) --- Rebalance the parent queue -local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') -if #earliestMessage == 0 then - redis.call('ZREM', parentQueue, messageQueue) -else - redis.call('ZADD', parentQueue, earliestMessage[2], messageQueue) +-- Rebalance the parent queues +for _, parentQueue in ipairs(parentQueues) do + local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') + if #earliestMessage == 0 then + redis.call('ZREM', parentQueue, messageQueue) + else + redis.call('ZADD', parentQueue, earliestMessage[2], messageQueue) + end end -- Add the message to the dead letter queue @@ -1365,37 +1373,6 @@ local envConcurrencyLimit = ARGV[1] redis.call('SET', envConcurrencyLimitKey, envConcurrencyLimit) `, }); - - this.redis.defineCommand("rebalanceParentQueueChild", { - numberOfKeys: 2, - lua: ` --- Keys: childQueueKey, parentQueueKey -local childQueueKey = KEYS[1] -local parentQueueKey = KEYS[2] - --- Args: childQueueName, currentScore -local childQueueName = ARGV[1] -local currentScore = ARGV[2] - --- Rebalance the parent queue -local earliestMessage = redis.call('ZRANGE', childQueueKey, 0, 0, 
'WITHSCORES') -if #earliestMessage == 0 then - redis.call('ZREM', parentQueueKey, childQueueName) - - -- Return true because the parent queue was rebalanced - return true -else - -- If the earliest message is different, update the parent queue and return true, else return false - if earliestMessage[2] == currentScore then - return false - end - - redis.call('ZADD', parentQueueKey, earliestMessage[2], childQueueName) - - return earliestMessage[2] -end -`, - }); } } @@ -1404,7 +1381,6 @@ declare module "ioredis" { enqueueMessage( //keys queue: string, - parentQueue: string, messageKey: string, concurrencyKey: string, envConcurrencyKey: string, @@ -1415,13 +1391,13 @@ declare module "ioredis" { messageId: string, messageData: string, messageScore: string, + parentQueues: string, callback?: Callback ): Result; dequeueMessage( //keys childQueue: string, - parentQueue: string, concurrencyLimitKey: string, envConcurrencyLimitKey: string, currentConcurrencyKey: string, @@ -1437,7 +1413,6 @@ declare module "ioredis" { ): Result<[string, string, string] | null, Context>; acknowledgeMessage( - parentQueue: string, messageKey: string, messageQueue: string, concurrencyKey: string, @@ -1445,13 +1420,13 @@ declare module "ioredis" { projectConcurrencyKey: string, taskConcurrencyKey: string, messageId: string, + masterQueues: string, callback?: Callback ): Result; nackMessage( messageKey: string, messageQueue: string, - parentQueueKey: string, concurrencyKey: string, envConcurrencyKey: string, projectConcurrencyKey: string, @@ -1459,11 +1434,11 @@ declare module "ioredis" { messageId: string, messageData: string, messageScore: string, + masterQueues: string, callback?: Callback ): Result; moveToDeadLetterQueue( - parentQueue: string, messageKey: string, messageQueue: string, concurrencyKey: string, @@ -1472,6 +1447,7 @@ declare module "ioredis" { taskConcurrencyKey: string, deadLetterQueueKey: string, messageId: string, + masterQueues: string, callback?: Callback ): Result; @@ 
-1507,13 +1483,5 @@ declare module "ioredis" { envConcurrencyLimit: string, callback?: Callback ): Result; - - rebalanceParentQueueChild( - childQueueKey: string, - parentQueueKey: string, - childQueueName: string, - currentScore: string, - callback?: Callback - ): Result; } } diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index e6c7125b78..5cc008113c 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -18,7 +18,7 @@ export type InputPayload = z.infer; export const OutputPayload = InputPayload.extend({ version: z.literal("1"), - masterQueue: z.string(), + masterQueues: z.string().array(), }); export type OutputPayload = z.infer; From 5fcb9cb87b00a23f9adce78d349f9d2a4da649d3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 14:29:19 +0100 Subject: [PATCH 063/485] Test for enqueuing with multiple master queues --- .../run-engine/src/run-queue/index.test.ts | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index fa1f2423d0..1b4c302b76 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -162,7 +162,7 @@ describe("RunQueue", () => { await queue.enqueueMessage({ env: authenticatedEnvDev, message: messageDev, - masterQueues: `env:${authenticatedEnvDev.id}`, + masterQueues: ["main", envMasterQueue], }); //queue length @@ -196,7 +196,7 @@ describe("RunQueue", () => { expect(dequeued?.messageId).toEqual(messageDev.runId); expect(dequeued?.message.orgId).toEqual(messageDev.orgId); expect(dequeued?.message.version).toEqual("1"); - expect(dequeued?.message.masterQueues).toEqual([envMasterQueue]); + expect(dequeued?.message.masterQueues).toEqual(["main", envMasterQueue]); //concurrencies 
const queueConcurrency2 = await queue.currentConcurrencyOfQueue( @@ -216,6 +216,9 @@ describe("RunQueue", () => { const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", envMasterQueue); expect(dequeued2).toBe(undefined); + + const dequeued3 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); + expect(dequeued3).toBe(undefined); } finally { await queue.quit(); } @@ -223,7 +226,7 @@ describe("RunQueue", () => { ); redisTest( - "Enqueue/Dequeue a message from the shared queue (PROD run, no concurrency key)", + "Enqueue/Dequeue a message from the main queue (PROD run, no concurrency key)", { timeout: 5_000 }, async ({ redisContainer, redis }) => { const queue = new RunQueue({ @@ -243,11 +246,13 @@ describe("RunQueue", () => { ); expect(oldestScore).toBe(undefined); + const envMasterQueue = `env:${authenticatedEnvDev.id}`; + //enqueue message await queue.enqueueMessage({ env: authenticatedEnvProd, message: messageProd, - masterQueues: "main", + masterQueues: ["main", envMasterQueue], }); //queue length @@ -282,7 +287,7 @@ describe("RunQueue", () => { expect(dequeued?.messageId).toEqual(messageProd.runId); expect(dequeued?.message.orgId).toEqual(messageProd.orgId); expect(dequeued?.message.version).toEqual("1"); - expect(dequeued?.message.masterQueues).toEqual(["main"]); + expect(dequeued?.message.masterQueues).toEqual(["main", envMasterQueue]); //concurrencies const queueConcurrency2 = await queue.currentConcurrencyOfQueue( From 5fd870f635ca01916010ce094d90f3f083429f71 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 15:14:05 +0100 Subject: [PATCH 064/485] Added secondary master queue support to the run engine --- .../database/prisma/schema.prisma | 3 +- .../run-engine/src/engine/index.ts | 58 +++++++++++++++++-- 2 files changed, 54 insertions(+), 7 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 75e2b75a32..6d5649b7b7 100644 --- 
a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1685,7 +1685,8 @@ model TaskRun { queue String /// The main queue that this run is part of - masterQueue String @default("main") + masterQueue String @default("main") + secondaryMasterQueue String? /// From engine v2+ this will be defined after a run has been dequeued (starting at 1) attemptNumber Int? diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 23970c0d76..618c527025 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -219,6 +219,11 @@ export class RunEngine { async (span) => { const status = delayUntil ? "DELAYED" : "PENDING"; + let secondaryMasterQueue = this.#environmentMasterQueueKey(environment.id); + if (lockedToVersionId) { + secondaryMasterQueue = this.#backgroundWorkerQueueKey(lockedToVersionId); + } + //create run const taskRun = await prisma.taskRun.create({ data: { @@ -240,6 +245,7 @@ export class RunEngine { concurrencyKey, queue: queueName, masterQueue, + secondaryMasterQueue, isTest, delayUntil, queuedAt, @@ -601,9 +607,6 @@ export class RunEngine { const currentAttemptNumber = lockedTaskRun.attempts.at(0)?.number ?? 
0; const nextAttemptNumber = currentAttemptNumber + 1; - //todo figure out if it's a continuation or a new run - const isNewRun = true; - const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: { id: runId, @@ -655,6 +658,38 @@ export class RunEngine { }); } + async dequeueFromEnvironmentMasterQueue({ + consumerId, + environmentId, + tx, + }: { + consumerId: string; + environmentId: string; + tx?: PrismaClientOrTransaction; + }) { + return this.dequeueFromMasterQueue({ + consumerId, + masterQueue: this.#environmentMasterQueueKey(environmentId), + tx, + }); + } + + async dequeueFromBackgroundWorkerMasterQueue({ + consumerId, + backgroundWorkerId, + tx, + }: { + consumerId: string; + backgroundWorkerId: string; + tx?: PrismaClientOrTransaction; + }) { + return this.dequeueFromMasterQueue({ + consumerId, + masterQueue: this.#backgroundWorkerQueueKey(backgroundWorkerId), + tx, + }); + } + async startRunAttempt({ runId, snapshotId, @@ -1274,11 +1309,14 @@ export class RunEngine { }, }); + const masterQueues = [run.masterQueue]; + if (run.secondaryMasterQueue) { + masterQueues.push(run.secondaryMasterQueue); + } + await this.runQueue.enqueueMessage({ env, - //todo if the run is locked, use the BackgroundWorker ID - //if not locked then the environmentId master queue - masterQueue: run.masterQueue, + masterQueues, message: { runId: run.id, taskIdentifier: run.taskIdentifier, @@ -1721,6 +1759,14 @@ export class RunEngine { return taskRun?.runtimeEnvironment; } + #environmentMasterQueueKey(environmentId: string) { + return `master-env:${environmentId}`; + } + + #backgroundWorkerQueueKey(backgroundWorkerId: string) { + return `master-background-worker:${backgroundWorkerId}`; + } + async #trace( trace: string, attributes: Attributes | undefined, From d6fb6e95840e876491274237e4c577410077a3c5 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 16:32:30 +0100 Subject: [PATCH 065/485] Release and reacquire concurrency (w tests) --- 
.../run-engine/src/run-queue/index.test.ts | 95 ++++++++++ .../run-engine/src/run-queue/index.ts | 175 +++++++++++++----- 2 files changed, 226 insertions(+), 44 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 1b4c302b76..6a0df46b59 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -468,6 +468,101 @@ describe("RunQueue", () => { } }); + redisTest("Releasing concurrency", { timeout: 5_000 }, async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message: messageProd, + masterQueues: "main", + }); + + const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); + expect(message).toBeDefined(); + + //check the message is gone + const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const exists = await redis.exists(key); + expect(exists).toBe(1); + + //concurrencies + expect(await queue.currentConcurrencyOfQueue(authenticatedEnvProd, messageProd.queue)).toBe( + 1 + ); + expect(await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd)).toBe(1); + expect(await queue.currentConcurrencyOfProject(authenticatedEnvProd)).toBe(1); + expect( + await queue.currentConcurrencyOfTask(authenticatedEnvProd, messageProd.taskIdentifier) + ).toBe(1); + + //release the concurrency (not the queue) + await queue.releaseConcurrency( + authenticatedEnvProd.organization.id, + message!.messageId, + false + ); + + //concurrencies + expect(await queue.currentConcurrencyOfQueue(authenticatedEnvProd, messageProd.queue)).toBe( + 1 + ); + expect(await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd)).toBe(0); + expect(await 
queue.currentConcurrencyOfProject(authenticatedEnvProd)).toBe(0); + expect( + await queue.currentConcurrencyOfTask(authenticatedEnvProd, messageProd.taskIdentifier) + ).toBe(0); + + //reacquire the concurrency + await queue.reacquireConcurrency(authenticatedEnvProd.organization.id, message!.messageId); + + //check concurrencies are back to what they were before + expect(await queue.currentConcurrencyOfQueue(authenticatedEnvProd, messageProd.queue)).toBe( + 1 + ); + expect(await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd)).toBe(1); + expect(await queue.currentConcurrencyOfProject(authenticatedEnvProd)).toBe(1); + expect( + await queue.currentConcurrencyOfTask(authenticatedEnvProd, messageProd.taskIdentifier) + ).toBe(1); + + //release the concurrency (with the queue this time) + await queue.releaseConcurrency( + authenticatedEnvProd.organization.id, + message!.messageId, + true + ); + + //concurrencies + expect(await queue.currentConcurrencyOfQueue(authenticatedEnvProd, messageProd.queue)).toBe( + 0 + ); + expect(await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd)).toBe(0); + expect(await queue.currentConcurrencyOfProject(authenticatedEnvProd)).toBe(0); + expect( + await queue.currentConcurrencyOfTask(authenticatedEnvProd, messageProd.taskIdentifier) + ).toBe(0); + + //reacquire the concurrency + await queue.reacquireConcurrency(authenticatedEnvProd.organization.id, message!.messageId); + + //check concurrencies are back to what they were before + expect(await queue.currentConcurrencyOfQueue(authenticatedEnvProd, messageProd.queue)).toBe( + 1 + ); + expect(await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd)).toBe(1); + expect(await queue.currentConcurrencyOfProject(authenticatedEnvProd)).toBe(1); + expect( + await queue.currentConcurrencyOfTask(authenticatedEnvProd, messageProd.taskIdentifier) + ).toBe(1); + } finally { + await queue.quit(); + } + }); + redisTest("Dead Letter Queue", { timeout: 8_000 }, async ({ 
redisContainer, redis }) => { const queue = new RunQueue({ ...testOptions, diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index a603373a7f..95daedc712 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -461,47 +461,90 @@ export class RunQueue { ); } - public async releaseConcurrency(messageId: string, releaseForRun: boolean = false) { + public async releaseConcurrency( + orgId: string, + messageId: string, + releaseForRun: boolean = false + ) { return this.#trace( "releaseConcurrency", async (span) => { - // span.setAttributes({ - // [SemanticAttributes.MESSAGE_ID]: messageId, - // }); - // const message = await this.readMessage(messageId); - // if (!message) { - // logger.log(`[${this.name}].releaseConcurrency() message not found`, { - // messageId, - // releaseForRun, - // service: this.name, - // }); - // return; - // } - // span.setAttributes({ - // [SemanticAttributes.QUEUE]: message.queue, - // [SemanticAttributes.MESSAGE_ID]: message.messageId, - // [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey, - // [SemanticAttributes.PARENT_QUEUE]: message.parentQueue, - // }); - // const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue); - // const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue); - // const orgConcurrencyKey = this.keys.orgCurrentConcurrencyKeyFromQueue(message.queue); - // logger.debug("Calling releaseConcurrency", { - // messageId, - // queue: message.queue, - // concurrencyKey, - // envConcurrencyKey, - // orgConcurrencyKey, - // service: this.name, - // releaseForRun, - // }); - // return this.redis.releaseConcurrency( - // //don't release the for the run, it breaks concurrencyLimits - // releaseForRun ? 
concurrencyKey : "", - // envConcurrencyKey, - // orgConcurrencyKey, - // message.messageId - // ); + const message = await this.#readMessage(orgId, messageId); + + if (!message) { + this.logger.log(`[${this.name}].acknowledgeMessage() message not found`, { + messageId, + service: this.name, + }); + return; + } + + span.setAttributes({ + [SemanticAttributes.QUEUE]: message.queue, + [SemanticAttributes.ORG_ID]: message.orgId, + [SemanticAttributes.RUN_ID]: messageId, + [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey, + }); + + return this.redis.releaseConcurrency( + this.keys.messageKey(orgId, messageId), + message.queue, + releaseForRun ? this.keys.currentConcurrencyKeyFromQueue(message.queue) : "", + this.keys.envCurrentConcurrencyKeyFromQueue(message.queue), + this.keys.projectCurrentConcurrencyKeyFromQueue(message.queue), + this.keys.taskIdentifierCurrentConcurrencyKeyFromQueue( + message.queue, + message.taskIdentifier + ), + messageId, + JSON.stringify(message.masterQueues) + ); + }, + { + kind: SpanKind.CONSUMER, + attributes: { + [SEMATTRS_MESSAGING_OPERATION]: "releaseConcurrency", + [SEMATTRS_MESSAGE_ID]: messageId, + [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", + }, + } + ); + } + + public async reacquireConcurrency(orgId: string, messageId: string) { + return this.#trace( + "reacquireConcurrency", + async (span) => { + const message = await this.#readMessage(orgId, messageId); + + if (!message) { + this.logger.log(`[${this.name}].acknowledgeMessage() message not found`, { + messageId, + service: this.name, + }); + return; + } + + span.setAttributes({ + [SemanticAttributes.QUEUE]: message.queue, + [SemanticAttributes.ORG_ID]: message.orgId, + [SemanticAttributes.RUN_ID]: messageId, + [SemanticAttributes.CONCURRENCY_KEY]: message.concurrencyKey, + }); + + return this.redis.reacquireConcurrency( + this.keys.messageKey(orgId, messageId), + message.queue, + this.keys.currentConcurrencyKeyFromQueue(message.queue), + 
this.keys.envCurrentConcurrencyKeyFromQueue(message.queue), + this.keys.projectCurrentConcurrencyKeyFromQueue(message.queue), + this.keys.taskIdentifierCurrentConcurrencyKeyFromQueue( + message.queue, + message.taskIdentifier + ), + messageId, + JSON.stringify(message.masterQueues) + ); }, { kind: SpanKind.CONSUMER, @@ -1289,12 +1332,17 @@ redis.call('SREM', taskCurrentConcurrencyKey, messageId) }); this.redis.defineCommand("releaseConcurrency", { - numberOfKeys: 3, + numberOfKeys: 6, lua: ` -local concurrencyKey = KEYS[1] -local envCurrentConcurrencyKey = KEYS[2] -local orgCurrentConcurrencyKey = KEYS[3] +-- Keys: +local messageKey = KEYS[1] +local messageQueue = KEYS[2] +local concurrencyKey = KEYS[3] +local envCurrentConcurrencyKey = KEYS[4] +local projectCurrentConcurrencyKey = KEYS[5] +local taskCurrentConcurrencyKey = KEYS[6] +-- Args: local messageId = ARGV[1] -- Update the concurrency keys @@ -1302,7 +1350,30 @@ if concurrencyKey ~= "" then redis.call('SREM', concurrencyKey, messageId) end redis.call('SREM', envCurrentConcurrencyKey, messageId) -redis.call('SREM', orgCurrentConcurrencyKey, messageId) +redis.call('SREM', projectCurrentConcurrencyKey, messageId) +redis.call('SREM', taskCurrentConcurrencyKey, messageId) +`, + }); + + this.redis.defineCommand("reacquireConcurrency", { + numberOfKeys: 6, + lua: ` +-- Keys: +local messageKey = KEYS[1] +local messageQueue = KEYS[2] +local concurrencyKey = KEYS[3] +local envCurrentConcurrencyKey = KEYS[4] +local projectCurrentConcurrencyKey = KEYS[5] +local taskCurrentConcurrencyKey = KEYS[6] + +-- Args: +local messageId = ARGV[1] + +-- Update the concurrency keys +redis.call('SADD', concurrencyKey, messageId) +redis.call('SADD', envCurrentConcurrencyKey, messageId) +redis.call('SADD', projectCurrentConcurrencyKey, messageId) +redis.call('SADD', taskCurrentConcurrencyKey, messageId) `, }); @@ -1452,10 +1523,26 @@ declare module "ioredis" { ): Result; releaseConcurrency( + messageKey: string, + messageQueue: string, 
concurrencyKey: string, envConcurrencyKey: string, - orgConcurrencyKey: string, + projectConcurrencyKey: string, + taskConcurrencyKey: string, messageId: string, + masterQueues: string, + callback?: Callback + ): Result; + + reacquireConcurrency( + messageKey: string, + messageQueue: string, + concurrencyKey: string, + envConcurrencyKey: string, + projectConcurrencyKey: string, + taskConcurrencyKey: string, + messageId: string, + masterQueues: string, callback?: Callback ): Result; From f281c314b2c962573e3ed18fd31b67ed81687e11 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 17:20:07 +0100 Subject: [PATCH 066/485] Releasing the concurrency and reacquiring it when continuing --- .../run-engine/src/engine/index.ts | 41 +++++++++++-------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 618c527025..5dbf105b59 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -294,6 +294,23 @@ export class RunEngine { runId: parentTaskRunId, waitpoint: associatedWaitpoint, }); + + //release the concurrency + //if the queue is the same then it's recursive and we need to release that too otherwise we could have a deadlock + const parentRun = await prisma.taskRun.findUnique({ + select: { + queue: true, + }, + where: { + id: parentTaskRunId, + }, + }); + const releaseRunConcurrency = parentRun?.queue === taskRun.queue; + await this.runQueue.releaseConcurrency( + environment.organization.id, + parentTaskRunId, + releaseRunConcurrency + ); } //Make sure lock extension succeeded @@ -1361,6 +1378,9 @@ export class RunEngine { completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), }); + //we reacquire the concurrency if it's still running because we're not going to be dequeuing (which also does this) + await this.runQueue.reacquireConcurrency(env.organization.id, 
run.id); + //todo publish a notification in Redis that the Workers listen to //this will cause the Worker to check for new execution snapshots for its runs } else { @@ -1368,28 +1388,13 @@ export class RunEngine { run: run, snapshot: { executionStatus: "QUEUED", - description: "Run was QUEUED, because it needs to be continued.", + description: "Run is QUEUED, because all waitpoints are completed.", }, completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), }); - //todo instead this should be a call to unblock the run - //we don't want to free up all the concurrency, so this isn't good - // await this.runQueue.enqueueMessage({ - // env, - // masterQueue: run.masterQueue, - // message: { - // runId: run.id, - // taskIdentifier: run.taskIdentifier, - // orgId: env.organization.id, - // projectId: env.project.id, - // environmentId: env.id, - // environmentType: env.type, - // queue: run.queue, - // concurrencyKey: run.concurrencyKey ?? undefined, - // timestamp: Date.now(), - // }, - // }); + //put it back in the queue + await this.#enqueueRun(run, env, prisma); } }); } From 7206d597244794b9a2dd1e55dc7b6a93a9c6b6eb Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 17:27:15 +0100 Subject: [PATCH 067/485] =?UTF-8?q?When=20continuing=20a=20run=20and=20req?= =?UTF-8?q?ueuing,=20use=20the=20run.createdAt=20time=20so=20it=E2=80=99s?= =?UTF-8?q?=20prioritised?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../run-engine/src/engine/index.ts | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 5dbf105b59..7b61d7b506 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -396,7 +396,7 @@ export class RunEngine { //enqueue the run if it's not delayed if (!taskRun.delayUntil) { - await 
this.#enqueueRun(taskRun, environment, prisma); + await this.#enqueueRun({ run: taskRun, env: environment, tx: prisma }); } }); @@ -1311,11 +1311,17 @@ export class RunEngine { //MARK: RunQueue /** The run can be added to the queue. When it's pulled from the queue it will be executed. */ - async #enqueueRun( - run: TaskRun, - env: MinimalAuthenticatedEnvironment, - tx?: PrismaClientOrTransaction - ) { + async #enqueueRun({ + run, + env, + timestamp, + tx, + }: { + run: TaskRun; + env: MinimalAuthenticatedEnvironment; + timestamp?: number; + tx?: PrismaClientOrTransaction; + }) { const prisma = tx ?? this.prisma; const newSnapshot = await this.#createExecutionSnapshot(prisma, { @@ -1343,7 +1349,7 @@ export class RunEngine { environmentType: env.type, queue: run.queue, concurrencyKey: run.concurrencyKey ?? undefined, - timestamp: Date.now(), + timestamp: timestamp ?? Date.now(), attempt: 0, }, }); @@ -1393,8 +1399,9 @@ export class RunEngine { completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), }); - //put it back in the queue - await this.#enqueueRun(run, env, prisma); + //put it back in the queue, with the original timestamp + //this will prioritise it over new runs + await this.#enqueueRun({ run, env, timestamp: run.createdAt.getTime(), tx: prisma }); } }); } From 11865e509abd7e122315f927b9fd383db3b8873c Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 22:41:28 +0100 Subject: [PATCH 068/485] Added environment to Waitpoints --- internal-packages/database/prisma/schema.prisma | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 6d5649b7b7..56c116e335 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -417,6 +417,7 @@ model RuntimeEnvironment { currentSessionId String? 
taskRunNumberCounter TaskRunNumberCounter[] taskRunCheckpoints TaskRunCheckpoint[] + waitpoints Waitpoint[] @@unique([projectId, slug, orgMemberId]) @@unique([projectId, shortcode]) @@ -1990,10 +1991,13 @@ model Waitpoint { project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) projectId String + environment RuntimeEnvironment @relation(fields: [environmentId], references: [id], onDelete: Cascade, onUpdate: Cascade) + environmentId String + createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - @@unique([projectId, idempotencyKey]) + @@unique([environmentId, idempotencyKey]) } enum WaitpointType { From c19eda6333cec8cf0a602ada3e1a47da5ca115ce Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 23 Oct 2024 23:14:03 +0100 Subject: [PATCH 069/485] waitForDuration implemented with tests --- .../run-engine/src/engine/index.test.ts | 96 +++++++++++++++ .../run-engine/src/engine/index.ts | 112 +++++++++++++++++- 2 files changed, 203 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 26318d7723..c1d22ed227 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -318,6 +318,102 @@ describe("RunEngine", () => { } ); + containerTest("waitForDuration", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + 
centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + }); + assertNonNullable(dequeued); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued.payload.run.id, + snapshotId: dequeued.payload.execution.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //waitForDuration + const date = new Date(Date.now() + 1000); + const result = await engine.waitForDuration({ + runId: run.id, + snapshotId: attemptResult.snapshot.id, + date, + releaseConcurrency: false, + }); + + expect(result.willWaitUntil.toISOString()).toBe(date.toISOString()); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + await setTimeout(1_100); + + const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); + expect(executionDataAfter?.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + } finally { + engine.quit(); + } + }); + //todo batchTriggerAndWait //todo checkpoints diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 7b61d7b506..2c3080077e 100644 --- 
a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -283,6 +283,7 @@ export class RunEngine { //create associated waitpoint (this completes when the run completes) const associatedWaitpoint = await this.#createRunAssociatedWaitpoint(prisma, { projectId: environment.project.id, + environmentId: environment.id, completedByTaskRunId: taskRun.id, }); @@ -367,6 +368,7 @@ export class RunEngine { if (taskRun.delayUntil) { const delayWaitpoint = await this.#createDateTimeWaitpoint(prisma, { projectId: environment.project.id, + environmentId: environment.id, completedAfter: taskRun.delayUntil, }); @@ -1064,7 +1066,96 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }) {} - async waitForDuration() {} + async waitForDuration({ + runId, + snapshotId, + date, + releaseConcurrency = true, + idempotencyKey, + tx, + }: { + runId: string; + snapshotId: string; + date: Date; + releaseConcurrency?: boolean; + idempotencyKey?: string; + tx?: PrismaClientOrTransaction; + }): Promise<{ + willWaitUntil: Date; + }> { + const prisma = tx ?? this.prisma; + + return await this.runLock.lock([runId], 5_000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (!snapshot) { + throw new ServiceValidationError("Snapshot not found", 404); + } + + if (snapshot.id !== snapshotId) { + throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); + } + + const run = await prisma.taskRun.findFirst({ + select: { + runtimeEnvironment: { + select: { + id: true, + organizationId: true, + }, + }, + projectId: true, + }, + where: { id: runId }, + }); + + if (!run) { + throw new ServiceValidationError("TaskRun not found", 404); + } + + let waitpoint = idempotencyKey + ? 
await prisma.waitpoint.findUnique({ + where: { + environmentId_idempotencyKey: { + environmentId: run.runtimeEnvironment.id, + idempotencyKey, + }, + }, + }) + : undefined; + + if (!waitpoint) { + waitpoint = await this.#createDateTimeWaitpoint(prisma, { + projectId: run.projectId, + environmentId: run.runtimeEnvironment.id, + completedAfter: date, + idempotencyKey, + }); + } + + //waitpoint already completed, so we don't need to wait + if (waitpoint.status === "COMPLETED") { + return { willWaitUntil: waitpoint.completedAt ?? new Date() }; + } + + //block the run + await this.#blockRunWithWaitpoint(prisma, { + orgId: run.runtimeEnvironment.organizationId, + runId, + waitpoint, + }); + + //release concurrency + await this.runQueue.releaseConcurrency( + run.runtimeEnvironment.organizationId, + runId, + releaseConcurrency + ); + + return { + willWaitUntil: date, + }; + }); + } async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { const prisma = tx ?? this.prisma; @@ -1409,7 +1500,11 @@ export class RunEngine { //MARK: - Waitpoints async #createRunAssociatedWaitpoint( tx: PrismaClientOrTransaction, - { projectId, completedByTaskRunId }: { projectId: string; completedByTaskRunId: string } + { + projectId, + environmentId, + completedByTaskRunId, + }: { projectId: string; environmentId: string; completedByTaskRunId: string } ) { return tx.waitpoint.create({ data: { @@ -1418,6 +1513,7 @@ export class RunEngine { idempotencyKey: nanoid(24), userProvidedIdempotencyKey: false, projectId, + environmentId, completedByTaskRunId, }, }); @@ -1425,15 +1521,21 @@ export class RunEngine { async #createDateTimeWaitpoint( tx: PrismaClientOrTransaction, - { projectId, completedAfter }: { projectId: string; completedAfter: Date } + { + projectId, + environmentId, + completedAfter, + idempotencyKey, + }: { projectId: string; environmentId: string; completedAfter: Date; idempotencyKey?: string } ) { const waitpoint = await tx.waitpoint.create({ data: { type: 
"DATETIME", status: "PENDING", - idempotencyKey: nanoid(24), - userProvidedIdempotencyKey: false, + idempotencyKey: idempotencyKey ?? nanoid(24), + userProvidedIdempotencyKey: !!idempotencyKey, projectId, + environmentId, completedAfter, }, }); From 3eb04f68e1b0db8a7db0c710c58b11cee47102e0 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 24 Oct 2024 09:48:37 +0100 Subject: [PATCH 070/485] Latest lockfile --- pnpm-lock.yaml | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 93baf6d8ba..ed5c40f578 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -885,34 +885,6 @@ importers: specifier: ^4.9.4 version: 4.9.5 - internal-packages/message-queue: - dependencies: - '@opentelemetry/api': - specifier: ^1.9.0 - version: 1.9.0 - '@trigger.dev/core': - specifier: workspace:* - version: link:../../packages/core - ioredis: - specifier: ^5.3.2 - version: 5.3.2 - nanoid: - specifier: ^5.0.7 - version: 5.0.7 - typescript: - specifier: ^5.5.4 - version: 5.5.4 - zod: - specifier: 3.22.3 - version: 3.22.3 - devDependencies: - '@internal/testcontainers': - specifier: workspace:* - version: link:../testcontainers - vitest: - specifier: ^1.4.0 - version: 1.6.0(@types/node@20.14.14) - internal-packages/otlp-importer: dependencies: long: From 34a09bfd80a8af16ceef05881efd7684817b2493 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 24 Oct 2024 16:00:11 +0100 Subject: [PATCH 071/485] First Run Engine db migration --- .../migration.sql | 170 ++++++++++++++++++ 1 file changed, 170 insertions(+) create mode 100644 internal-packages/database/prisma/migrations/20241024144743_run_engine_first_migration/migration.sql diff --git a/internal-packages/database/prisma/migrations/20241024144743_run_engine_first_migration/migration.sql b/internal-packages/database/prisma/migrations/20241024144743_run_engine_first_migration/migration.sql new file mode 100644 index 0000000000..88301ce317 --- /dev/null +++ 
b/internal-packages/database/prisma/migrations/20241024144743_run_engine_first_migration/migration.sql @@ -0,0 +1,170 @@ +-- CreateEnum +CREATE TYPE "RunEngineVersion" AS ENUM ('V1', 'V2'); + +-- CreateEnum +CREATE TYPE "TaskRunExecutionStatus" AS ENUM ('RUN_CREATED', 'QUEUED', 'PENDING_EXECUTING', 'EXECUTING', 'EXECUTING_WITH_WAITPOINTS', 'BLOCKED_BY_WAITPOINTS', 'PENDING_CANCEL', 'FINISHED'); + +-- CreateEnum +CREATE TYPE "TaskRunCheckpointType" AS ENUM ('DOCKER', 'KUBERNETES'); + +-- CreateEnum +CREATE TYPE "WaitpointType" AS ENUM ('RUN', 'DATETIME', 'EVENT'); + +-- CreateEnum +CREATE TYPE "WaitpointStatus" AS ENUM ('PENDING', 'COMPLETED'); + +-- AlterTable +ALTER TABLE "TaskRun" ADD COLUMN "attemptNumber" INTEGER, +ADD COLUMN "engine" "RunEngineVersion" NOT NULL DEFAULT 'V1', +ADD COLUMN "masterQueue" TEXT NOT NULL DEFAULT 'main', +ADD COLUMN "secondaryMasterQueue" TEXT; + +-- CreateTable +CREATE TABLE "TaskRunExecutionSnapshot" ( + "id" TEXT NOT NULL, + "engine" "RunEngineVersion" NOT NULL DEFAULT 'V2', + "executionStatus" "TaskRunExecutionStatus" NOT NULL, + "description" TEXT NOT NULL, + "runId" TEXT NOT NULL, + "runStatus" "TaskRunStatus" NOT NULL, + "attemptNumber" INTEGER, + "checkpointId" TEXT, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "TaskRunExecutionSnapshot_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "TaskRunCheckpoint" ( + "id" TEXT NOT NULL, + "friendlyId" TEXT NOT NULL, + "type" "TaskRunCheckpointType" NOT NULL, + "location" TEXT NOT NULL, + "imageRef" TEXT NOT NULL, + "reason" TEXT, + "metadata" TEXT, + "projectId" TEXT NOT NULL, + "runtimeEnvironmentId" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "TaskRunCheckpoint_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "Waitpoint" ( + "id" TEXT NOT NULL, + "type" "WaitpointType" NOT NULL, + "status" "WaitpointStatus" 
NOT NULL DEFAULT 'PENDING', + "completedAt" TIMESTAMP(3), + "idempotencyKey" TEXT NOT NULL, + "userProvidedIdempotencyKey" BOOLEAN NOT NULL, + "inactiveIdempotencyKey" TEXT, + "completedByTaskRunId" TEXT, + "completedAfter" TIMESTAMP(3), + "output" TEXT, + "outputType" TEXT NOT NULL DEFAULT 'application/json', + "projectId" TEXT NOT NULL, + "environmentId" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "Waitpoint_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "TaskRunWaitpoint" ( + "id" TEXT NOT NULL, + "taskRunId" TEXT NOT NULL, + "waitpointId" TEXT NOT NULL, + "projectId" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "TaskRunWaitpoint_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "Worker" ( + "id" TEXT NOT NULL, + + CONSTRAINT "Worker_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "WorkerGroup" ( + "id" TEXT NOT NULL, + "masterQueue" TEXT NOT NULL, + + CONSTRAINT "WorkerGroup_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "_completedWaitpoints" ( + "A" TEXT NOT NULL, + "B" TEXT NOT NULL +); + +-- CreateIndex +CREATE INDEX "TaskRunExecutionSnapshot_runId_createdAt_idx" ON "TaskRunExecutionSnapshot"("runId", "createdAt" DESC); + +-- CreateIndex +CREATE UNIQUE INDEX "TaskRunCheckpoint_friendlyId_key" ON "TaskRunCheckpoint"("friendlyId"); + +-- CreateIndex +CREATE UNIQUE INDEX "Waitpoint_completedByTaskRunId_key" ON "Waitpoint"("completedByTaskRunId"); + +-- CreateIndex +CREATE UNIQUE INDEX "Waitpoint_environmentId_idempotencyKey_key" ON "Waitpoint"("environmentId", "idempotencyKey"); + +-- CreateIndex +CREATE INDEX "TaskRunWaitpoint_taskRunId_idx" ON "TaskRunWaitpoint"("taskRunId"); + +-- CreateIndex +CREATE INDEX "TaskRunWaitpoint_waitpointId_idx" ON "TaskRunWaitpoint"("waitpointId"); + +-- CreateIndex +CREATE UNIQUE INDEX 
"TaskRunWaitpoint_taskRunId_waitpointId_key" ON "TaskRunWaitpoint"("taskRunId", "waitpointId"); + +-- CreateIndex +CREATE UNIQUE INDEX "_completedWaitpoints_AB_unique" ON "_completedWaitpoints"("A", "B"); + +-- CreateIndex +CREATE INDEX "_completedWaitpoints_B_index" ON "_completedWaitpoints"("B"); + +-- AddForeignKey +ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_runId_fkey" FOREIGN KEY ("runId") REFERENCES "TaskRun"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_checkpointId_fkey" FOREIGN KEY ("checkpointId") REFERENCES "TaskRunCheckpoint"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunCheckpoint" ADD CONSTRAINT "TaskRunCheckpoint_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunCheckpoint" ADD CONSTRAINT "TaskRunCheckpoint_runtimeEnvironmentId_fkey" FOREIGN KEY ("runtimeEnvironmentId") REFERENCES "RuntimeEnvironment"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "Waitpoint" ADD CONSTRAINT "Waitpoint_completedByTaskRunId_fkey" FOREIGN KEY ("completedByTaskRunId") REFERENCES "TaskRun"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "Waitpoint" ADD CONSTRAINT "Waitpoint_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "Waitpoint" ADD CONSTRAINT "Waitpoint_environmentId_fkey" FOREIGN KEY ("environmentId") REFERENCES "RuntimeEnvironment"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunWaitpoint" ADD CONSTRAINT "TaskRunWaitpoint_taskRunId_fkey" FOREIGN KEY ("taskRunId") REFERENCES "TaskRun"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunWaitpoint" ADD CONSTRAINT 
"TaskRunWaitpoint_waitpointId_fkey" FOREIGN KEY ("waitpointId") REFERENCES "Waitpoint"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunWaitpoint" ADD CONSTRAINT "TaskRunWaitpoint_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "_completedWaitpoints" ADD CONSTRAINT "_completedWaitpoints_A_fkey" FOREIGN KEY ("A") REFERENCES "TaskRunExecutionSnapshot"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "_completedWaitpoints" ADD CONSTRAINT "_completedWaitpoints_B_fkey" FOREIGN KEY ("B") REFERENCES "Waitpoint"("id") ON DELETE CASCADE ON UPDATE CASCADE; From 9c31c39acd09f2d8363ff9530d5e1bf74aa537e9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 24 Oct 2024 19:55:15 +0100 Subject: [PATCH 072/485] The queue now pulls multiple items at once --- .../run-engine/src/run-queue/index.test.ts | 107 ++++++++++-------- .../run-engine/src/run-queue/index.ts | 96 +++++++++------- .../simpleWeightedPriorityStrategy.ts | 31 +++-- .../run-engine/src/run-queue/types.ts | 9 +- 4 files changed, 140 insertions(+), 103 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 6a0df46b59..42516a2b2d 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -192,11 +192,16 @@ describe("RunQueue", () => { ); expect(taskConcurrency).toBe(0); - const dequeued = await queue.dequeueMessageFromMasterQueue("test_12345", envMasterQueue); - expect(dequeued?.messageId).toEqual(messageDev.runId); - expect(dequeued?.message.orgId).toEqual(messageDev.orgId); - expect(dequeued?.message.version).toEqual("1"); - expect(dequeued?.message.masterQueues).toEqual(["main", envMasterQueue]); + const dequeued = await queue.dequeueMessageFromMasterQueue( + "test_12345", + envMasterQueue, + 10 + 
); + expect(dequeued.length).toBe(1); + expect(dequeued[0].messageId).toEqual(messageDev.runId); + expect(dequeued[0].message.orgId).toEqual(messageDev.orgId); + expect(dequeued[0].message.version).toEqual("1"); + expect(dequeued[0].message.masterQueues).toEqual(["main", envMasterQueue]); //concurrencies const queueConcurrency2 = await queue.currentConcurrencyOfQueue( @@ -214,11 +219,15 @@ describe("RunQueue", () => { ); expect(taskConcurrency2).toBe(1); - const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", envMasterQueue); - expect(dequeued2).toBe(undefined); + const dequeued2 = await queue.dequeueMessageFromMasterQueue( + "test_12345", + envMasterQueue, + 10 + ); + expect(dequeued2.length).toBe(0); - const dequeued3 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(dequeued3).toBe(undefined); + const dequeued3 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(dequeued3.length).toBe(0); } finally { await queue.quit(); } @@ -283,11 +292,12 @@ describe("RunQueue", () => { expect(taskConcurrency).toBe(0); //dequeue - const dequeued = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(dequeued?.messageId).toEqual(messageProd.runId); - expect(dequeued?.message.orgId).toEqual(messageProd.orgId); - expect(dequeued?.message.version).toEqual("1"); - expect(dequeued?.message.masterQueues).toEqual(["main", envMasterQueue]); + const dequeued = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(dequeued.length).toBe(1); + expect(dequeued[0].messageId).toEqual(messageProd.runId); + expect(dequeued[0].message.orgId).toEqual(messageProd.orgId); + expect(dequeued[0].message.version).toEqual("1"); + expect(dequeued[0].message.masterQueues).toEqual(["main", envMasterQueue]); //concurrencies const queueConcurrency2 = await queue.currentConcurrencyOfQueue( @@ -309,8 +319,8 @@ describe("RunQueue", () => { const length2 = await 
queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); expect(length2).toBe(0); - const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(dequeued2).toBe(undefined); + const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(dequeued2.length).toBe(0); } finally { await queue.quit(); } @@ -324,10 +334,10 @@ describe("RunQueue", () => { }); try { - const result = await queue.getSharedQueueDetails("main"); + const result = await queue.getSharedQueueDetails("main", 10); expect(result.selectionId).toBe("getSharedQueueDetails"); expect(result.queueCount).toBe(0); - expect(result.queueChoice.choice).toStrictEqual({ abort: true }); + expect(result.queueChoice.choices).toStrictEqual({ abort: true }); await queue.enqueueMessage({ env: authenticatedEnvProd, @@ -335,11 +345,14 @@ describe("RunQueue", () => { masterQueues: "main", }); - const result2 = await queue.getSharedQueueDetails("main"); + const result2 = await queue.getSharedQueueDetails("main", 10); expect(result2.selectionId).toBe("getSharedQueueDetails"); expect(result2.queueCount).toBe(1); expect(result2.queues[0].score).toBe(messageProd.timestamp); - expect(result2.queueChoice.choice).toBe( + if (!Array.isArray(result2.queueChoice.choices)) { + throw new Error("Expected queueChoice.choices to be an array"); + } + expect(result2.queueChoice.choices[0]).toBe( "{org:o1234}:proj:p1234:env:e1234:queue:task/my-task" ); } finally { @@ -360,15 +373,15 @@ describe("RunQueue", () => { masterQueues: "main", }); - const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(message).toBeDefined(); + const messages = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(messages.length).toBe(1); //check the message is gone - const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const key = queue.keys.messageKey(messages[0].message.orgId, messages[0].messageId); const 
exists = await redis.exists(key); expect(exists).toBe(1); - await queue.acknowledgeMessage(message!.message.orgId, message!.messageId); + await queue.acknowledgeMessage(messages[0].message.orgId, messages[0].messageId); //concurrencies const queueConcurrency = await queue.currentConcurrencyOfQueue( @@ -391,8 +404,8 @@ describe("RunQueue", () => { expect(exists2).toBe(0); //dequeue - const message2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(message2).toBeUndefined(); + const messages2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(messages2.length).toBe(0); } finally { await queue.quit(); } @@ -411,11 +424,11 @@ describe("RunQueue", () => { masterQueues: "main2", }); - const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main2"); - expect(message).toBeDefined(); + const messages = await queue.dequeueMessageFromMasterQueue("test_12345", "main2", 10); + expect(messages.length).toBe(1); //check the message is there - const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const key = queue.keys.messageKey(messages[0].message.orgId, messages[0].messageId); const exists = await redis.exists(key); expect(exists).toBe(1); @@ -435,7 +448,7 @@ describe("RunQueue", () => { ); expect(taskConcurrency).toBe(1); - await queue.nackMessage(message!.message.orgId, message!.messageId); + await queue.nackMessage(messages[0].message.orgId, messages[0].messageId); //we need to wait because the default wait is 1 second await setTimeout(300); @@ -461,8 +474,8 @@ describe("RunQueue", () => { expect(exists2).toBe(1); //dequeue - const message2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main2"); - expect(message2?.messageId).toBe(messageProd.runId); + const messages2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main2", 10); + expect(messages2[0].messageId).toBe(messageProd.runId); } finally { await queue.quit(); } @@ -481,11 +494,11 @@ 
describe("RunQueue", () => { masterQueues: "main", }); - const message = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(message).toBeDefined(); + const messages = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(messages.length).toBe(1); //check the message is gone - const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const key = queue.keys.messageKey(messages[0].message.orgId, messages[0].messageId); const exists = await redis.exists(key); expect(exists).toBe(1); @@ -502,7 +515,7 @@ describe("RunQueue", () => { //release the concurrency (not the queue) await queue.releaseConcurrency( authenticatedEnvProd.organization.id, - message!.messageId, + messages[0].messageId, false ); @@ -517,7 +530,7 @@ describe("RunQueue", () => { ).toBe(0); //reacquire the concurrency - await queue.reacquireConcurrency(authenticatedEnvProd.organization.id, message!.messageId); + await queue.reacquireConcurrency(authenticatedEnvProd.organization.id, messages[0].messageId); //check concurrencies are back to what they were before expect(await queue.currentConcurrencyOfQueue(authenticatedEnvProd, messageProd.queue)).toBe( @@ -532,7 +545,7 @@ describe("RunQueue", () => { //release the concurrency (with the queue this time) await queue.releaseConcurrency( authenticatedEnvProd.organization.id, - message!.messageId, + messages[0].messageId, true ); @@ -547,7 +560,7 @@ describe("RunQueue", () => { ).toBe(0); //reacquire the concurrency - await queue.reacquireConcurrency(authenticatedEnvProd.organization.id, message!.messageId); + await queue.reacquireConcurrency(authenticatedEnvProd.organization.id, messages[0].messageId); //check concurrencies are back to what they were before expect(await queue.currentConcurrencyOfQueue(authenticatedEnvProd, messageProd.queue)).toBe( @@ -579,20 +592,20 @@ describe("RunQueue", () => { masterQueues: "main", }); - const message = await 
queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(message).toBeDefined(); + const messages = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(messages.length).toBe(1); //check the message is there - const key = queue.keys.messageKey(message!.message.orgId, message!.messageId); + const key = queue.keys.messageKey(messages[0].message.orgId, messages[0].messageId); const exists = await redis.exists(key); expect(exists).toBe(1); //nack (we only have attempts set to 1) - await queue.nackMessage(message!.message.orgId, message!.messageId); + await queue.nackMessage(messages[0].message.orgId, messages[0].messageId); //dequeue - const message2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(message2?.messageId).toBeUndefined(); + const messages2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(messages2.length).toBe(0); //concurrencies const queueConcurrency2 = await queue.currentConcurrencyOfQueue( @@ -642,8 +655,8 @@ describe("RunQueue", () => { expect(dlqMembersAfter).not.toContain(messageProd.runId); //dequeue - const message3 = await queue.dequeueMessageFromMasterQueue("test_12345", "main"); - expect(message3?.messageId).toBe(messageProd.runId); + const messages3 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(messages3[0].messageId).toBe(messageProd.runId); } finally { await queue.quit(); } diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 95daedc712..71de267855 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -26,6 +26,7 @@ const SemanticAttributes = { QUEUE: "runqueue.queue", MASTER_QUEUES: "runqueue.masterQueues", RUN_ID: "runqueue.runId", + RESULT_COUNT: "runqueue.resultCount", CONCURRENCY_KEY: "runqueue.concurrencyKey", ORG_ID: "runqueue.orgId", }; @@ -45,6 +46,12 @@ export 
type RunQueueOptions = { retryOptions?: RetryOptions; }; +type DequeuedMessage = { + messageId: string; + messageScore: string; + message: OutputPayload; +}; + /** * RunQueue – the queue that's used to process runs */ @@ -222,7 +229,7 @@ export class RunQueue { ); } - public async getSharedQueueDetails(masterQueue: string) { + public async getSharedQueueDetails(masterQueue: string, maxCount: number) { const { range } = await this.queuePriorityStrategy.nextCandidateSelection( masterQueue, "getSharedQueueDetails" @@ -234,11 +241,12 @@ export class RunQueue { ); // We need to priority shuffle here to ensure all workers aren't just working on the highest priority queue - const choice = this.queuePriorityStrategy.chooseQueue( + const result = this.queuePriorityStrategy.chooseQueues( queuesWithScores, masterQueue, "getSharedQueueDetails", - range + range, + maxCount ); return { @@ -247,56 +255,60 @@ export class RunQueue { queuesWithScores, nextRange: range, queueCount: queues.length, - queueChoice: choice, + queueChoice: result, }; } /** * Dequeue a message */ - public async dequeueMessageFromMasterQueue(consumerId: string, masterQueue: string) { + public async dequeueMessageFromMasterQueue( + consumerId: string, + masterQueue: string, + maxCount: number + ): Promise { return this.#trace( "dequeueMessageInSharedQueue", async (span) => { // Read the parent queue for matching queues - const messageQueue = await this.#getRandomQueueFromParentQueue( + const selectedQueues = await this.#getRandomQueueFromParentQueue( masterQueue, this.options.queuePriorityStrategy, (queue) => this.#calculateMessageQueueCapacities(queue, { checkForDisabled: true }), - consumerId + consumerId, + maxCount ); - if (!messageQueue) { - return; + if (!selectedQueues || selectedQueues.length === 0) { + return []; } - // If the queue includes a concurrency key, we need to remove the ck:concurrencyKey from the queue name - const message = await this.#callDequeueMessage({ - messageQueue, - 
concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(messageQueue), - currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(messageQueue), - envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(messageQueue), - envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(messageQueue), - projectCurrentConcurrencyKey: - this.keys.projectCurrentConcurrencyKeyFromQueue(messageQueue), - messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(messageQueue), - taskCurrentConcurrentKeyPrefix: - this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(messageQueue), - }); + const messages: DequeuedMessage[] = []; + + for (const queue of selectedQueues) { + const message = await this.#callDequeueMessage({ + messageQueue: queue, + concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), + currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), + envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), + envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), + projectCurrentConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(queue), + messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), + taskCurrentConcurrentKeyPrefix: + this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), + }); - if (!message) { - return; + if (message) { + messages.push(message); + } } span.setAttributes({ - [SEMATTRS_MESSAGE_ID]: message.messageId, - [SemanticAttributes.QUEUE]: message.message.queue, - [SemanticAttributes.RUN_ID]: message.message.runId, - [SemanticAttributes.CONCURRENCY_KEY]: message.message.concurrencyKey, + [SemanticAttributes.RESULT_COUNT]: messages.length, [SemanticAttributes.MASTER_QUEUES]: masterQueue, }); - return message; + return messages; }, { kind: SpanKind.CONSUMER, @@ -739,8 +751,9 @@ export class RunQueue { parentQueue: string, queuePriorityStrategy: RunQueuePriorityStrategy, calculateCapacities: (queue: string) => Promise, - 
consumerId: string - ) { + consumerId: string, + maxCount: number + ): Promise { return this.#trace( "getRandomQueueFromParentQueue", async (span) => { @@ -758,11 +771,12 @@ export class RunQueue { span.setAttribute("queuesWithScoresCount", queuesWithScores.length); // We need to priority shuffle here to ensure all workers aren't just working on the highest priority queue - const { choice, nextRange } = this.queuePriorityStrategy.chooseQueue( + const { choices, nextRange } = queuePriorityStrategy.chooseQueues( queuesWithScores, parentQueue, consumerId, - range + range, + maxCount ); span.setAttributes({ @@ -777,7 +791,7 @@ export class RunQueue { span.setAttribute("nextRange.count", nextRange.count); if (this.options.verbose || nextRange.offset > 0) { - if (typeof choice === "string") { + if (Array.isArray(choices)) { this.logger.debug(`[${this.name}] getRandomQueueFromParentQueue`, { queues, queuesWithScores, @@ -785,7 +799,7 @@ export class RunQueue { nextRange, queueCount: queues.length, queuesWithScoresCount: queuesWithScores.length, - queueChoice: choice, + queueChoices: choices, consumerId, }); } else { @@ -802,14 +816,12 @@ export class RunQueue { } } - if (typeof choice !== "string") { + if (Array.isArray(choices)) { + span.setAttribute("queueChoices", choices); + return choices; + } else { span.setAttribute("noQueueChoice", true); - return; - } else { - span.setAttribute("queueChoice", choice); - - return choice; } }, { @@ -938,7 +950,7 @@ export class RunQueue { projectCurrentConcurrencyKey: string; messageKeyPrefix: string; taskCurrentConcurrentKeyPrefix: string; - }) { + }): Promise { const result = await this.redis.dequeueMessage( //keys messageQueue, diff --git a/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts b/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts index b85019e449..9535f4bd5f 100644 --- a/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts +++ 
b/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts @@ -25,12 +25,13 @@ export class SimpleWeightedChoiceStrategy implements RunQueuePriorityStrategy { ); } - chooseQueue( + chooseQueues( queues: QueueWithScores[], parentQueue: string, consumerId: string, - previousRange: QueueRange - ): { choice: PriorityStrategyChoice; nextRange: QueueRange } { + previousRange: QueueRange, + maxCount: number + ): { choices: PriorityStrategyChoice; nextRange: QueueRange } { const filteredQueues = filterQueuesAtCapacity(queues); if (queues.length === this.options.queueSelectionCount) { @@ -47,17 +48,26 @@ export class SimpleWeightedChoiceStrategy implements RunQueuePriorityStrategy { if (filteredQueues.length === 0) { return { - choice: { abort: true }, + choices: { abort: true }, nextRange: this.nextRangeForParentQueue(parentQueue, consumerId), }; } const queueWeights = this.#calculateQueueWeights(filteredQueues); - const choice = weightedRandomChoice(queueWeights); + const choices = []; + for (let i = 0; i < maxCount; i++) { + const chosenIndex = weightedRandomIndex(queueWeights); + + const choice = queueWeights.at(chosenIndex)?.queue; + if (choice) { + queueWeights.splice(chosenIndex, 1); + choices.push(choice); + } + } return { - choice, + choices, nextRange: this.nextRangeForParentQueue(parentQueue, consumerId), }; } @@ -102,18 +112,19 @@ function filterQueuesAtCapacity(queues: QueueWithScores[]) { ); } -function weightedRandomChoice(queues: Array<{ queue: string; totalWeight: number }>) { +function weightedRandomIndex(queues: Array<{ queue: string; totalWeight: number }>): number { const totalWeight = queues.reduce((acc, queue) => acc + queue.totalWeight, 0); let randomNum = Math.random() * totalWeight; - for (const queue of queues) { + for (let i = 0; i < queues.length; i++) { + const queue = queues[i]; if (randomNum < queue.totalWeight) { - return queue.queue; + return i; } randomNum -= queue.totalWeight; } // If we get here, we should just return 
a random queue - return queues[Math.floor(Math.random() * queues.length)].queue; + return Math.floor(Math.random() * queues.length); } diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index 5cc008113c..8b83426493 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -84,7 +84,7 @@ export interface RunQueueKeyProducer { }; } -export type PriorityStrategyChoice = string | { abort: true }; +export type PriorityStrategyChoice = string[] | { abort: true }; export interface RunQueuePriorityStrategy { /** @@ -96,12 +96,13 @@ export interface RunQueuePriorityStrategy { * * @returns The queue to process the message from, or an object with `abort: true` if no queue is available */ - chooseQueue( + chooseQueues( queues: Array, parentQueue: string, consumerId: string, - previousRange: QueueRange - ): { choice: PriorityStrategyChoice; nextRange: QueueRange }; + previousRange: QueueRange, + maxCount: number + ): { choices: PriorityStrategyChoice; nextRange: QueueRange }; /** * This function is called to get the next candidate selection for the queue From 8f6b824760511848fb3013e2cb44c4d32d220da0 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 24 Oct 2024 20:10:10 +0100 Subject: [PATCH 073/485] The engine works with multi dequeuing --- .../run-engine/src/engine/index.test.ts | 27 +- .../run-engine/src/engine/index.ts | 457 ++++++++++-------- .../run-engine/src/engine/messages.ts | 5 +- .../run-engine/src/run-queue/index.ts | 3 +- 4 files changed, 268 insertions(+), 224 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index c1d22ed227..db90458fcc 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -116,16 +116,12 @@ describe("RunEngine", () => { const dequeued = await 
engine.dequeueFromMasterQueue({ consumerId: "test_12345", masterQueue: run.masterQueue, + maxRunCount: 10, }); - expect(dequeued?.action).toBe("SCHEDULE_RUN"); - - if (dequeued?.action !== "SCHEDULE_RUN") { - throw new Error("Expected action to be START_RUN"); - } - - expect(dequeued.payload.run.id).toBe(run.id); - expect(dequeued.payload.run.attemptNumber).toBe(1); - expect(dequeued.payload.execution.status).toBe("PENDING_EXECUTING"); + expect(dequeued.length).toBe(1); + expect(dequeued[0].action).toBe("SCHEDULE_RUN"); + expect(dequeued[0].payload.run.id).toBe(run.id); + expect(dequeued[0].payload.run.attemptNumber).toBe(1); const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( authenticatedEnvironment @@ -134,8 +130,8 @@ describe("RunEngine", () => { //create an attempt const attemptResult = await engine.startRunAttempt({ - runId: dequeued.payload.run.id, - snapshotId: dequeued.payload.execution.id, + runId: dequeued[0].payload.run.id, + snapshotId: dequeued[0].payload.execution.id, }); expect(attemptResult.run.id).toBe(run.id); expect(attemptResult.run.status).toBe("EXECUTING"); @@ -381,13 +377,14 @@ describe("RunEngine", () => { const dequeued = await engine.dequeueFromMasterQueue({ consumerId: "test_12345", masterQueue: run.masterQueue, + maxRunCount: 10, }); - assertNonNullable(dequeued); + expect(dequeued.length).toBe(1); //create an attempt const attemptResult = await engine.startRunAttempt({ - runId: dequeued.payload.run.id, - snapshotId: dequeued.payload.execution.id, + runId: dequeued[0].payload.run.id, + snapshotId: dequeued[0].payload.execution.id, }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); @@ -405,7 +402,7 @@ describe("RunEngine", () => { const executionData = await engine.getRunExecutionData({ runId: run.id }); expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - await setTimeout(1_100); + await setTimeout(1_500); const executionDataAfter = await 
engine.getRunExecutionData({ runId: run.id }); expect(executionDataAfter?.snapshot.executionStatus).toBe("PENDING_EXECUTING"); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 2c3080077e..81f58342fc 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -40,7 +40,7 @@ import { getRunWithBackgroundWorkerTasks } from "./db/worker"; import { EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; -import { CreatedAttemptMessage, RunExecutionData } from "./messages"; +import { DequeuedMessage, RunExecutionData } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; type Options = { @@ -57,6 +57,11 @@ type Options = { tracer: Tracer; }; +type MachineResources = { + cpu: number; + memory: number; +}; + type TriggerParams = { friendlyId: string; number: number; @@ -424,89 +429,123 @@ export class RunEngine { async dequeueFromMasterQueue({ consumerId, masterQueue, + maxRunCount, + maxResources, tx, }: { consumerId: string; masterQueue: string; + maxRunCount: number; + maxResources?: MachineResources; tx?: PrismaClientOrTransaction; - }): Promise { + }): Promise { const prisma = tx ?? 
this.prisma; return this.#trace("createRunAttempt", { consumerId, masterQueue }, async (span) => { - //gets a fair run from this shared queue - const message = await this.runQueue.dequeueMessageFromMasterQueue(consumerId, masterQueue); - if (!message) { - return null; + //gets multiple runs from the queue + const messages = await this.runQueue.dequeueMessageFromMasterQueue( + consumerId, + masterQueue, + maxRunCount + ); + if (messages.length === 0) { + return []; } - const orgId = message.message.orgId; - const runId = message.messageId; + const dequeuedRuns: DequeuedMessage[] = []; - span.setAttribute("runId", runId); + for (const message of messages) { + const orgId = message.message.orgId; + const runId = message.messageId; - //lock the run so nothing else can modify it - return this.runLock.lock([runId], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (!snapshot) { - throw new Error( - `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${runId}` - ); - } - - if (!isDequeueableExecutionStatus(snapshot.executionStatus)) { - //todo is there a way to recover this, so the run can be retried? 
- await this.#systemFailure({ - runId, - error: { - type: "INTERNAL_ERROR", - code: "TASK_DEQUEUED_INVALID_STATE", - message: `Task was in the ${snapshot.executionStatus} state when it was dequeued for execution.`, - }, - tx: prisma, - }); - this.logger.error( - `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${runId}\n ${snapshot.id}:${snapshot.executionStatus}` - ); - return null; - } + span.setAttribute("runId", runId); - const result = await getRunWithBackgroundWorkerTasks(prisma, runId); + //lock the run so nothing else can modify it + try { + const dequeuedRun = await this.runLock.lock([runId], 5000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (!snapshot) { + throw new Error( + `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${runId}` + ); + } - if (!result.success) { - switch (result.code) { - case "NO_RUN": { - //this should not happen, the run is unrecoverable so we'll ack it - this.logger.error("RunEngine.dequeueFromMasterQueue(): No run found", { + if (!isDequeueableExecutionStatus(snapshot.executionStatus)) { + //todo is there a way to recover this, so the run can be retried? 
+ await this.#systemFailure({ runId, - latestSnapshot: snapshot.id, + error: { + type: "INTERNAL_ERROR", + code: "TASK_DEQUEUED_INVALID_STATE", + message: `Task was in the ${snapshot.executionStatus} state when it was dequeued for execution.`, + }, + tx: prisma, }); - await this.runQueue.acknowledgeMessage(orgId, runId); + this.logger.error( + `RunEngine.dequeueFromMasterQueue(): Run is not in a valid state to be dequeued: ${runId}\n ${snapshot.id}:${snapshot.executionStatus}` + ); return null; } - case "NO_WORKER": - case "TASK_NEVER_REGISTERED": - case "TASK_NOT_IN_LATEST": { - this.logger.warn(`RunEngine.dequeueFromMasterQueue(): ${result.code}`, { - runId, - latestSnapshot: snapshot.id, - result, - }); - if (result.run.runtimeEnvironment.type === "DEVELOPMENT") { - //it will automatically be requeued X times depending on the queue retry settings - const gotRequeued = await this.runQueue.nackMessage(orgId, runId); - - if (!gotRequeued) { - await this.#systemFailure({ - runId: result.run.id, - error: { - type: "INTERNAL_ERROR", - code: "COULD_NOT_FIND_TASK", - message: `We tried to dequeue this DEV run multiple times but could not find the task to run: ${result.run.taskIdentifier}`, - }, - tx: prisma, + const result = await getRunWithBackgroundWorkerTasks(prisma, runId); + + if (!result.success) { + switch (result.code) { + case "NO_RUN": { + //this should not happen, the run is unrecoverable so we'll ack it + this.logger.error("RunEngine.dequeueFromMasterQueue(): No run found", { + runId, + latestSnapshot: snapshot.id, }); + await this.runQueue.acknowledgeMessage(orgId, runId); + return null; } - } else { + case "NO_WORKER": + case "TASK_NEVER_REGISTERED": + case "TASK_NOT_IN_LATEST": { + this.logger.warn(`RunEngine.dequeueFromMasterQueue(): ${result.code}`, { + runId, + latestSnapshot: snapshot.id, + result, + }); + + if (result.run.runtimeEnvironment.type === "DEVELOPMENT") { + //it will automatically be requeued X times depending on the queue retry settings + 
const gotRequeued = await this.runQueue.nackMessage(orgId, runId); + + if (!gotRequeued) { + await this.#systemFailure({ + runId: result.run.id, + error: { + type: "INTERNAL_ERROR", + code: "COULD_NOT_FIND_TASK", + message: `We tried to dequeue this DEV run multiple times but could not find the task to run: ${result.run.taskIdentifier}`, + }, + tx: prisma, + }); + } + } else { + //not deployed yet, so we'll wait for the deploy + await this.#waitingForDeploy({ + runId, + tx: prisma, + }); + //we ack because when it's deployed it will be requeued + await this.runQueue.acknowledgeMessage(orgId, runId); + } + + return null; + } + } + } + + //check for a valid deployment if it's not a development environment + if (result.run.runtimeEnvironment.type !== "DEVELOPMENT") { + if (!result.deployment || !result.deployment.imageReference) { + this.logger.warn("RunEngine.dequeueFromMasterQueue(): No deployment found", { + runId, + latestSnapshot: snapshot.id, + result, + }); //not deployed yet, so we'll wait for the deploy await this.#waitingForDeploy({ runId, @@ -514,181 +553,182 @@ export class RunEngine { }); //we ack because when it's deployed it will be requeued await this.runQueue.acknowledgeMessage(orgId, runId); + return null; } - - return null; } - } - } - //check for a valid deployment if it's not a development environment - if (result.run.runtimeEnvironment.type !== "DEVELOPMENT") { - if (!result.deployment || !result.deployment.imageReference) { - this.logger.warn("RunEngine.dequeueFromMasterQueue(): No deployment found", { - runId, - latestSnapshot: snapshot.id, - result, + const machinePreset = machinePresetFromConfig({ + machines: this.options.machines.machines, + defaultMachine: this.options.machines.defaultMachine, + config: result.task.machineConfig ?? 
{}, }); - //not deployed yet, so we'll wait for the deploy - await this.#waitingForDeploy({ - runId, - tx: prisma, + + //update the run + const lockedTaskRun = await prisma.taskRun.update({ + where: { + id: runId, + }, + data: { + lockedAt: new Date(), + lockedById: result.task.id, + lockedToVersionId: result.worker.id, + startedAt: result.run.startedAt ?? new Date(), + baseCostInCents: this.options.machines.baseCostInCents, + machinePreset: machinePreset.name, + maxDurationInSeconds: getMaxDuration( + result.run.maxDurationInSeconds, + result.task.maxDurationInSeconds + ), + }, + include: { + runtimeEnvironment: true, + attempts: { + take: 1, + orderBy: { number: "desc" }, + }, + tags: true, + }, }); - //we ack because when it's deployed it will be requeued - await this.runQueue.acknowledgeMessage(orgId, runId); - return null; - } - } - const machinePreset = machinePresetFromConfig({ - machines: this.options.machines.machines, - defaultMachine: this.options.machines.defaultMachine, - config: result.task.machineConfig ?? {}, - }); + if (!lockedTaskRun) { + this.logger.error("RunEngine.dequeueFromMasterQueue(): Failed to lock task run", { + taskRun: result.run.id, + taskIdentifier: result.run.taskIdentifier, + deployment: result.deployment?.id, + worker: result.worker.id, + task: result.task.id, + runId, + }); - //update the run - const lockedTaskRun = await prisma.taskRun.update({ - where: { - id: runId, - }, - data: { - lockedAt: new Date(), - lockedById: result.task.id, - lockedToVersionId: result.worker.id, - startedAt: result.run.startedAt ?? 
new Date(), - baseCostInCents: this.options.machines.baseCostInCents, - machinePreset: machinePreset.name, - maxDurationInSeconds: getMaxDuration( - result.run.maxDurationInSeconds, - result.task.maxDurationInSeconds - ), - }, - include: { - runtimeEnvironment: true, - attempts: { - take: 1, - orderBy: { number: "desc" }, - }, - tags: true, - }, - }); + await this.runQueue.acknowledgeMessage(orgId, runId); + return null; + } - if (!lockedTaskRun) { - this.logger.error("RunEngine.dequeueFromMasterQueue(): Failed to lock task run", { - taskRun: result.run.id, - taskIdentifier: result.run.taskIdentifier, - deployment: result.deployment?.id, - worker: result.worker.id, - task: result.task.id, - runId, - }); + const queue = await prisma.taskQueue.findUnique({ + where: { + runtimeEnvironmentId_name: { + runtimeEnvironmentId: lockedTaskRun.runtimeEnvironmentId, + name: sanitizeQueueName(lockedTaskRun.queue), + }, + }, + }); - await this.runQueue.acknowledgeMessage(orgId, runId); - return null; - } + if (!queue) { + this.logger.debug( + "RunEngine.dequeueFromMasterQueue(): queue not found, so nacking message", + { + queueMessage: message, + taskRunQueue: lockedTaskRun.queue, + runtimeEnvironmentId: lockedTaskRun.runtimeEnvironmentId, + } + ); - const queue = await prisma.taskQueue.findUnique({ - where: { - runtimeEnvironmentId_name: { - runtimeEnvironmentId: lockedTaskRun.runtimeEnvironmentId, - name: sanitizeQueueName(lockedTaskRun.queue), - }, - }, - }); + //will auto-retry + const gotRequeued = await this.runQueue.nackMessage(orgId, runId); + if (!gotRequeued) { + await this.#systemFailure({ + runId, + error: { + type: "INTERNAL_ERROR", + code: "TASK_DEQUEUED_QUEUE_NOT_FOUND", + message: `Tried to dequeue the run but the queue doesn't exist: ${lockedTaskRun.queue}`, + }, + tx: prisma, + }); + } - if (!queue) { - this.logger.debug( - "RunEngine.dequeueFromMasterQueue(): queue not found, so nacking message", - { - queueMessage: message, - taskRunQueue: 
lockedTaskRun.queue, - runtimeEnvironmentId: lockedTaskRun.runtimeEnvironmentId, + return null; } - ); - //will auto-retry - const gotRequeued = await this.runQueue.nackMessage(orgId, runId); - if (!gotRequeued) { - await this.#systemFailure({ - runId, - error: { - type: "INTERNAL_ERROR", - code: "TASK_DEQUEUED_QUEUE_NOT_FOUND", - message: `Tried to dequeue the run but the queue doesn't exist: ${lockedTaskRun.queue}`, + const currentAttemptNumber = lockedTaskRun.attempts.at(0)?.number ?? 0; + const nextAttemptNumber = currentAttemptNumber + 1; + + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: { + id: runId, + status: snapshot.runStatus, + }, + snapshot: { + executionStatus: "PENDING_EXECUTING", + description: "Run was dequeued for execution", }, - tx: prisma, + checkpointId: snapshot.checkpointId ?? undefined, }); - } - return null; - } + return { + action: "SCHEDULE_RUN" as const, + payload: { + version: "1" as const, + execution: { + id: newSnapshot.id, + }, + image: result.deployment?.imageReference ?? undefined, + checkpoint: newSnapshot.checkpoint ?? undefined, + backgroundWorker: { + id: result.worker.id, + version: result.worker.version, + }, + run: { + id: lockedTaskRun.id, + friendlyId: lockedTaskRun.friendlyId, + isTest: lockedTaskRun.isTest, + machine: machinePreset, + attemptNumber: nextAttemptNumber, + masterQueue: lockedTaskRun.masterQueue, + traceContext: lockedTaskRun.traceContext as Record, + }, + environment: { + id: lockedTaskRun.runtimeEnvironment.id, + type: lockedTaskRun.runtimeEnvironment.type, + }, + organization: { + id: orgId, + }, + project: { + id: lockedTaskRun.projectId, + }, + }, + }; + }); - const currentAttemptNumber = lockedTaskRun.attempts.at(0)?.number ?? 
0; - const nextAttemptNumber = currentAttemptNumber + 1; + if (dequeuedRun !== null) { + dequeuedRuns.push(dequeuedRun); + } + } catch (error) { + this.logger.error( + "RunEngine.dequeueFromMasterQueue(): Error while preparing run to be run", + { + error, + runId, + } + ); - const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run: { - id: runId, - status: snapshot.runStatus, - }, - snapshot: { - executionStatus: "PENDING_EXECUTING", - description: "Run was dequeued for execution", - }, - checkpointId: snapshot.checkpointId ?? undefined, - }); + await this.runQueue.nackMessage(orgId, runId); + } + } - return { - action: "SCHEDULE_RUN", - payload: { - version: "1", - execution: { - id: newSnapshot.id, - status: "PENDING_EXECUTING", - }, - image: result.deployment?.imageReference ?? undefined, - checkpoint: newSnapshot.checkpoint ?? undefined, - backgroundWorker: { - id: result.worker.id, - version: result.worker.version, - }, - run: { - id: lockedTaskRun.id, - friendlyId: lockedTaskRun.friendlyId, - isTest: lockedTaskRun.isTest, - machine: machinePreset, - attemptNumber: nextAttemptNumber, - masterQueue: lockedTaskRun.masterQueue, - traceContext: lockedTaskRun.traceContext as Record, - }, - environment: { - id: lockedTaskRun.runtimeEnvironment.id, - type: lockedTaskRun.runtimeEnvironment.type, - }, - organization: { - id: orgId, - }, - project: { - id: lockedTaskRun.projectId, - }, - }, - }; - }); + return dequeuedRuns; }); } async dequeueFromEnvironmentMasterQueue({ consumerId, environmentId, + maxRunCount, + maxResources, tx, }: { consumerId: string; environmentId: string; + maxRunCount: number; + maxResources?: MachineResources; tx?: PrismaClientOrTransaction; }) { return this.dequeueFromMasterQueue({ consumerId, masterQueue: this.#environmentMasterQueueKey(environmentId), + maxRunCount, + maxResources, tx, }); } @@ -696,15 +736,21 @@ export class RunEngine { async dequeueFromBackgroundWorkerMasterQueue({ consumerId, backgroundWorkerId, + 
maxRunCount, + maxResources, tx, }: { consumerId: string; backgroundWorkerId: string; + maxRunCount: number; + maxResources?: MachineResources; tx?: PrismaClientOrTransaction; }) { return this.dequeueFromMasterQueue({ consumerId, masterQueue: this.#backgroundWorkerQueueKey(backgroundWorkerId), + maxRunCount, + maxResources, tx, }); } @@ -957,6 +1003,7 @@ export class RunEngine { return { run, snapshot, + execution, }; }); }); diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 7ebe63ba0e..86018b62b1 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -5,14 +5,13 @@ import { z } from "zod"; //todo it will need to move into core because the Worker will need to use these /** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ -const CreatedAttemptMessage = z.object({ +const DequeuedMessage = z.object({ action: z.literal("SCHEDULE_RUN"), // The payload allows us to a discriminated union with the version payload: z.object({ version: z.literal("1"), execution: z.object({ id: z.string(), - status: z.literal("PENDING_EXECUTING"), }), image: z.string().optional(), checkpoint: z @@ -48,7 +47,7 @@ const CreatedAttemptMessage = z.object({ }), }), }); -export type CreatedAttemptMessage = z.infer; +export type DequeuedMessage = z.infer; const CompletedWaitpoint = z.object({ id: z.string(), diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 71de267855..3bc12daaed 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -374,7 +374,8 @@ export class RunQueue { } /** - * Negative acknowledge a message, which will requeue the message (with an optional future date) + * Negative acknowledge a message, which will requeue the message (with an optional future date). 
+ If you pass no date it will get reattempted with exponential backoff. */ public async nackMessage(orgId: string, messageId: string, retryAt?: number) { return this.#trace( From 38b7fe04c10f525528894ffe86ce26a70307586a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 16:15:37 +0100 Subject: [PATCH 074/485] Added notes about workers to the Run engine readme --- internal-packages/run-engine/README.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 230803f835..8f68ef0cbe 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -14,6 +14,30 @@ The execution of a run is stored in the `TaskRunExecutionSnapshot` table in Post ![The execution states](./execution-states.png) +## Workers + +A worker is a server that runs tasks. There are two types of workers: +- Hosted workers (serverless, managed and cloud-only) +- Self-hosted workers + +In the dashboard under the "Workers" page, you can see all worker groups including the "main" group which is the default and not self-hosted. You can also see alternative worker groups that are available to you, such as "EU", "v3.2 (beta)", and any self-hosted worker groups you have created. + +You add a new self-hosted worker group by clicking "Add" and choosing an `id` that is unique to your project. + +Then when triggering runs, you can specify the `workerGroup` to use. It defaults to "main". The workerGroup is used internally to set the `masterQueue` that a run is placed in, this allows pulling runs only for that worker group. + +On the "Workers" page, you can see the status of each worker group, including the number of workers in the group, the number of runs that are queued. + +## Pulling from the queue + +A worker will call the Trigger.dev API with it's `workerGroup`. 
+ +For warm starts, self-hosted workers we will also pass the `BackgroundWorker` id and `environment` id. This allow pulling relevant runs. + +For dev environments, we will pass the `environment` id. + +If there's only a `workerGroup`, we can just `dequeueFromMasterQueue()` to get runs. If there's a `BackgroundWorker` id, we need to determine if that `BackgroundWorker` is the latest. If it's the latest we call `dequeueFromEnvironmentMasterQueue()` to get any runs that aren't locked to a version. If it's not the latest, we call `dequeueFromBackgroundWorkerMasterQueue()` to get runs that are locked to that version. + ## Components ### Run Engine From c8df13a4db087d44d7e55a4df1a434a9949e6a43 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 16:15:42 +0100 Subject: [PATCH 075/485] Removed unused import --- internal-packages/run-engine/src/engine/eventBus.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 295ececf6e..b834c24ce1 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -1,5 +1,3 @@ -import { EventEmitter } from "node:events"; - export type EventBusEvents = { runExpired: [ { From 5a7eef6ccc3b12845c17cc1163125f5cbd4addf2 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 16:27:09 +0100 Subject: [PATCH 076/485] Cleaned some messages up and added `completedWaitpoints` when dequeuing --- .../run-engine/src/engine/index.test.ts | 13 +- .../run-engine/src/engine/index.ts | 111 ++++++++---------- .../run-engine/src/engine/messages.ts | 91 +++++++------- 3 files changed, 102 insertions(+), 113 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index db90458fcc..87589f81a5 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ 
b/internal-packages/run-engine/src/engine/index.test.ts @@ -119,9 +119,8 @@ describe("RunEngine", () => { maxRunCount: 10, }); expect(dequeued.length).toBe(1); - expect(dequeued[0].action).toBe("SCHEDULE_RUN"); - expect(dequeued[0].payload.run.id).toBe(run.id); - expect(dequeued[0].payload.run.attemptNumber).toBe(1); + expect(dequeued[0].run.id).toBe(run.id); + expect(dequeued[0].run.attemptNumber).toBe(1); const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( authenticatedEnvironment @@ -130,8 +129,8 @@ describe("RunEngine", () => { //create an attempt const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].payload.run.id, - snapshotId: dequeued[0].payload.execution.id, + runId: dequeued[0].run.id, + snapshotId: dequeued[0].execution.id, }); expect(attemptResult.run.id).toBe(run.id); expect(attemptResult.run.status).toBe("EXECUTING"); @@ -383,8 +382,8 @@ describe("RunEngine", () => { //create an attempt const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].payload.run.id, - snapshotId: dequeued[0].payload.execution.id, + runId: dequeued[0].run.id, + snapshotId: dequeued[0].execution.id, }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 81f58342fc..28119daf68 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -653,40 +653,39 @@ export class RunEngine { description: "Run was dequeued for execution", }, checkpointId: snapshot.checkpointId ?? undefined, + completedWaitpointIds: snapshot.completedWaitpoints.map((wp) => wp.id), }); return { - action: "SCHEDULE_RUN" as const, - payload: { - version: "1" as const, - execution: { - id: newSnapshot.id, - }, - image: result.deployment?.imageReference ?? undefined, - checkpoint: newSnapshot.checkpoint ?? 
undefined, - backgroundWorker: { - id: result.worker.id, - version: result.worker.version, - }, - run: { - id: lockedTaskRun.id, - friendlyId: lockedTaskRun.friendlyId, - isTest: lockedTaskRun.isTest, - machine: machinePreset, - attemptNumber: nextAttemptNumber, - masterQueue: lockedTaskRun.masterQueue, - traceContext: lockedTaskRun.traceContext as Record, - }, - environment: { - id: lockedTaskRun.runtimeEnvironment.id, - type: lockedTaskRun.runtimeEnvironment.type, - }, - organization: { - id: orgId, - }, - project: { - id: lockedTaskRun.projectId, - }, + version: "1" as const, + execution: { + id: newSnapshot.id, + }, + image: result.deployment?.imageReference ?? undefined, + checkpoint: newSnapshot.checkpoint ?? undefined, + completedWaitpoints: snapshot.completedWaitpoints, + backgroundWorker: { + id: result.worker.id, + version: result.worker.version, + }, + run: { + id: lockedTaskRun.id, + friendlyId: lockedTaskRun.friendlyId, + isTest: lockedTaskRun.isTest, + machine: machinePreset, + attemptNumber: nextAttemptNumber, + masterQueue: lockedTaskRun.masterQueue, + traceContext: lockedTaskRun.traceContext as Record, + }, + environment: { + id: lockedTaskRun.runtimeEnvironment.id, + type: lockedTaskRun.runtimeEnvironment.type, + }, + organization: { + id: orgId, + }, + project: { + id: lockedTaskRun.projectId, }, }; }); @@ -1102,17 +1101,6 @@ export class RunEngine { }); } - /** This is called to get the */ - async resumeRun({ - runId, - snapshotId, - tx, - }: { - runId: string; - snapshotId: string; - tx?: PrismaClientOrTransaction; - }) {} - async waitForDuration({ runId, snapshotId, @@ -1378,6 +1366,7 @@ export class RunEngine { } const executionData: RunExecutionData = { + version: "1" as const, snapshot: { id: snapshot.id, executionStatus: snapshot.executionStatus, @@ -1397,22 +1386,7 @@ export class RunEngine { reason: snapshot.checkpoint.reason ?? undefined, } : undefined, - completedWaitpoints: - snapshot.completedWaitpoints.length === 0 - ? 
undefined - : snapshot.completedWaitpoints.map((w) => ({ - id: w.id, - type: w.type, - completedAt: w.completedAt ?? new Date(), - idempotencyKey: - w.userProvidedIdempotencyKey && !w.inactiveIdempotencyKey - ? w.idempotencyKey - : undefined, - completedByTaskRunId: w.completedByTaskRunId ?? undefined, - completedAfter: w.completedAfter ?? undefined, - output: w.output ?? undefined, - outputType: w.outputType, - })), + completedWaitpoints: snapshot.completedWaitpoints, }; return executionData; @@ -1732,7 +1706,7 @@ export class RunEngine { } async #getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { - return prisma.taskRunExecutionSnapshot.findFirst({ + const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ where: { runId }, include: { completedWaitpoints: true, @@ -1740,6 +1714,25 @@ export class RunEngine { }, orderBy: { createdAt: "desc" }, }); + + if (!snapshot) { + return null; + } + + return { + ...snapshot, + completedWaitpoints: snapshot.completedWaitpoints.map((w) => ({ + id: w.id, + type: w.type, + completedAt: w.completedAt ?? new Date(), + idempotencyKey: + w.userProvidedIdempotencyKey && !w.inactiveIdempotencyKey ? w.idempotencyKey : undefined, + completedByTaskRunId: w.completedByTaskRunId ?? undefined, + completedAfter: w.completedAfter ?? undefined, + output: w.output ?? 
undefined, + outputType: w.outputType, + })), + }; } async #getExecutionSnapshotCompletedWaitpoints( diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 86018b62b1..8724f5fa19 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -3,52 +3,6 @@ import { TaskRunExecutionStatus, TaskRunStatus, WaitpointType } from "@trigger.d import { z } from "zod"; //todo it will need to move into core because the Worker will need to use these - -/** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ -const DequeuedMessage = z.object({ - action: z.literal("SCHEDULE_RUN"), - // The payload allows us to a discriminated union with the version - payload: z.object({ - version: z.literal("1"), - execution: z.object({ - id: z.string(), - }), - image: z.string().optional(), - checkpoint: z - .object({ - id: z.string(), - type: z.string(), - location: z.string(), - reason: z.string().nullish(), - }) - .optional(), - backgroundWorker: z.object({ - id: z.string(), - version: z.string(), - }), - run: z.object({ - id: z.string(), - friendlyId: z.string(), - isTest: z.boolean(), - machine: MachinePreset, - attemptNumber: z.number(), - masterQueue: z.string(), - traceContext: z.record(z.unknown()), - }), - environment: z.object({ - id: z.string(), - type: EnvironmentType, - }), - organization: z.object({ - id: z.string(), - }), - project: z.object({ - id: z.string(), - }), - }), -}); -export type DequeuedMessage = z.infer; - const CompletedWaitpoint = z.object({ id: z.string(), type: z.enum(Object.values(WaitpointType) as [WaitpointType]), @@ -62,7 +16,50 @@ const CompletedWaitpoint = z.object({ outputType: z.string().optional(), }); +/** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ +const DequeuedMessage = z.object({ + version: z.literal("1"), + execution: z.object({ + id: z.string(), + 
}), + image: z.string().optional(), + checkpoint: z + .object({ + id: z.string(), + type: z.string(), + location: z.string(), + reason: z.string().nullish(), + }) + .optional(), + completedWaitpoints: z.array(CompletedWaitpoint), + backgroundWorker: z.object({ + id: z.string(), + version: z.string(), + }), + run: z.object({ + id: z.string(), + friendlyId: z.string(), + isTest: z.boolean(), + machine: MachinePreset, + attemptNumber: z.number(), + masterQueue: z.string(), + traceContext: z.record(z.unknown()), + }), + environment: z.object({ + id: z.string(), + type: EnvironmentType, + }), + organization: z.object({ + id: z.string(), + }), + project: z.object({ + id: z.string(), + }), +}); +export type DequeuedMessage = z.infer; + export const RunExecutionData = z.object({ + version: z.literal("1"), snapshot: z.object({ id: z.string(), executionStatus: z.enum(Object.values(TaskRunExecutionStatus) as [TaskRunExecutionStatus]), @@ -82,7 +79,7 @@ export const RunExecutionData = z.object({ reason: z.string().optional(), }) .optional(), - completedWaitpoints: z.array(CompletedWaitpoint).optional(), + completedWaitpoints: z.array(CompletedWaitpoint), }); export type RunExecutionData = z.infer; From 3407343f2c5a51354a32d0fe3387ef61b113eb69 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 17:56:20 +0100 Subject: [PATCH 077/485] Dequeue multiple items (by rotating through the selected queues) with test --- .../run-engine/src/run-queue/index.test.ts | 66 +++++++++++++++++- .../run-engine/src/run-queue/index.ts | 68 ++++++++++++++----- .../simpleWeightedPriorityStrategy.ts | 10 +-- 3 files changed, 119 insertions(+), 25 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 42516a2b2d..746344ba94 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -14,7 +14,7 @@ const testOptions = { 
queuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 36 }), envQueuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 12 }), workers: 1, - defaultEnvConcurrency: 10, + defaultEnvConcurrency: 25, enableRebalancing: false, logger: new Logger("RunQueue", "warn"), retryOptions: { @@ -121,7 +121,7 @@ describe("RunQueue", () => { try { //initial value const initial = await queue.getEnvConcurrencyLimit(authenticatedEnvProd); - expect(initial).toBe(10); + expect(initial).toBe(25); //set 20 await queue.updateEnvConcurrencyLimits({ @@ -327,6 +327,68 @@ describe("RunQueue", () => { } ); + redisTest( + "Dequeue multiple messages from the queue", + { timeout: 5_000 }, + async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + // Create 20 messages with different runIds and some with different queues + const messages = Array.from({ length: 20 }, (_, i) => ({ + ...messageProd, + runId: `r${i + 1}`, + queue: i < 15 ? 
"task/my-task" : "task/other-task", // Mix up the queues + })); + + // Enqueue all messages + for (const message of messages) { + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message, + masterQueues: "main", + }); + } + + // Check initial queue lengths + const initialLength1 = await queue.lengthOfQueue(authenticatedEnvProd, "task/my-task"); + const initialLength2 = await queue.lengthOfQueue(authenticatedEnvProd, "task/other-task"); + expect(initialLength1).toBe(15); + expect(initialLength2).toBe(5); + + // Dequeue first batch of 10 messages + const dequeued1 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(dequeued1.length).toBe(10); + + // Dequeue second batch of 10 messages + const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(dequeued2.length).toBe(10); + + // Combine all dequeued message IDs + const dequeuedIds = [...dequeued1, ...dequeued2].map((m) => m.messageId); + + // Check that all original messages were dequeued + const allOriginalIds = messages.map((m) => m.runId); + expect(dequeuedIds.sort()).toEqual(allOriginalIds.sort()); + + // Try to dequeue more - should get none + const dequeued3 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(dequeued3.length).toBe(0); + + // Check final queue lengths + const finalLength1 = await queue.lengthOfQueue(authenticatedEnvProd, "task/my-task"); + const finalLength2 = await queue.lengthOfQueue(authenticatedEnvProd, "task/other-task"); + expect(finalLength1).toBe(0); + expect(finalLength2).toBe(0); + } finally { + await queue.quit(); + } + } + ); + redisTest("Get shared queue details", { timeout: 5_000 }, async ({ redisContainer, redis }) => { const queue = new RunQueue({ ...testOptions, diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 3bc12daaed..3290d3fac3 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ 
b/internal-packages/run-engine/src/run-queue/index.ts @@ -260,7 +260,7 @@ export class RunQueue { } /** - * Dequeue a message + * Dequeue messages from the master queue */ public async dequeueMessageFromMasterQueue( consumerId: string, @@ -284,23 +284,47 @@ export class RunQueue { } const messages: DequeuedMessage[] = []; + const remainingMessages = selectedQueues.map((q) => q.size); + let currentQueueIndex = 0; + + while (messages.length < maxCount) { + let foundMessage = false; + + // Try each queue once in this round + for (let i = 0; i < selectedQueues.length; i++) { + currentQueueIndex = (currentQueueIndex + i) % selectedQueues.length; + + // Skip if this queue is empty + if (remainingMessages[currentQueueIndex] <= 0) continue; + + const selectedQueue = selectedQueues[currentQueueIndex]; + const queue = selectedQueue.queue; + + const message = await this.#callDequeueMessage({ + messageQueue: queue, + concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), + currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), + envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), + envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), + projectCurrentConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(queue), + messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), + taskCurrentConcurrentKeyPrefix: + this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), + }); - for (const queue of selectedQueues) { - const message = await this.#callDequeueMessage({ - messageQueue: queue, - concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), - currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), - envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), - envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), - projectCurrentConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(queue), - 
messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), - taskCurrentConcurrentKeyPrefix: - this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), - }); - - if (message) { - messages.push(message); + if (message) { + messages.push(message); + remainingMessages[currentQueueIndex]--; + foundMessage = true; + break; + } else { + // If we failed to get a message, mark this queue as empty + remainingMessages[currentQueueIndex] = 0; + } } + + // If we couldn't get a message from any queue, break + if (!foundMessage) break; } span.setAttributes({ @@ -754,7 +778,15 @@ export class RunQueue { calculateCapacities: (queue: string) => Promise, consumerId: string, maxCount: number - ): Promise { + ): Promise< + | { + queue: string; + capacities: QueueCapacities; + age: number; + size: number; + }[] + | undefined + > { return this.#trace( "getRandomQueueFromParentQueue", async (span) => { @@ -819,7 +851,7 @@ export class RunQueue { if (Array.isArray(choices)) { span.setAttribute("queueChoices", choices); - return choices; + return queuesWithScores.filter((queue) => choices.includes(queue.queue)); } else { span.setAttribute("noQueueChoice", true); return; diff --git a/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts b/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts index 9535f4bd5f..04eb68c7d7 100644 --- a/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts +++ b/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts @@ -35,10 +35,10 @@ export class SimpleWeightedChoiceStrategy implements RunQueuePriorityStrategy { const filteredQueues = filterQueuesAtCapacity(queues); if (queues.length === this.options.queueSelectionCount) { - const nextRange: QueueRange = { - offset: previousRange.offset + this.options.queueSelectionCount, - count: this.options.queueSelectionCount, - }; + const nextRange: QueueRange = { + offset: previousRange.offset + 
this.options.queueSelectionCount, + count: this.options.queueSelectionCount, + }; // If all queues are at capacity, and we were passed the max number of queues, then we will slide the window "to the right" this._nextRangesByParentQueue.set(`${consumerId}:${parentQueue}`, nextRange); @@ -98,7 +98,7 @@ export class SimpleWeightedChoiceStrategy implements RunQueuePriorityStrategy { return { queue, - totalWeight: age, + totalWeight, }; }); } From 2ba34d1d895964256f86470f01eb24bf1eaf2b6f Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 19:13:59 +0100 Subject: [PATCH 078/485] =?UTF-8?q?Added=20the=20ability=20to=20store=20sn?= =?UTF-8?q?apshot=20errors,=20they=20are=20for=20debugging=20and=20won?= =?UTF-8?q?=E2=80=99t=20impact=20performance=20or=20grabbing=20the=20lates?= =?UTF-8?q?t=20state?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../migration.sql | 9 ++++++++ .../database/prisma/schema.prisma | 8 +++++-- .../run-engine/src/engine/index.ts | 21 ++++++++++++------- 3 files changed, 29 insertions(+), 9 deletions(-) create mode 100644 internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql diff --git a/internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql b/internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql new file mode 100644 index 0000000000..4223408016 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql @@ -0,0 +1,9 @@ +-- DropIndex +DROP INDEX "TaskRunExecutionSnapshot_runId_createdAt_idx"; + +-- AlterTable +ALTER TABLE "TaskRunExecutionSnapshot" ADD COLUMN "error" TEXT, +ADD COLUMN "isValid" BOOLEAN NOT NULL DEFAULT true; + +-- CreateIndex +CREATE INDEX 
"TaskRunExecutionSnapshot_runId_isValid_createdAt_idx" ON "TaskRunExecutionSnapshot"("runId", "isValid", "createdAt" DESC); diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 1e346cc35d..c158210a63 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1890,6 +1890,10 @@ model TaskRunExecutionSnapshot { /// For debugging description String + /// We store invalid snapshots as a record of the run state when we tried to move + isValid Boolean @default(true) + error String? + /// Run runId String run TaskRun @relation(fields: [runId], references: [id]) @@ -1907,8 +1911,8 @@ model TaskRunExecutionSnapshot { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - /// Used to get the latest state quickly - @@index([runId, createdAt(sort: Desc)]) + /// Used to get the latest valid snapshot quickly + @@index([runId, isValid, createdAt(sort: Desc)]) } enum TaskRunExecutionStatus { diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 28119daf68..42ceccd1cc 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1626,6 +1626,7 @@ export class RunEngine { snapshot, checkpointId, completedWaitpointIds, + error, }: { run: { id: string; status: TaskRunStatus; attemptNumber?: number | null }; snapshot: { @@ -1634,6 +1635,7 @@ export class RunEngine { }; checkpointId?: string; completedWaitpointIds?: string[]; + error?: string; } ) { const newSnapshot = await prisma.taskRunExecutionSnapshot.create({ @@ -1648,18 +1650,22 @@ export class RunEngine { completedWaitpoints: { connect: completedWaitpointIds?.map((id) => ({ id })), }, + isValid: error ? false : true, + error: error ?? 
undefined, }, include: { checkpoint: true, }, }); - //set heartbeat (if relevant) - await this.#setExecutionSnapshotHeartbeat({ - status: newSnapshot.executionStatus, - runId: run.id, - snapshotId: newSnapshot.id, - }); + if (!error) { + //set heartbeat (if relevant) + await this.#setExecutionSnapshotHeartbeat({ + status: newSnapshot.executionStatus, + runId: run.id, + snapshotId: newSnapshot.id, + }); + } return newSnapshot; } @@ -1705,9 +1711,10 @@ export class RunEngine { } } + /* Gets the most recent valid snapshot for a run */ async #getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { runId }, + where: { runId, isValid: true }, include: { completedWaitpoints: true, checkpoint: true, From b95404e6120afeab3f0b0097756c26813e1a3a5e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 19:23:25 +0100 Subject: [PATCH 079/485] Example of using the error execution snapshot --- .../run-engine/src/engine/index.ts | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 42ceccd1cc..dd7671ed3e 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -470,7 +470,25 @@ export class RunEngine { } if (!isDequeueableExecutionStatus(snapshot.executionStatus)) { + //create a failed snapshot + await this.#createExecutionSnapshot(prisma, { + run: { + id: snapshot.runId, + status: snapshot.runStatus, + }, + snapshot: { + executionStatus: snapshot.executionStatus, + description: + "Tried to dequeue a run that is not in a valid state to be dequeued.", + }, + checkpointId: snapshot.checkpointId ?? 
undefined, + completedWaitpointIds: snapshot.completedWaitpoints.map((wp) => wp.id), + error: `Tried to dequeue a run that is not in a valid state to be dequeued.`, + }); + //todo is there a way to recover this, so the run can be retried? + //for example should we update the status to a dequeuable status and nack it? + //then at least it has a chance of succeeding and we have the error log above await this.#systemFailure({ runId, error: { From 431ec06af63e909dcc6fa33befa0863370f22dd3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 19:50:42 +0100 Subject: [PATCH 080/485] Emit an event on executionSnapshotCreated, so we can add admin only logs to the runs for debugging --- .../run-engine/src/engine/eventBus.ts | 21 +++++++++++++++++++ .../run-engine/src/engine/index.ts | 11 ++++++++++ 2 files changed, 32 insertions(+) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index b834c24ce1..d07bd84f33 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -1,3 +1,5 @@ +import { TaskRunExecutionStatus } from "@trigger.dev/database"; + export type EventBusEvents = { runExpired: [ { @@ -20,6 +22,25 @@ export type EventBusEvents = { }; }, ]; + executionSnapshotCreated: [ + { + time: Date; + run: { + id: string; + }; + snapshot: { + id: string; + executionStatus: TaskRunExecutionStatus; + description: string; + runStatus: string; + attemptNumber: number | null; + checkpointId: string | null; + completedWaitpointIds: string[]; + isValid: boolean; + error: string | null; + }; + }, + ]; }; export type EventBusEventArgs = EventBusEvents[T]; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index dd7671ed3e..2a68a899cc 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1685,6 +1685,17 @@ export 
class RunEngine { }); } + this.eventBus.emit("executionSnapshotCreated", { + time: newSnapshot.createdAt, + run: { + id: newSnapshot.runId, + }, + snapshot: { + ...newSnapshot, + completedWaitpointIds: completedWaitpointIds ?? [], + }, + }); + return newSnapshot; } From 682c8082bdf1f5bfe75a3c85c9bfb051ee6747c9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 25 Oct 2024 20:11:27 +0100 Subject: [PATCH 081/485] Remove the todo from complete attempt --- internal-packages/run-engine/src/engine/index.ts | 7 ------- 1 file changed, 7 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 2a68a899cc..8f18ffea6e 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1036,13 +1036,6 @@ export class RunEngine { snapshotId: string; completion: TaskRunExecutionResult; }) { - //todo - //1. lock the run - //2. get the latest snapshot - //3. deal with completion errors - //4. update the run status, create final snapshot - //5. 
complete waitpoints - return this.#trace("completeRunAttempt", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); From 90972f5479ab7f65be438e23412fa54221b1ab1f Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 4 Nov 2024 08:13:12 +0000 Subject: [PATCH 082/485] run engine singleton --- apps/webapp/app/v3/runEngine.server.ts | 41 +++++++++++++ .../app/v3/services/triggerTaskV2.server.ts | 58 ++++++++----------- 2 files changed, 64 insertions(+), 35 deletions(-) create mode 100644 apps/webapp/app/v3/runEngine.server.ts diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts new file mode 100644 index 0000000000..d3552bd1dc --- /dev/null +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -0,0 +1,41 @@ +import { RunEngine } from "@internal/run-engine"; +import { prisma } from "~/db.server"; +import { env } from "~/env.server"; +import { tracer } from "./tracer.server"; +import { singleton } from "~/utils/singleton"; + +export const engine = singleton("RunEngine", createRunEngine); + +export type { RunEngine }; + +function createRunEngine() { + return new RunEngine({ + prisma, + redis: { + port: env.REDIS_PORT, + host: env.REDIS_HOST, + username: env.REDIS_USERNAME, + password: env.REDIS_PASSWORD, + enableAutoPipelining: true, + ...(env.REDIS_TLS_DISABLED === "true" ? 
{} : { tls: {} }), + }, + worker: { + workers: 1, + tasksPerWorker: env.WORKER_CONCURRENCY, + pollIntervalMs: env.WORKER_POLL_INTERVAL, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer, + }); +} diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 4ea9394d2c..4d8fb4c4f9 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -8,8 +8,7 @@ import { import { env } from "~/env.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { autoIncrementCounter } from "~/services/autoIncrementCounter.server"; -import { workerQueue } from "~/services/worker.server"; -import { marqs, sanitizeQueueName } from "~/v3/marqs/index.server"; +import { sanitizeQueueName } from "~/v3/marqs/index.server"; import { eventRepository } from "../eventRepository.server"; import { generateFriendlyId } from "../friendlyIdentifiers"; import { uploadToObjectStore } from "../r2.server"; @@ -21,8 +20,8 @@ import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; import { handleMetadataPacket } from "~/utils/packets"; -import { RunEngine } from "@internal/run-engine"; -import { prisma } from "~/db.server"; +import type { PrismaClientOrTransaction } from "~/db.server"; +import { engine, type RunEngine } from "../runEngine.server"; export type TriggerTaskServiceOptions = { idempotencyKey?: string; @@ -40,35 +39,25 @@ export class OutOfEntitlementError extends Error { } } -//todo move this to a singleton somewhere -const engine = new RunEngine({ - prisma, - redis: { - port: env.REDIS_PORT, - host: 
env.REDIS_HOST, - username: env.REDIS_USERNAME, - password: env.REDIS_PASSWORD, - enableAutoPipelining: true, - ...(env.REDIS_TLS_DISABLED === "true" ? {} : { tls: {} }), - }, - zodWorker: { - connectionString: env.DATABASE_URL, - concurrency: env.WORKER_CONCURRENCY, - pollInterval: env.WORKER_POLL_INTERVAL, - noPreparedStatements: env.DATABASE_URL !== env.DIRECT_URL, - schema: env.WORKER_SCHEMA, - maxPoolSize: env.WORKER_CONCURRENCY + 1, - shutdownTimeoutInMs: env.GRACEFUL_SHUTDOWN_TIMEOUT, - }, -}); - -export class TriggerTaskService extends BaseService { - public async call( - taskId: string, - environment: AuthenticatedEnvironment, - body: TriggerTaskRequestBody, - options: TriggerTaskServiceOptions = {} - ) { +export class TriggerTaskServiceV2 extends BaseService { + private _engine: RunEngine; + + constructor({ prisma, runEngine }: { prisma: PrismaClientOrTransaction; runEngine: RunEngine }) { + super(prisma); + this._engine = runEngine ?? engine; + } + + public async call({ + taskId, + environment, + body, + options = {}, + }: { + taskId: string; + environment: AuthenticatedEnvironment; + body: TriggerTaskRequestBody; + options?: TriggerTaskServiceOptions; + }) { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); @@ -327,7 +316,7 @@ export class TriggerTaskService extends BaseService { event.setAttribute("runId", runFriendlyId); span.setAttribute("runId", runFriendlyId); - const taskRun = await engine.trigger( + const taskRun = await this._engine.trigger( { number: num, friendlyId: runFriendlyId, @@ -355,7 +344,6 @@ export class TriggerTaskService extends BaseService { ttl, tags: tagIds, parentTaskRunId: parentAttempt?.taskRun.id, - parentTaskRunAttemptId: parentAttempt?.id, rootTaskRunId: parentAttempt?.taskRun.rootTaskRunId ?? parentAttempt?.taskRun.id, batchId: dependentBatchRun?.id ?? parentBatchRun?.id, resumeParentOnCompletion: !!(dependentAttempt ?? 
dependentBatchRun), From 8d22cc7e28a98b2ed2b54e24bb753bfd11f860ac Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 4 Nov 2024 08:15:57 +0000 Subject: [PATCH 083/485] fix engine shutdown and redis errors in tests --- internal-packages/redis-worker/src/worker.ts | 8 +++----- internal-packages/run-engine/src/engine/index.ts | 12 ++++++++++-- internal-packages/run-engine/src/engine/locking.ts | 4 ++++ .../run-engine/src/run-queue/index.test.ts | 1 + internal-packages/run-engine/src/shared/index.ts | 2 +- 5 files changed, 19 insertions(+), 8 deletions(-) diff --git a/internal-packages/redis-worker/src/worker.ts b/internal-packages/redis-worker/src/worker.ts index 91f85440f5..dcfc62b01b 100644 --- a/internal-packages/redis-worker/src/worker.ts +++ b/internal-packages/redis-worker/src/worker.ts @@ -308,9 +308,7 @@ class Worker { this.isShuttingDown = true; this.logger.log("Shutting down workers..."); - for (const worker of this.workers) { - worker.terminate(); - } + await Promise.all(this.workers.map((worker) => worker.terminate())); await this.subscriber.unsubscribe(); await this.subscriber.quit(); @@ -326,8 +324,8 @@ class Worker { } } - public stop() { - this.shutdown(); + public async stop() { + await this.shutdown(); } } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 8f18ffea6e..8bee4bebbf 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1405,8 +1405,16 @@ export class RunEngine { async quit() { //stop the run queue - this.runQueue.quit(); - this.worker.stop(); + await this.runQueue.quit(); + await this.worker.stop(); + await this.runLock.quit(); + + try { + // This is just a failsafe + await this.redis.quit(); + } catch (error) { + // And should always throw + } } async #systemFailure({ diff --git a/internal-packages/run-engine/src/engine/locking.ts 
b/internal-packages/run-engine/src/engine/locking.ts index 11ee1a3e2d..cd3aecc7c6 100644 --- a/internal-packages/run-engine/src/engine/locking.ts +++ b/internal-packages/run-engine/src/engine/locking.ts @@ -53,4 +53,8 @@ export class RunLocker { getCurrentResources(): string | undefined { return this.asyncLocalStorage.getStore()?.resources; } + + async quit() { + await this.redlock.quit(); + } } diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 746344ba94..8ad1a95438 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -711,6 +711,7 @@ describe("RunQueue", () => { // Wait for the item to be redrived and processed await setTimeout(5_000); + await redisClient.quit(); //shouldn't be in the dlq now const dlqMembersAfter = await redis.zrange(dlqKey, 0, -1); diff --git a/internal-packages/run-engine/src/shared/index.ts b/internal-packages/run-engine/src/shared/index.ts index 6bd3e304e3..3790918eab 100644 --- a/internal-packages/run-engine/src/shared/index.ts +++ b/internal-packages/run-engine/src/shared/index.ts @@ -1,5 +1,5 @@ import { Attributes } from "@opentelemetry/api"; -import { Prisma } from "../../../database/src"; +import { Prisma } from "@trigger.dev/database"; export type AuthenticatedEnvironment = Prisma.RuntimeEnvironmentGetPayload<{ include: { project: true; organization: true; orgMember: true }; From 41d79badf36ef2f2acf57dcc16cb5ce88f4374ca Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 4 Nov 2024 08:17:34 +0000 Subject: [PATCH 084/485] move shared test setup to internal package --- .../run-engine/src/engine/index.test.ts | 130 +---------------- internal-packages/testcontainers/package.json | 1 + internal-packages/testcontainers/src/index.ts | 3 + internal-packages/testcontainers/src/setup.ts | 133 ++++++++++++++++++ 4 files changed, 142 
insertions(+), 125 deletions(-) create mode 100644 internal-packages/testcontainers/src/setup.ts diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 87589f81a5..69ffbaec6f 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -1,10 +1,10 @@ -import { containerTest } from "@internal/testcontainers"; +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; import { trace } from "@opentelemetry/api"; -import { generateFriendlyId, sanitizeQueueName } from "@trigger.dev/core/v3/apps"; -import { PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; import { expect } from "vitest"; -import { AuthenticatedEnvironment } from "../shared/index.js"; -import { CURRENT_DEPLOYMENT_LABEL } from "./consts.js"; import { RunEngine } from "./index.js"; import { setTimeout } from "timers/promises"; import { EventBusEventArgs } from "./eventBus.js"; @@ -515,123 +515,3 @@ describe("RunEngine", () => { //todo delaying a run }); - -async function setupAuthenticatedEnvironment(prisma: PrismaClient, type: RuntimeEnvironmentType) { - // Your database setup logic here - const org = await prisma.organization.create({ - data: { - title: "Test Organization", - slug: "test-organization", - }, - }); - - const project = await prisma.project.create({ - data: { - name: "Test Project", - slug: "test-project", - externalRef: "proj_1234", - organizationId: org.id, - }, - }); - - const environment = await prisma.runtimeEnvironment.create({ - data: { - type, - slug: "slug", - projectId: project.id, - organizationId: org.id, - apiKey: "api_key", - pkApiKey: "pk_api_key", - shortcode: "short_code", - maximumConcurrencyLimit: 10, - }, - }); - - return await prisma.runtimeEnvironment.findUniqueOrThrow({ - where: { - id: environment.id, - }, - include: { - project: true, - 
organization: true, - orgMember: true, - }, - }); -} - -async function setupBackgroundWorker( - prisma: PrismaClient, - environment: AuthenticatedEnvironment, - taskIdentifier: string -) { - const worker = await prisma.backgroundWorker.create({ - data: { - friendlyId: generateFriendlyId("worker"), - contentHash: "hash", - projectId: environment.project.id, - runtimeEnvironmentId: environment.id, - version: "20241015.1", - metadata: {}, - }, - }); - - const task = await prisma.backgroundWorkerTask.create({ - data: { - friendlyId: generateFriendlyId("task"), - slug: taskIdentifier, - filePath: `/trigger/myTask.ts`, - exportName: "myTask", - workerId: worker.id, - runtimeEnvironmentId: environment.id, - projectId: environment.project.id, - }, - }); - - const queueName = sanitizeQueueName(`task/${taskIdentifier}`); - const taskQueue = await prisma.taskQueue.create({ - data: { - friendlyId: generateFriendlyId("queue"), - name: queueName, - concurrencyLimit: 10, - runtimeEnvironmentId: worker.runtimeEnvironmentId, - projectId: worker.projectId, - type: "VIRTUAL", - }, - }); - - if (environment.type !== "DEVELOPMENT") { - const deployment = await prisma.workerDeployment.create({ - data: { - friendlyId: generateFriendlyId("deployment"), - contentHash: worker.contentHash, - version: worker.version, - shortCode: "short_code", - imageReference: `trigger/${environment.project.externalRef}:${worker.version}.${environment.slug}`, - status: "DEPLOYED", - projectId: environment.project.id, - environmentId: environment.id, - workerId: worker.id, - }, - }); - - const promotion = await prisma.workerDeploymentPromotion.create({ - data: { - label: CURRENT_DEPLOYMENT_LABEL, - deploymentId: deployment.id, - environmentId: environment.id, - }, - }); - - return { - worker, - task, - deployment, - promotion, - }; - } - - return { - worker, - task, - }; -} diff --git a/internal-packages/testcontainers/package.json b/internal-packages/testcontainers/package.json index d64add0cab..1b629e7911 
100644 --- a/internal-packages/testcontainers/package.json +++ b/internal-packages/testcontainers/package.json @@ -13,6 +13,7 @@ "devDependencies": { "@testcontainers/postgresql": "^10.13.1", "@testcontainers/redis": "^10.13.1", + "@trigger.dev/core": "workspace:*", "testcontainers": "^10.13.1", "tinyexec": "^0.3.0", "vitest": "^1.4.0" diff --git a/internal-packages/testcontainers/src/index.ts b/internal-packages/testcontainers/src/index.ts index 77e5f6294f..9932149f64 100644 --- a/internal-packages/testcontainers/src/index.ts +++ b/internal-packages/testcontainers/src/index.ts @@ -6,6 +6,9 @@ import { PrismaClient } from "@trigger.dev/database"; import { createPostgresContainer, createRedisContainer, createElectricContainer } from "./utils"; import { Network, type StartedNetwork, type StartedTestContainer } from "testcontainers"; +export { StartedRedisContainer }; +export * from "./setup"; + type NetworkContext = { network: StartedNetwork }; type PostgresContext = NetworkContext & { diff --git a/internal-packages/testcontainers/src/setup.ts b/internal-packages/testcontainers/src/setup.ts new file mode 100644 index 0000000000..c7a8ab0c6a --- /dev/null +++ b/internal-packages/testcontainers/src/setup.ts @@ -0,0 +1,133 @@ +import { + CURRENT_DEPLOYMENT_LABEL, + generateFriendlyId, + sanitizeQueueName, +} from "@trigger.dev/core/v3/apps"; +import { Prisma, PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; + +export type AuthenticatedEnvironment = Prisma.RuntimeEnvironmentGetPayload<{ + include: { project: true; organization: true; orgMember: true }; +}>; + +export async function setupAuthenticatedEnvironment( + prisma: PrismaClient, + type: RuntimeEnvironmentType +) { + // Your database setup logic here + const org = await prisma.organization.create({ + data: { + title: "Test Organization", + slug: "test-organization", + }, + }); + + const project = await prisma.project.create({ + data: { + name: "Test Project", + slug: "test-project", + 
externalRef: "proj_1234", + organizationId: org.id, + }, + }); + + const environment = await prisma.runtimeEnvironment.create({ + data: { + type, + slug: "slug", + projectId: project.id, + organizationId: org.id, + apiKey: "api_key", + pkApiKey: "pk_api_key", + shortcode: "short_code", + maximumConcurrencyLimit: 10, + }, + }); + + return await prisma.runtimeEnvironment.findUniqueOrThrow({ + where: { + id: environment.id, + }, + include: { + project: true, + organization: true, + orgMember: true, + }, + }); +} + +export async function setupBackgroundWorker( + prisma: PrismaClient, + environment: AuthenticatedEnvironment, + taskIdentifier: string +) { + const worker = await prisma.backgroundWorker.create({ + data: { + friendlyId: generateFriendlyId("worker"), + contentHash: "hash", + projectId: environment.project.id, + runtimeEnvironmentId: environment.id, + version: "20241015.1", + metadata: {}, + }, + }); + + const task = await prisma.backgroundWorkerTask.create({ + data: { + friendlyId: generateFriendlyId("task"), + slug: taskIdentifier, + filePath: `/trigger/myTask.ts`, + exportName: "myTask", + workerId: worker.id, + runtimeEnvironmentId: environment.id, + projectId: environment.project.id, + }, + }); + + const queueName = sanitizeQueueName(`task/${taskIdentifier}`); + const taskQueue = await prisma.taskQueue.create({ + data: { + friendlyId: generateFriendlyId("queue"), + name: queueName, + concurrencyLimit: 10, + runtimeEnvironmentId: worker.runtimeEnvironmentId, + projectId: worker.projectId, + type: "VIRTUAL", + }, + }); + + if (environment.type !== "DEVELOPMENT") { + const deployment = await prisma.workerDeployment.create({ + data: { + friendlyId: generateFriendlyId("deployment"), + contentHash: worker.contentHash, + version: worker.version, + shortCode: "short_code", + imageReference: `trigger/${environment.project.externalRef}:${worker.version}.${environment.slug}`, + status: "DEPLOYED", + projectId: environment.project.id, + environmentId: 
environment.id, + workerId: worker.id, + }, + }); + + const promotion = await prisma.workerDeploymentPromotion.create({ + data: { + label: CURRENT_DEPLOYMENT_LABEL, + deploymentId: deployment.id, + environmentId: environment.id, + }, + }); + + return { + worker, + task, + deployment, + promotion, + }; + } + + return { + worker, + task, + }; +} From 37e3f12b609aac53094d2755582b4e88f916fd4b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 4 Nov 2024 13:35:53 +0000 Subject: [PATCH 085/485] add run engine errors to should retry helper --- packages/core/src/v3/errors.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/core/src/v3/errors.ts b/packages/core/src/v3/errors.ts index 232bb8aa94..9578083140 100644 --- a/packages/core/src/v3/errors.ts +++ b/packages/core/src/v3/errors.ts @@ -168,6 +168,10 @@ export function shouldRetryError(error: TaskRunError): boolean { case "DISK_SPACE_EXCEEDED": case "TASK_RUN_HEARTBEAT_TIMEOUT": case "OUTDATED_SDK_VERSION": + // run engine errors + case "TASK_DEQUEUED_INVALID_STATE": + case "TASK_DEQUEUED_QUEUE_NOT_FOUND": + case "TASK_HAS_N0_EXECUTION_SNAPSHOT": return false; case "GRACEFUL_EXIT_TIMEOUT": From a659f9d4d52777a7052fe5af2d077631130e026a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 4 Nov 2024 13:43:04 +0000 Subject: [PATCH 086/485] add missing consts export --- packages/core/src/v3/apps/consts.ts | 1 + packages/core/src/v3/apps/index.ts | 1 + 2 files changed, 2 insertions(+) create mode 100644 packages/core/src/v3/apps/consts.ts diff --git a/packages/core/src/v3/apps/consts.ts b/packages/core/src/v3/apps/consts.ts new file mode 100644 index 0000000000..6f62cf064a --- /dev/null +++ b/packages/core/src/v3/apps/consts.ts @@ -0,0 +1 @@ +export const CURRENT_DEPLOYMENT_LABEL = "current"; diff --git a/packages/core/src/v3/apps/index.ts b/packages/core/src/v3/apps/index.ts index a2a1f47020..c4028cf4b2 100644 --- 
a/packages/core/src/v3/apps/index.ts +++ b/packages/core/src/v3/apps/index.ts @@ -9,3 +9,4 @@ export * from "./friendlyId.js"; export * from "./duration.js"; export * from "./maxDuration.js"; export * from "./queueName.js"; +export * from "./consts.js"; From c2555925bd4703df446ab6a34dfa4cc001ef7bd4 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 4 Nov 2024 13:48:30 +0000 Subject: [PATCH 087/485] Latest lockfile --- pnpm-lock.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9841777a83..7bdac754b9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1011,6 +1011,9 @@ importers: '@testcontainers/redis': specifier: ^10.13.1 version: 10.13.1 + '@trigger.dev/core': + specifier: workspace:* + version: link:../../packages/core testcontainers: specifier: ^10.13.1 version: 10.13.1 From 7388b878dc86032148894dff1337a5e2d459682e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 4 Nov 2024 18:25:41 +0000 Subject: [PATCH 088/485] Cleaned up getting the latest snapshot. 
Started on attempt failures --- .../run-engine/src/engine/index.ts | 381 ++++++++++-------- 1 file changed, 205 insertions(+), 176 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 8bee4bebbf..f77a1b3d34 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -9,7 +9,9 @@ import { sanitizeError, TaskRunExecution, TaskRunExecutionResult, + TaskRunFailedExecutionResult, TaskRunInternalError, + TaskRunSuccessfulExecutionResult, } from "@trigger.dev/core/v3"; import { generateFriendlyId, @@ -163,7 +165,7 @@ export class RunEngine { await this.#handleStalledSnapshot(payload); }, expireRun: async ({ payload }) => { - await this.expire({ runId: payload.runId }); + await this.expireRun({ runId: payload.runId }); }, }, }); @@ -463,11 +465,6 @@ export class RunEngine { try { const dequeuedRun = await this.runLock.lock([runId], 5000, async (signal) => { const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (!snapshot) { - throw new Error( - `RunEngine.dequeueFromMasterQueue(): No snapshot found for run: ${runId}` - ); - } if (!isDequeueableExecutionStatus(snapshot.executionStatus)) { //create a failed snapshot @@ -786,18 +783,6 @@ export class RunEngine { return this.#trace("createRunAttempt", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (!latestSnapshot) { - await this.#systemFailure({ - runId, - error: { - type: "INTERNAL_ERROR", - code: "TASK_HAS_N0_EXECUTION_SNAPSHOT", - message: "Task had no execution snapshot when trying to create a run attempt", - }, - tx: prisma, - }); - throw new ServiceValidationError("No snapshot", 404); - } if (latestSnapshot.id !== snapshotId) { //if there is a big delay between the snapshot and the attempt, the snapshot might have changed @@ 
-1035,81 +1020,15 @@ export class RunEngine { runId: string; snapshotId: string; completion: TaskRunExecutionResult; - }) { - return this.#trace("completeRunAttempt", { runId, snapshotId }, async (span) => { - return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); - if (!latestSnapshot) { - throw new Error(`No execution snapshot found for TaskRun ${runId}`); - } - - if (latestSnapshot.id !== snapshotId) { - throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); - } - - span.setAttribute("completionStatus", completion.ok); - - if (completion.ok) { - const completedAt = new Date(); - const run = await this.prisma.taskRun.update({ - where: { id: runId }, - data: { - status: "COMPLETED_SUCCESSFULLY", - completedAt, - output: completion.output, - outputType: completion.outputType, - executionSnapshots: { - create: { - executionStatus: "FINISHED", - description: "Task completed successfully", - runStatus: "COMPLETED_SUCCESSFULLY", - attemptNumber: latestSnapshot.attemptNumber, - }, - }, - }, - select: { - spanId: true, - associatedWaitpoint: { - select: { - id: true, - }, - }, - project: { - select: { - organizationId: true, - }, - }, - }, - }); - await this.runQueue.acknowledgeMessage(run.project.organizationId, runId); - - if (!run.associatedWaitpoint) { - throw new ServiceValidationError("No associated waitpoint found", 400); - } - - await this.completeWaitpoint({ - id: run.associatedWaitpoint.id, - output: completion.output - ? 
{ value: completion.output, type: completion.outputType } - : undefined, - }); - - this.eventBus.emit("runCompletedSuccessfully", { - time: completedAt, - run: { - id: runId, - spanId: run.spanId, - output: completion.output, - outputType: completion.outputType, - }, - }); - } else { - const error = sanitizeError(completion.error); - //todo look at CompleteAttemptService - throw new NotImplementedError("TaskRun completion error handling not implemented yet"); - } - }); - }); + }): Promise<"COMPLETED" | "RETRIED"> { + switch (completion.ok) { + case true: { + return this.#completeRunAttemptSuccess({ runId, snapshotId, completion }); + } + case false: { + return this.#completeRunAttemptFailure({ runId, snapshotId, completion }); + } + } } async waitForDuration({ @@ -1133,9 +1052,6 @@ export class RunEngine { return await this.runLock.lock([runId], 5_000, async (signal) => { const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (!snapshot) { - throw new ServiceValidationError("Snapshot not found", 404); - } if (snapshot.id !== snapshotId) { throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); @@ -1203,13 +1119,10 @@ export class RunEngine { }); } - async expire({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { + async expireRun({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { const prisma = tx ?? this.prisma; await this.runLock.lock([runId], 5_000, async (signal) => { const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (!snapshot) { - throw new Error(`No execution snapshot found for TaskRun ${runId}`); - } //if we're executing then we won't expire the run if (isExecuting(snapshot.executionStatus)) { @@ -1258,6 +1171,26 @@ export class RunEngine { }); } + async cancelRun({ + runId, + completedAt, + reason, + tx, + }: { + runId: string; + completedAt: Date; + reason: string; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? 
this.prisma; + + return this.#trace("cancelRun", { runId }, async (span) => { + return this.runLock.lock([runId], 5_000, async (signal) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); + }); + }); + } + /** This completes a waitpoint and updates all entries so the run isn't blocked, * if they're no longer blocked. This doesn't suffer from race conditions. */ async completeWaitpoint({ @@ -1278,10 +1211,6 @@ export class RunEngine { throw new Error(`Waitpoint ${id} not found`); } - if (waitpoint.status === "COMPLETED") { - return; - } - await $transaction( this.prisma, async (tx) => { @@ -1292,7 +1221,9 @@ export class RunEngine { }); if (affectedTaskRuns.length === 0) { - throw new Error(`No TaskRunWaitpoints found for waitpoint ${id}`); + this.logger.warn(`No TaskRunWaitpoints found for waitpoint`, { + waitpoint, + }); } // 2. Delete the TaskRunWaitpoint entries for this specific waitpoint @@ -1315,9 +1246,6 @@ export class RunEngine { for (const run of affectedTaskRuns) { await this.runLock.lock([run.taskRunId], 5_000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, run.taskRunId); - if (!latestSnapshot) { - throw new Error(`No execution snapshot found for TaskRun ${run.taskRunId}`); - } await tx.taskRunExecutionSnapshot.update({ where: { id: latestSnapshot.id }, @@ -1371,36 +1299,40 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? this.prisma; - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (!snapshot) { - return null; - } + try { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - const executionData: RunExecutionData = { - version: "1" as const, - snapshot: { - id: snapshot.id, - executionStatus: snapshot.executionStatus, - description: snapshot.description, - }, - run: { - id: snapshot.runId, - status: snapshot.runStatus, - attemptNumber: snapshot.attemptNumber ?? 
undefined, - }, - checkpoint: snapshot.checkpoint - ? { - id: snapshot.checkpoint.id, - type: snapshot.checkpoint.type, - location: snapshot.checkpoint.location, - imageRef: snapshot.checkpoint.imageRef, - reason: snapshot.checkpoint.reason ?? undefined, - } - : undefined, - completedWaitpoints: snapshot.completedWaitpoints, - }; + const executionData: RunExecutionData = { + version: "1" as const, + snapshot: { + id: snapshot.id, + executionStatus: snapshot.executionStatus, + description: snapshot.description, + }, + run: { + id: snapshot.runId, + status: snapshot.runStatus, + attemptNumber: snapshot.attemptNumber ?? undefined, + }, + checkpoint: snapshot.checkpoint + ? { + id: snapshot.checkpoint.id, + type: snapshot.checkpoint.type, + location: snapshot.checkpoint.location, + imageRef: snapshot.checkpoint.imageRef, + reason: snapshot.checkpoint.reason ?? undefined, + } + : undefined, + completedWaitpoints: snapshot.completedWaitpoints, + }; - return executionData; + return executionData; + } catch (e) { + this.logger.error("Failed to getRunExecutionData", { + message: e instanceof Error ? 
e.message : e, + }); + return null; + } } async quit() { @@ -1439,6 +1371,120 @@ export class RunEngine { async #waitingForDeploy({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} + async #completeRunAttemptSuccess({ + runId, + snapshotId, + completion, + }: { + runId: string; + snapshotId: string; + completion: TaskRunSuccessfulExecutionResult; + }) { + return this.#trace("#completeRunAttemptSuccess", { runId, snapshotId }, async (span) => { + return this.runLock.lock([runId], 5_000, async (signal) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); + + if (latestSnapshot.id !== snapshotId) { + throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); + } + + span.setAttribute("completionStatus", completion.ok); + + const completedAt = new Date(); + const run = await this.prisma.taskRun.update({ + where: { id: runId }, + data: { + status: "COMPLETED_SUCCESSFULLY", + completedAt, + output: completion.output, + outputType: completion.outputType, + executionSnapshots: { + create: { + executionStatus: "FINISHED", + description: "Task completed successfully", + runStatus: "COMPLETED_SUCCESSFULLY", + attemptNumber: latestSnapshot.attemptNumber, + }, + }, + }, + select: { + spanId: true, + associatedWaitpoint: { + select: { + id: true, + }, + }, + project: { + select: { + organizationId: true, + }, + }, + }, + }); + await this.runQueue.acknowledgeMessage(run.project.organizationId, runId); + + if (!run.associatedWaitpoint) { + throw new ServiceValidationError("No associated waitpoint found", 400); + } + + await this.completeWaitpoint({ + id: run.associatedWaitpoint.id, + output: completion.output + ? 
{ value: completion.output, type: completion.outputType } + : undefined, + }); + + this.eventBus.emit("runCompletedSuccessfully", { + time: completedAt, + run: { + id: runId, + spanId: run.spanId, + output: completion.output, + outputType: completion.outputType, + }, + }); + + return "COMPLETED" as const; + }); + }); + } + + async #completeRunAttemptFailure({ + runId, + snapshotId, + completion, + }: { + runId: string; + snapshotId: string; + completion: TaskRunFailedExecutionResult; + }): Promise<"COMPLETED" | "RETRIED"> { + return this.#trace("completeRunAttemptFailure", { runId, snapshotId }, async (span) => { + return this.runLock.lock([runId], 5_000, async (signal) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); + + if (latestSnapshot.id !== snapshotId) { + throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); + } + + span.setAttribute("completionStatus", completion.ok); + + if ( + completion.error.type === "INTERNAL_ERROR" && + completion.error.code === "TASK_RUN_CANCELLED" + ) { + // We need to cancel the task run instead of fail it + await this.cancelRun({ runId, completedAt: new Date(), reason: "Cancelled by user" }); + + return "COMPLETED" as const; + } + + const error = sanitizeError(completion.error); + //todo look at CompleteAttemptService + throw new NotImplementedError("TaskRun completion error handling not implemented yet"); + }); + }); + } + //MARK: RunQueue /** The run can be added to the queue. When it's pulled from the queue it will be executed. 
*/ @@ -1495,9 +1541,6 @@ export class RunEngine { await this.runLock.lock([run.id], 5000, async (signal) => { const snapshot = await this.#getLatestExecutionSnapshot(prisma, run.id); - if (!snapshot) { - throw new Error(`RunEngine.#continueRun(): No snapshot found for run: ${run.id}`); - } const completedWaitpoints = await this.#getExecutionSnapshotCompletedWaitpoints( prisma, @@ -1595,11 +1638,6 @@ export class RunEngine { { orgId, runId, waitpoint }: { orgId: string; runId: string; waitpoint: Waitpoint } ) { await this.runLock.lock([runId], 5000, async (signal) => { - //todo it would be better if we didn't remove from the queue, because this removes the payload - //todo better would be to have a "block" function which remove it from the queue but doesn't remove the payload - //todo release concurrency and make sure the run isn't in the queue - // await this.runQueue.blockMessage(orgId, runId); - const taskWaitpoint = await tx.taskRunWaitpoint.create({ data: { taskRunId: runId, @@ -1610,29 +1648,27 @@ export class RunEngine { const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, runId); - if (latestSnapshot) { - let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; - if ( - latestSnapshot.executionStatus === "EXECUTING" || - latestSnapshot.executionStatus === "EXECUTING_WITH_WAITPOINTS" - ) { - newStatus = "EXECUTING_WITH_WAITPOINTS"; - } + let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; + if ( + latestSnapshot.executionStatus === "EXECUTING" || + latestSnapshot.executionStatus === "EXECUTING_WITH_WAITPOINTS" + ) { + newStatus = "EXECUTING_WITH_WAITPOINTS"; + } - //if the state has changed, create a new snapshot - if (newStatus !== latestSnapshot.executionStatus) { - await this.#createExecutionSnapshot(tx, { - run: { - id: latestSnapshot.runId, - status: latestSnapshot.runStatus, - attemptNumber: latestSnapshot.attemptNumber, - }, - snapshot: { - executionStatus: newStatus, - description: "Run was blocked by a waitpoint.", - 
}, - }); - } + //if the state has changed, create a new snapshot + if (newStatus !== latestSnapshot.executionStatus) { + await this.#createExecutionSnapshot(tx, { + run: { + id: latestSnapshot.runId, + status: latestSnapshot.runStatus, + attemptNumber: latestSnapshot.attemptNumber, + }, + snapshot: { + executionStatus: newStatus, + description: "Run was blocked by a waitpoint.", + }, + }); } }); } @@ -1753,7 +1789,7 @@ export class RunEngine { }); if (!snapshot) { - return null; + throw new Error(`No execution snapshot found for TaskRun ${runId}`); } return { @@ -1829,7 +1865,7 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }) { const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? this.prisma, runId); - if (latestSnapshot?.id !== snapshotId) { + if (latestSnapshot.id !== snapshotId) { this.logger.log( "RunEngine.#extendHeartbeatTimeout() no longer the latest snapshot, stopping the heartbeat.", { @@ -1857,15 +1893,8 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }) { const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? 
this.prisma, runId); - if (!latestSnapshot) { - this.logger.error("RunEngine.#handleStalledSnapshot() no latest snapshot found", { - runId, - snapshotId, - }); - return; - } - if (latestSnapshot?.id !== snapshotId) { + if (latestSnapshot.id !== snapshotId) { this.logger.log( "RunEngine.#handleStalledSnapshot() no longer the latest snapshot, stopping the heartbeat.", { From c83d6bd1bd12f54b0788519fce2b336b4d5c34d6 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 5 Nov 2024 12:53:07 +0000 Subject: [PATCH 089/485] Work on reattempting --- .../run-engine/src/engine/eventBus.ts | 15 +++ .../run-engine/src/engine/index.ts | 127 ++++++++++++++++-- 2 files changed, 128 insertions(+), 14 deletions(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index d07bd84f33..8e26c61941 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -1,4 +1,5 @@ import { TaskRunExecutionStatus } from "@trigger.dev/database"; +import { AuthenticatedEnvironment } from "../shared"; export type EventBusEvents = { runExpired: [ @@ -22,6 +23,20 @@ export type EventBusEvents = { }; }, ]; + runRetryScheduled: [ + { + time: Date; + run: { + id: string; + attemptNumber: number; + queue: string; + traceContext: Record; + taskIdentifier: string; + }; + environment: AuthenticatedEnvironment; + retryAt: Date; + }, + ]; executionSnapshotCreated: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index f77a1b3d34..63620cf548 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -7,6 +7,8 @@ import { parsePacket, QueueOptions, sanitizeError, + shouldRetryError, + taskRunErrorEnhancer, TaskRunExecution, TaskRunExecutionResult, TaskRunFailedExecutionResult, @@ -56,6 +58,8 @@ type Options = { machines: Record; baseCostInCents: 
number; }; + /** If not set then checkpoints won't ever be used */ + checkpointThresholdMs?: number; tracer: Tracer; }; @@ -409,9 +413,6 @@ export class RunEngine { } }); - //todo release parent concurrency (for the project, task, and environment, but not for the queue?) - //todo if this has been triggered with triggerAndWait or batchTriggerAndWait - return taskRun; } ); @@ -1023,10 +1024,10 @@ export class RunEngine { }): Promise<"COMPLETED" | "RETRIED"> { switch (completion.ok) { case true: { - return this.#completeRunAttemptSuccess({ runId, snapshotId, completion }); + return this.#attemptSucceeded({ runId, snapshotId, completion, tx: this.prisma }); } case false: { - return this.#completeRunAttemptFailure({ runId, snapshotId, completion }); + return this.#attemptFailed({ runId, snapshotId, completion, tx: this.prisma }); } } } @@ -1371,18 +1372,21 @@ export class RunEngine { async #waitingForDeploy({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} - async #completeRunAttemptSuccess({ + async #attemptSucceeded({ runId, snapshotId, completion, + tx, }: { runId: string; snapshotId: string; completion: TaskRunSuccessfulExecutionResult; + tx: PrismaClientOrTransaction; }) { + const prisma = tx ?? 
this.prisma; return this.#trace("#completeRunAttemptSuccess", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); @@ -1391,7 +1395,7 @@ export class RunEngine { span.setAttribute("completionStatus", completion.ok); const completedAt = new Date(); - const run = await this.prisma.taskRun.update({ + const run = await prisma.taskRun.update({ where: { id: runId }, data: { status: "COMPLETED_SUCCESSFULLY", @@ -1449,18 +1453,23 @@ export class RunEngine { }); } - async #completeRunAttemptFailure({ + async #attemptFailed({ runId, snapshotId, completion, + tx, }: { runId: string; snapshotId: string; completion: TaskRunFailedExecutionResult; + + tx: PrismaClientOrTransaction; }): Promise<"COMPLETED" | "RETRIED"> { + const prisma = this.prisma; + return this.#trace("completeRunAttemptFailure", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); @@ -1468,19 +1477,105 @@ export class RunEngine { span.setAttribute("completionStatus", completion.ok); + const failedAt = new Date(); + if ( completion.error.type === "INTERNAL_ERROR" && completion.error.code === "TASK_RUN_CANCELLED" ) { // We need to cancel the task run instead of fail it - await this.cancelRun({ runId, completedAt: new Date(), reason: "Cancelled by user" }); - + await this.cancelRun({ + runId, + completedAt: failedAt, + reason: "Cancelled by 
user", + tx: prisma, + }); return "COMPLETED" as const; } const error = sanitizeError(completion.error); - //todo look at CompleteAttemptService - throw new NotImplementedError("TaskRun completion error handling not implemented yet"); + const retriableError = shouldRetryError(taskRunErrorEnhancer(completion.error)); + + if ( + retriableError && + completion.retry !== undefined && + (latestSnapshot.attemptNumber === null || + latestSnapshot.attemptNumber < MAX_TASK_RUN_ATTEMPTS) + ) { + const retryAt = new Date(completion.retry.timestamp); + + const attemptNumber = + latestSnapshot.attemptNumber === null ? 1 : latestSnapshot.attemptNumber + 1; + + const run = await prisma.taskRun.update({ + where: { + id: runId, + }, + data: { + status: "RETRYING_AFTER_FAILURE", + attemptNumber, + }, + include: { + runtimeEnvironment: { + include: { + project: true, + organization: true, + orgMember: true, + }, + }, + }, + }); + + this.eventBus.emit("runRetryScheduled", { + time: failedAt, + run: { + id: run.id, + attemptNumber, + queue: run.queue, + taskIdentifier: run.taskIdentifier, + traceContext: run.traceContext as Record, + }, + environment: run.runtimeEnvironment, + retryAt, + }); + + //todo anything special for DEV? Ideally not. 
+ + //if it's a long delay and we support checkpointing, put it back in the queue + if ( + this.options.checkpointThresholdMs !== undefined && + completion.retry.delay >= this.options.checkpointThresholdMs + ) { + //long delay for retry, so requeue + await this.#createExecutionSnapshot(prisma, { + run, + snapshot: { + executionStatus: "QUEUED", + description: "Attempt failed with a long delay, putting back into the queue.", + }, + }); + await this.runQueue.nackMessage( + run.runtimeEnvironment.organizationId, + runId, + retryAt.getTime() + ); + } else { + //it will continue running because the retry delay is short + await this.#createExecutionSnapshot(prisma, { + run, + snapshot: { + executionStatus: "PENDING_EXECUTING", + description: "Attempt failed wth a short delay, starting a new attempt.", + }, + }); + await this.#sendRunChangedNotificationToWorker({ runId }); + } + + return "RETRIED" as const; + } + + //run permanently failed + //todo }); }); } @@ -1956,6 +2051,10 @@ export class RunEngine { //#endregion + async #sendRunChangedNotificationToWorker({ runId }: { runId: string }) { + //todo: implement + } + async #getAuthenticatedEnvironmentFromRun(runId: string, tx?: PrismaClientOrTransaction) { const prisma = tx ?? 
this.prisma; const taskRun = await prisma.taskRun.findUnique({ From e9062bec9aef0f44f7175ccf439dc772a9084d4b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 5 Nov 2024 15:10:34 +0000 Subject: [PATCH 090/485] Added Waitpoint outputIsError column --- .../migration.sql | 2 ++ internal-packages/database/prisma/schema.prisma | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql diff --git a/internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql b/internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql new file mode 100644 index 0000000000..d994f02179 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "Waitpoint" ADD COLUMN "outputIsError" BOOLEAN NOT NULL DEFAULT false; diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 6a1ac393d2..ea485a8e08 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2004,8 +2004,9 @@ model Waitpoint { completedExecutionSnapshots TaskRunExecutionSnapshot[] @relation("completedWaitpoints") /// When completed, an output can be stored here - output String? - outputType String @default("application/json") + output String? 
+ outputType String @default("application/json") + outputIsError Boolean @default(false) project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) projectId String From d01b888b3f60dbed558fe022d463793ba64fbeb9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 5 Nov 2024 15:10:58 +0000 Subject: [PATCH 091/485] Work on dealing with attempts that fail --- .../run-engine/src/engine/errors.ts | 50 +++++++++++ .../run-engine/src/engine/eventBus.ts | 16 +++- .../run-engine/src/engine/index.test.ts | 6 +- .../run-engine/src/engine/index.ts | 90 +++++++++++++++---- .../run-engine/src/engine/messages.ts | 1 + 5 files changed, 140 insertions(+), 23 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/errors.ts diff --git a/internal-packages/run-engine/src/engine/errors.ts b/internal-packages/run-engine/src/engine/errors.ts new file mode 100644 index 0000000000..bdf5077ee0 --- /dev/null +++ b/internal-packages/run-engine/src/engine/errors.ts @@ -0,0 +1,50 @@ +import { assertExhaustive } from "@trigger.dev/core"; +import { TaskRunError } from "@trigger.dev/core/v3"; +import { TaskRunStatus } from "@trigger.dev/database"; + +export function runStatusFromError(error: TaskRunError): TaskRunStatus { + if (error.type !== "INTERNAL_ERROR") { + return "COMPLETED_WITH_ERRORS"; + } + + //"CRASHED" should be used if it's a user-error or something they've misconfigured + //e.g. not enough memory + //"SYSTEM_FAILURE" should be used if it's an error from our system + //e.g. 
a bug + switch (error.code) { + case "TASK_RUN_CANCELLED": + return "CANCELED"; + case "MAX_DURATION_EXCEEDED": + return "TIMED_OUT"; + case "TASK_PROCESS_OOM_KILLED": + case "TASK_PROCESS_MAYBE_OOM_KILLED": + case "TASK_PROCESS_SIGSEGV": + case "DISK_SPACE_EXCEEDED": + case "OUTDATED_SDK_VERSION": + case "HANDLE_ERROR_ERROR": + case "TASK_RUN_CRASHED": + case "TASK_PROCESS_EXITED_WITH_NON_ZERO_CODE": + return "CRASHED"; + case "COULD_NOT_FIND_EXECUTOR": + case "COULD_NOT_FIND_TASK": + case "COULD_NOT_IMPORT_TASK": + case "CONFIGURED_INCORRECTLY": + case "TASK_ALREADY_RUNNING": + case "TASK_PROCESS_SIGKILL_TIMEOUT": + case "TASK_RUN_HEARTBEAT_TIMEOUT": + case "TASK_DEQUEUED_INVALID_STATE": + case "TASK_DEQUEUED_QUEUE_NOT_FOUND": + case "TASK_HAS_N0_EXECUTION_SNAPSHOT": + case "GRACEFUL_EXIT_TIMEOUT": + case "TASK_INPUT_ERROR": + case "TASK_OUTPUT_ERROR": + case "POD_EVICTED": + case "POD_UNKNOWN_ERROR": + case "TASK_EXECUTION_ABORTED": + case "TASK_EXECUTION_FAILED": + case "TASK_PROCESS_SIGTERM": + return "SYSTEM_FAILURE"; + default: + assertExhaustive(error.code); + } +} diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 8e26c61941..460ef9617d 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -1,5 +1,6 @@ -import { TaskRunExecutionStatus } from "@trigger.dev/database"; +import { TaskRunExecutionStatus, TaskRunStatus } from "@trigger.dev/database"; import { AuthenticatedEnvironment } from "../shared"; +import { TaskRunError } from "@trigger.dev/core/v3"; export type EventBusEvents = { runExpired: [ @@ -12,7 +13,7 @@ export type EventBusEvents = { }; }, ]; - runCompletedSuccessfully: [ + runSucceeded: [ { time: Date; run: { @@ -23,6 +24,17 @@ export type EventBusEvents = { }; }, ]; + runFailed: [ + { + time: Date; + run: { + id: string; + status: TaskRunStatus; + spanId: string; + error: TaskRunError; + }; + }, + ]; 
runRetryScheduled: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 69ffbaec6f..7d8d9a070d 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -251,8 +251,8 @@ describe("RunEngine", () => { expect(runWaitpoint.waitpoint.type).toBe("RUN"); expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); - let event: EventBusEventArgs<"runCompletedSuccessfully">[0] | undefined = undefined; - engine.eventBus.on("runCompletedSuccessfully", (result) => { + let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; + engine.eventBus.on("runSucceeded", (result) => { event = result; }); @@ -269,7 +269,7 @@ describe("RunEngine", () => { //event assertNonNullable(event); - const completedEvent = event as EventBusEventArgs<"runCompletedSuccessfully">[0]; + const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; expect(completedEvent.run.spanId).toBe(childRun.spanId); expect(completedEvent.run.output).toBe('{"foo":"bar"}'); expect(completedEvent.run.outputType).toBe("application/json"); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 63620cf548..303de81549 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -8,6 +8,7 @@ import { QueueOptions, sanitizeError, shouldRetryError, + TaskRunError, taskRunErrorEnhancer, TaskRunExecution, TaskRunExecutionResult, @@ -46,6 +47,7 @@ import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { DequeuedMessage, RunExecutionData } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; +import { runStatusFromError } from "./errors"; type Options = { redis: RedisOptions; @@ -1147,16 +1149,18 @@ export class RunEngine { 
return; } + const error: TaskRunError = { + type: "STRING_ERROR", + raw: `Run expired because the TTL (${run.ttl}) was reached`, + }; + const updatedRun = await prisma.taskRun.update({ where: { id: runId }, data: { status: "EXPIRED", completedAt: new Date(), expiredAt: new Date(), - error: { - type: "STRING_ERROR", - raw: `Run expired because the TTL (${run.ttl}) was reached`, - }, + error, executionSnapshots: { create: { engine: "V2", @@ -1166,6 +1170,18 @@ export class RunEngine { }, }, }, + include: { + associatedWaitpoint: true, + }, + }); + + if (!updatedRun.associatedWaitpoint) { + throw new ServiceValidationError("No associated waitpoint found", 400); + } + + await this.completeWaitpoint({ + id: updatedRun.associatedWaitpoint.id, + output: { value: JSON.stringify(error), isError: true }, }); this.eventBus.emit("runExpired", { run: updatedRun, time: new Date() }); @@ -1188,6 +1204,10 @@ export class RunEngine { return this.#trace("cancelRun", { runId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); + + //todo + //ack + //complete waitpoint }); }); } @@ -1202,6 +1222,7 @@ export class RunEngine { output?: { value: string; type?: string; + isError: boolean; }; }) { const waitpoint = await this.prisma.waitpoint.findUnique({ @@ -1240,6 +1261,7 @@ export class RunEngine { completedAt: new Date(), output: output?.value, outputType: output?.type, + outputIsError: output?.isError, }, }); @@ -1434,11 +1456,11 @@ export class RunEngine { await this.completeWaitpoint({ id: run.associatedWaitpoint.id, output: completion.output - ? { value: completion.output, type: completion.outputType } + ? 
{ value: completion.output, type: completion.outputType, isError: false } : undefined, }); - this.eventBus.emit("runCompletedSuccessfully", { + this.eventBus.emit("runSucceeded", { time: completedAt, run: { id: runId, @@ -1462,7 +1484,6 @@ export class RunEngine { runId: string; snapshotId: string; completion: TaskRunFailedExecutionResult; - tx: PrismaClientOrTransaction; }): Promise<"COMPLETED" | "RETRIED"> { const prisma = this.prisma; @@ -1547,18 +1568,12 @@ export class RunEngine { completion.retry.delay >= this.options.checkpointThresholdMs ) { //long delay for retry, so requeue - await this.#createExecutionSnapshot(prisma, { + await this.#enqueueRun({ run, - snapshot: { - executionStatus: "QUEUED", - description: "Attempt failed with a long delay, putting back into the queue.", - }, + env: run.runtimeEnvironment, + timestamp: retryAt.getTime(), + tx: prisma, }); - await this.runQueue.nackMessage( - run.runtimeEnvironment.organizationId, - runId, - retryAt.getTime() - ); } else { //it will continue running because the retry delay is short await this.#createExecutionSnapshot(prisma, { @@ -1574,8 +1589,46 @@ export class RunEngine { return "RETRIED" as const; } + const status = runStatusFromError(completion.error); + //run permanently failed - //todo + const run = await prisma.taskRun.update({ + where: { + id: runId, + }, + data: { + status, + completedAt: failedAt, + error, + }, + include: { + runtimeEnvironment: true, + associatedWaitpoint: true, + }, + }); + + this.eventBus.emit("runFailed", { + time: failedAt, + run: { + id: runId, + status: run.status, + spanId: run.spanId, + error, + }, + }); + + if (!run.associatedWaitpoint) { + throw new ServiceValidationError("No associated waitpoint found", 400); + } + + await this.completeWaitpoint({ + id: run.associatedWaitpoint.id, + output: { value: JSON.stringify(error), isError: true }, + }); + + await this.runQueue.acknowledgeMessage(run.runtimeEnvironment.organizationId, runId); + + return "COMPLETED" as const; 
}); }); } @@ -1899,6 +1952,7 @@ export class RunEngine { completedAfter: w.completedAfter ?? undefined, output: w.output ?? undefined, outputType: w.outputType, + outputIsError: w.outputIsError, })), }; } diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 8724f5fa19..47ee64d72b 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -14,6 +14,7 @@ const CompletedWaitpoint = z.object({ completedAfter: z.coerce.date().optional(), output: z.string().optional(), outputType: z.string().optional(), + outputIsError: z.boolean(), }); /** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ From 3194257cff1da56799356aa731b5392a584809dd Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:43:36 +0000 Subject: [PATCH 092/485] runs triggered via run engine should be V2 --- internal-packages/run-engine/src/engine/index.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 303de81549..9a99db0204 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -240,6 +240,7 @@ export class RunEngine { //create run const taskRun = await prisma.taskRun.create({ data: { + engine: "V2", status, number, friendlyId, From c66d3fe20d18e802b7a1e58c19771f8470876590 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:45:36 +0000 Subject: [PATCH 093/485] current unmanaged deployment label --- apps/webapp/app/consts.ts | 1 + .../app/v3/models/workerDeployment.server.ts | 28 +++++++++++++++---- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/apps/webapp/app/consts.ts b/apps/webapp/app/consts.ts index 8b27c3c955..84f56d1bd4 100644 --- 
a/apps/webapp/app/consts.ts +++ b/apps/webapp/app/consts.ts @@ -1,6 +1,7 @@ export const LIVE_ENVIRONMENT = "live"; export const DEV_ENVIRONMENT = "development"; export const CURRENT_DEPLOYMENT_LABEL = "current"; +export const CURRENT_UNMANAGED_DEPLOYMENT_LABEL = "current-unmanaged"; export const MAX_LIVE_PROJECTS = 1; export const DEFAULT_MAX_CONCURRENT_RUNS = 10; export const MAX_CONCURRENT_RUNS_LIMIT = 20; diff --git a/apps/webapp/app/v3/models/workerDeployment.server.ts b/apps/webapp/app/v3/models/workerDeployment.server.ts index 49595dab7e..0ea330a1a4 100644 --- a/apps/webapp/app/v3/models/workerDeployment.server.ts +++ b/apps/webapp/app/v3/models/workerDeployment.server.ts @@ -1,6 +1,6 @@ import type { Prettify } from "@trigger.dev/core"; import { BackgroundWorker } from "@trigger.dev/database"; -import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; +import { CURRENT_DEPLOYMENT_LABEL, CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; import { Prisma, prisma } from "~/db.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; @@ -19,13 +19,14 @@ type WorkerDeploymentWithWorkerTasks = Prisma.WorkerDeploymentGetPayload<{ }>; export async function findCurrentWorkerDeployment( - environmentId: string + environmentId: string, + label = CURRENT_DEPLOYMENT_LABEL ): Promise { const promotion = await prisma.workerDeploymentPromotion.findUnique({ where: { environmentId_label: { environmentId, - label: CURRENT_DEPLOYMENT_LABEL, + label, }, }, include: { @@ -44,8 +45,15 @@ export async function findCurrentWorkerDeployment( return promotion?.deployment; } +export async function findCurrentUnmanagedWorkerDeployment( + environmentId: string +): Promise { + return await findCurrentWorkerDeployment(environmentId, CURRENT_UNMANAGED_DEPLOYMENT_LABEL); +} + export async function findCurrentWorkerFromEnvironment( - environment: Pick + environment: Pick, + label = CURRENT_DEPLOYMENT_LABEL ): Promise { if (environment.type === "DEVELOPMENT") { const 
latestDevWorker = await prisma.backgroundWorker.findFirst({ @@ -58,11 +66,21 @@ export async function findCurrentWorkerFromEnvironment( }); return latestDevWorker; } else { - const deployment = await findCurrentWorkerDeployment(environment.id); + const deployment = await findCurrentWorkerDeployment(environment.id, label); return deployment?.worker ?? null; } } +export async function findCurrentUnmanagedWorkerFromEnvironment( + environment: Pick +): Promise { + if (environment.type === "DEVELOPMENT") { + return null; + } + + return await findCurrentWorkerFromEnvironment(environment, CURRENT_UNMANAGED_DEPLOYMENT_LABEL); +} + export async function getWorkerDeploymentFromWorker( workerId: string ): Promise { From 79d4432d825c9e0abd126d50ba4a4c94db4455b1 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 17:25:12 +0000 Subject: [PATCH 094/485] schema changes --- .../database/prisma/schema.prisma | 105 +++++++++++++++++- 1 file changed, 100 insertions(+), 5 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index ea485a8e08..6794654a67 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -148,6 +148,8 @@ model Organization { integrations Integration[] sources TriggerSource[] organizationIntegrations OrganizationIntegration[] + workerGroups WorkerInstanceGroup[] + workerInstances WorkerInstance[] } model ExternalAccount { @@ -421,6 +423,7 @@ model RuntimeEnvironment { taskRunNumberCounter TaskRunNumberCounter[] taskRunCheckpoints TaskRunCheckpoint[] waitpoints Waitpoint[] + workerInstance WorkerInstance[] @@unique([projectId, slug, orgMemberId]) @@unique([projectId, shortcode]) @@ -451,6 +454,12 @@ model Project { builderProjectId String? + workerGroups WorkerInstanceGroup[] + workers WorkerInstance[] + + defaultWorkerGroup WorkerInstanceGroup? 
@relation("ProjectDefaultWorkerGroup", fields: [defaultWorkerGroupId], references: [id]) + defaultWorkerGroupId String? @unique + environments RuntimeEnvironment[] endpoints Endpoint[] jobs Job[] @@ -1585,6 +1594,9 @@ model BackgroundWorker { deployment WorkerDeployment? + workerGroup WorkerInstanceGroup? @relation(fields: [workerGroupId], references: [id], onDelete: SetNull, onUpdate: Cascade) + workerGroupId String? + supportsLazyAttempts Boolean @default(false) @@unique([projectId, runtimeEnvironmentId, version]) @@ -1917,9 +1929,15 @@ model TaskRunExecutionSnapshot { checkpointId String? checkpoint TaskRunCheckpoint? @relation(fields: [checkpointId], references: [id]) + /// Worker + workerId String? + worker WorkerInstance? @relation(fields: [workerId], references: [id]) + createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + lastHeartbeatAt DateTime? + /// Used to get the latest valid snapshot quickly @@index([runId, isValid, createdAt(sort: Desc)]) } @@ -2051,14 +2069,90 @@ model TaskRunWaitpoint { @@index([waitpointId]) } -model Worker { +model FeatureFlag { id String @id @default(cuid()) + + key String @unique + value Json? +} + +model WorkerInstance { + id String @id @default(cuid()) + name String + + workerGroup WorkerInstanceGroup @relation(fields: [workerGroupId], references: [id]) + workerGroupId String + + TaskRunExecutionSnapshot TaskRunExecutionSnapshot[] + + organization Organization? @relation(fields: [organizationId], references: [id], onDelete: Cascade, onUpdate: Cascade) + organizationId String? + + project Project? @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) + projectId String? + + environment RuntimeEnvironment? @relation(fields: [environmentId], references: [id], onDelete: Cascade, onUpdate: Cascade) + environmentId String? + + deployment WorkerDeployment? @relation(fields: [deploymentId], references: [id], onDelete: SetNull, onUpdate: Cascade) + deploymentId String? 
+ + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + lastPullAt DateTime? + lastHeartbeatAt DateTime? + + @@unique([workerGroupId, name]) +} + +enum WorkerInstanceGroupType { + SHARED + UNMANAGED +} + +model WorkerInstanceGroup { + id String @id @default(cuid()) + type WorkerInstanceGroupType + + /// For example "us-east-1" + name String + + /// If managed, it will default to the name, e.g. "us-east-1" + /// If unmanged, it will be prefixed with the project ID e.g. "project_1-us-east-1" + masterQueue String @unique + + description String? + + token WorkerGroupToken @relation(fields: [tokenId], references: [id], onDelete: Cascade, onUpdate: Cascade) + tokenId String @unique + + workers WorkerInstance[] + backgroundWorkers BackgroundWorker[] + + defaultForProject Project? @relation("ProjectDefaultWorkerGroup") + + organization Organization? @relation(fields: [organizationId], references: [id], onDelete: Cascade, onUpdate: Cascade) + organizationId String? + + project Project? @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) + projectId String? + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + @@map("WorkerGroup") } -model WorkerGroup { +model WorkerGroupToken { id String @id @default(cuid()) - masterQueue String + tokenHash String @unique + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + workerGroup WorkerInstanceGroup? 
} model TaskRunTag { @@ -2532,8 +2626,9 @@ model WorkerDeployment { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - promotions WorkerDeploymentPromotion[] - alerts ProjectAlert[] + promotions WorkerDeploymentPromotion[] + alerts ProjectAlert[] + workerInstance WorkerInstance[] @@unique([projectId, shortCode]) @@unique([environmentId, version]) From 80893970d370fd1f99683697c7594c9fabb7f9be Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 17:26:42 +0000 Subject: [PATCH 095/485] run engine base service extension --- apps/webapp/app/v3/services/baseService.server.ts | 15 +++++++++++++++ .../app/v3/services/triggerTaskV2.server.ts | 13 ++----------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/apps/webapp/app/v3/services/baseService.server.ts b/apps/webapp/app/v3/services/baseService.server.ts index 4e7c79d46e..2b5396c8be 100644 --- a/apps/webapp/app/v3/services/baseService.server.ts +++ b/apps/webapp/app/v3/services/baseService.server.ts @@ -2,6 +2,7 @@ import { Span, SpanKind } from "@opentelemetry/api"; import { PrismaClientOrTransaction, prisma } from "~/db.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { attributesFromAuthenticatedEnv, tracer } from "../tracer.server"; +import { engine, RunEngine } from "../runEngine.server"; export abstract class BaseService { constructor(protected readonly _prisma: PrismaClientOrTransaction = prisma) {} @@ -37,6 +38,20 @@ export abstract class BaseService { } } +export type WithRunEngineOptions = T & { + prisma?: PrismaClientOrTransaction; + engine?: RunEngine; +}; + +export class WithRunEngine extends BaseService { + protected readonly _engine: RunEngine; + + constructor(opts: { prisma?: PrismaClientOrTransaction; engine?: RunEngine }) { + super(opts.prisma); + this._engine = opts.engine ?? 
engine; + } +} + export class ServiceValidationError extends Error { constructor(message: string, public status?: number) { super(message); diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 4d8fb4c4f9..a3db88f2c5 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -14,14 +14,12 @@ import { generateFriendlyId } from "../friendlyIdentifiers"; import { uploadToObjectStore } from "../r2.server"; import { startActiveSpan } from "../tracer.server"; import { getEntitlement } from "~/services/platform.v3.server"; -import { BaseService, ServiceValidationError } from "./baseService.server"; +import { ServiceValidationError, WithRunEngine } from "./baseService.server"; import { logger } from "~/services/logger.server"; import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; import { handleMetadataPacket } from "~/utils/packets"; -import type { PrismaClientOrTransaction } from "~/db.server"; -import { engine, type RunEngine } from "../runEngine.server"; export type TriggerTaskServiceOptions = { idempotencyKey?: string; @@ -39,14 +37,7 @@ export class OutOfEntitlementError extends Error { } } -export class TriggerTaskServiceV2 extends BaseService { - private _engine: RunEngine; - - constructor({ prisma, runEngine }: { prisma: PrismaClientOrTransaction; runEngine: RunEngine }) { - super(prisma); - this._engine = runEngine ?? 
engine; - } - +export class TriggerTaskServiceV2 extends WithRunEngine { public async call({ taskId, environment, From 2dab2ea0c935d8c221ec50971b3d0cf49ffd81c7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 5 Nov 2024 17:32:58 +0000 Subject: [PATCH 096/485] Improved the triggerAndWait test to start the parentRun before triggering the child --- .../run-engine/src/engine/index.test.ts | 496 ++++++++++-------- .../run-engine/src/engine/index.ts | 2 +- internal-packages/testcontainers/src/setup.ts | 65 ++- 3 files changed, 319 insertions(+), 244 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 7d8d9a070d..219e064600 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -15,139 +15,8 @@ function assertNonNullable(value: T): asserts value is NonNullable { } describe("RunEngine", () => { - containerTest("Trigger a simple run", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: 
"run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - expect(run).toBeDefined(); - expect(run.friendlyId).toBe("run_1234"); - - //check it's actually in the db - const runFromDb = await prisma.taskRun.findUnique({ - where: { - friendlyId: "run_1234", - }, - }); - expect(runFromDb).toBeDefined(); - expect(runFromDb?.id).toBe(run.id); - - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("QUEUED"); - - //check the waitpoint is created - const runWaitpoint = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpoint.length).toBe(1); - expect(runWaitpoint[0].type).toBe("RUN"); - - //check the queue length - const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); - expect(queueLength).toBe(1); - - //concurrency before - const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyBefore).toBe(0); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - expect(dequeued.length).toBe(1); - expect(dequeued[0].run.id).toBe(run.id); - expect(dequeued[0].run.attemptNumber).toBe(1); - - const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyAfter).toBe(1); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].execution.id, - }); - expect(attemptResult.run.id).toBe(run.id); - 
expect(attemptResult.run.status).toBe("EXECUTING"); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); - expect(executionData2.run.attemptNumber).toBe(1); - expect(executionData2.run.status).toBe("EXECUTING"); - } finally { - engine.quit(); - } - }); - containerTest( - "triggerAndWait (not executing)", + "Single run (trigger to success)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { //create environment @@ -185,13 +54,17 @@ describe("RunEngine", () => { const taskIdentifier = "test-task"; //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); //trigger the run - const parentRun = await engine.trigger( + const run = await engine.trigger( { number: 1, - friendlyId: "run_p1234", + friendlyId: "run_1234", environment: authenticatedEnvironment, taskIdentifier, payload: "{}", @@ -207,112 +80,301 @@ describe("RunEngine", () => { }, prisma ); + expect(run).toBeDefined(); + expect(run.friendlyId).toBe("run_1234"); - const childRun = await engine.trigger( - { - number: 1, - friendlyId: "run_c1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - resumeParentOnCompletion: true, - parentTaskRunId: parentRun.id, + //check it's actually in the db + const runFromDb = await prisma.taskRun.findUnique({ + where: { + friendlyId: "run_1234", }, - prisma - ); - - const childExecutionData = await engine.getRunExecutionData({ runId: childRun.id }); - 
assertNonNullable(childExecutionData); - expect(childExecutionData.snapshot.executionStatus).toBe("QUEUED"); + }); + expect(runFromDb).toBeDefined(); + expect(runFromDb?.id).toBe(run.id); - const parentExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(parentExecutionData); - expect(parentExecutionData.snapshot.executionStatus).toBe("BLOCKED_BY_WAITPOINTS"); + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("QUEUED"); - //check the waitpoint blocking the parent run - const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + //check the waitpoint is created + const runWaitpoint = await prisma.waitpoint.findMany({ where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, + completedByTaskRunId: run.id, }, }); - assertNonNullable(runWaitpoint); - expect(runWaitpoint.waitpoint.type).toBe("RUN"); - expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + expect(runWaitpoint.length).toBe(1); + expect(runWaitpoint[0].type).toBe("RUN"); - let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; - engine.eventBus.on("runSucceeded", (result) => { - event = result; + //check the queue length + const queueLength = await engine.runQueue.lengthOfQueue( + authenticatedEnvironment, + run.queue + ); + expect(queueLength).toBe(1); + + //concurrency before + const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyBefore).toBe(0); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, }); + expect(dequeued.length).toBe(1); + expect(dequeued[0].run.id).toBe(run.id); + expect(dequeued[0].run.attemptNumber).toBe(1); - await engine.completeRunAttempt({ - runId: childRun.id, - snapshotId: 
childExecutionData.snapshot.id, + const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyAfter).toBe(1); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].execution.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData2.run.attemptNumber).toBe(1); + expect(executionData2.run.status).toBe("EXECUTING"); + + //complete the run + const result = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: attemptResult.snapshot.id, completion: { - id: childRun.id, ok: true, - output: '{"foo":"bar"}', + id: dequeued[0].run.id, + output: `{ "foo": "bar" }`, outputType: "application/json", }, }); + expect(result).toBe("COMPLETED"); + + //state should be completed + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData3.run.attemptNumber).toBe(1); + expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); - //event - assertNonNullable(event); - const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; - expect(completedEvent.run.spanId).toBe(childRun.spanId); - expect(completedEvent.run.output).toBe('{"foo":"bar"}'); - expect(completedEvent.run.outputType).toBe("application/json"); - - //child 
snapshot - const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); - assertNonNullable(childExecutionDataAfter); - expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); - - const waitpointAfter = await prisma.waitpoint.findFirst({ - where: { - id: runWaitpoint.waitpointId, - }, - }); - expect(waitpointAfter?.completedAt).not.toBeNull(); - expect(waitpointAfter?.status).toBe("COMPLETED"); - - const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ + //waitpoint should have been completed, with the output + const runWaitpointAfter = await prisma.waitpoint.findMany({ where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, + completedByTaskRunId: run.id, }, }); - expect(runWaitpointAfter).toBeNull(); - - //parent snapshot - const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(parentExecutionDataAfter); - expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("QUEUED"); - expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); - expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); - expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( - childRun.id - ); - expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); + expect(runWaitpointAfter.length).toBe(1); + expect(runWaitpointAfter[0].type).toBe("RUN"); + expect(runWaitpointAfter[0].output).toBe(`{ "foo": "bar" }`); } finally { engine.quit(); } } ); + containerTest("triggerAndWait", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + 
worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + const childTask = "child-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue parent + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, + }); + expect(dequeued.length).toBe(1); + expect(dequeued[0].run.id).toBe(parentRun.id); + expect(dequeued[0].run.attemptNumber).toBe(1); + + //create an attempt + const initialExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(initialExecutionData); + const attemptResult = await engine.startRunAttempt({ + runId: parentRun.id, + snapshotId: initialExecutionData.snapshot.id, + }); + expect(attemptResult.run.id).toBe(parentRun.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + const childRun = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: 
"main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + }, + prisma + ); + + const childExecutionData = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionData); + expect(childExecutionData.snapshot.executionStatus).toBe("QUEUED"); + + const parentExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentExecutionData); + expect(parentExecutionData.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //check the waitpoint blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + assertNonNullable(runWaitpoint); + expect(runWaitpoint.waitpoint.type).toBe("RUN"); + expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + + let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; + engine.eventBus.on("runSucceeded", (result) => { + event = result; + }); + + await engine.completeRunAttempt({ + runId: childRun.id, + snapshotId: childExecutionData.snapshot.id, + completion: { + id: childRun.id, + ok: true, + output: '{"foo":"bar"}', + outputType: "application/json", + }, + }); + + //event + assertNonNullable(event); + const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; + expect(completedEvent.run.spanId).toBe(childRun.spanId); + expect(completedEvent.run.output).toBe('{"foo":"bar"}'); + expect(completedEvent.run.outputType).toBe("application/json"); + + //child snapshot + const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionDataAfter); + expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); + + const waitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: runWaitpoint.waitpointId, + }, + }); + 
expect(waitpointAfter?.completedAt).not.toBeNull(); + expect(waitpointAfter?.status).toBe("COMPLETED"); + expect(waitpointAfter?.output).toBe('{"foo":"bar"}'); + + const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointAfter).toBeNull(); + + //parent snapshot + const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentExecutionDataAfter); + expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("EXECUTING"); + expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); + expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + childRun.id + ); + expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); + } finally { + engine.quit(); + } + }); + containerTest("waitForDuration", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -404,7 +466,7 @@ describe("RunEngine", () => { await setTimeout(1_500); const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); - expect(executionDataAfter?.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING"); } finally { engine.quit(); } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 9a99db0204..d98ee3fb7c 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1701,7 +1701,7 @@ export class RunEngine { const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: run, snapshot: { - executionStatus: "PENDING_EXECUTING", + 
executionStatus: "EXECUTING", description: "Run was continued, whilst still executing.", }, completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), diff --git a/internal-packages/testcontainers/src/setup.ts b/internal-packages/testcontainers/src/setup.ts index c7a8ab0c6a..f9e2cf5bae 100644 --- a/internal-packages/testcontainers/src/setup.ts +++ b/internal-packages/testcontainers/src/setup.ts @@ -3,7 +3,12 @@ import { generateFriendlyId, sanitizeQueueName, } from "@trigger.dev/core/v3/apps"; -import { Prisma, PrismaClient, RuntimeEnvironmentType } from "@trigger.dev/database"; +import { + BackgroundWorkerTask, + Prisma, + PrismaClient, + RuntimeEnvironmentType, +} from "@trigger.dev/database"; export type AuthenticatedEnvironment = Prisma.RuntimeEnvironmentGetPayload<{ include: { project: true; organization: true; orgMember: true }; @@ -58,7 +63,7 @@ export async function setupAuthenticatedEnvironment( export async function setupBackgroundWorker( prisma: PrismaClient, environment: AuthenticatedEnvironment, - taskIdentifier: string + taskIdentifier: string | string[] ) { const worker = await prisma.backgroundWorker.create({ data: { @@ -71,29 +76,37 @@ export async function setupBackgroundWorker( }, }); - const task = await prisma.backgroundWorkerTask.create({ - data: { - friendlyId: generateFriendlyId("task"), - slug: taskIdentifier, - filePath: `/trigger/myTask.ts`, - exportName: "myTask", - workerId: worker.id, - runtimeEnvironmentId: environment.id, - projectId: environment.project.id, - }, - }); + const taskIdentifiers = Array.isArray(taskIdentifier) ? 
taskIdentifier : [taskIdentifier]; - const queueName = sanitizeQueueName(`task/${taskIdentifier}`); - const taskQueue = await prisma.taskQueue.create({ - data: { - friendlyId: generateFriendlyId("queue"), - name: queueName, - concurrencyLimit: 10, - runtimeEnvironmentId: worker.runtimeEnvironmentId, - projectId: worker.projectId, - type: "VIRTUAL", - }, - }); + const tasks: BackgroundWorkerTask[] = []; + + for (const identifier of taskIdentifiers) { + const task = await prisma.backgroundWorkerTask.create({ + data: { + friendlyId: generateFriendlyId("task"), + slug: identifier, + filePath: `/trigger/${identifier}.ts`, + exportName: identifier, + workerId: worker.id, + runtimeEnvironmentId: environment.id, + projectId: environment.project.id, + }, + }); + + tasks.push(task); + + const queueName = sanitizeQueueName(`task/${identifier}`); + const taskQueue = await prisma.taskQueue.create({ + data: { + friendlyId: generateFriendlyId("queue"), + name: queueName, + concurrencyLimit: 10, + runtimeEnvironmentId: worker.runtimeEnvironmentId, + projectId: worker.projectId, + type: "VIRTUAL", + }, + }); + } if (environment.type !== "DEVELOPMENT") { const deployment = await prisma.workerDeployment.create({ @@ -120,7 +133,7 @@ export async function setupBackgroundWorker( return { worker, - task, + tasks, deployment, promotion, }; @@ -128,6 +141,6 @@ export async function setupBackgroundWorker( return { worker, - task, + tasks, }; } From 8c3cabf9c8db0dadf8b1d3975fcb6807b0ed8cea Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 5 Nov 2024 17:44:14 +0000 Subject: [PATCH 097/485] =?UTF-8?q?Simplified=20the=20tests=20so=20there?= =?UTF-8?q?=E2=80=99s=20less=20repeated=20checks,=20keep=20them=20more=20f?= =?UTF-8?q?ocused?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../run-engine/src/engine/index.test.ts | 156 +++++++++++++++--- 1 file changed, 134 insertions(+), 22 deletions(-) diff --git 
a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 219e064600..3ba77aba9c 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -148,7 +148,139 @@ describe("RunEngine", () => { expect(executionData2.run.attemptNumber).toBe(1); expect(executionData2.run.status).toBe("EXECUTING"); + let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; + engine.eventBus.on("runSucceeded", (result) => { + event = result; + }); + //complete the run + const result = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: attemptResult.snapshot.id, + completion: { + ok: true, + id: dequeued[0].run.id, + output: `{"foo":"bar"}`, + outputType: "application/json", + }, + }); + expect(result).toBe("COMPLETED"); + + //state should be completed + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData3.run.attemptNumber).toBe(1); + expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); + + //event + assertNonNullable(event); + const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; + expect(completedEvent.run.spanId).toBe(run.spanId); + expect(completedEvent.run.output).toBe('{"foo":"bar"}'); + expect(completedEvent.run.outputType).toBe("application/json"); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + + //waitpoint should have been completed, with the output + const runWaitpointAfter = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpointAfter.length).toBe(1); + expect(runWaitpointAfter[0].type).toBe("RUN"); + 
expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); + } finally { + engine.quit(); + } + } + ); + + containerTest( + "Single run (trigger to user-failure)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].execution.id, + }); + + //fail the attempt const result = await engine.completeRunAttempt({ runId: dequeued[0].run.id, snapshotId: attemptResult.snapshot.id, @@ -255,9 +387,6 @@ describe("RunEngine", () => { masterQueue: parentRun.masterQueue, 
maxRunCount: 10, }); - expect(dequeued.length).toBe(1); - expect(dequeued[0].run.id).toBe(parentRun.id); - expect(dequeued[0].run.attemptNumber).toBe(1); //create an attempt const initialExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); @@ -266,9 +395,6 @@ describe("RunEngine", () => { runId: parentRun.id, snapshotId: initialExecutionData.snapshot.id, }); - expect(attemptResult.run.id).toBe(parentRun.id); - expect(attemptResult.run.status).toBe("EXECUTING"); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); const childRun = await engine.trigger( { @@ -313,11 +439,7 @@ describe("RunEngine", () => { expect(runWaitpoint.waitpoint.type).toBe("RUN"); expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); - let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; - engine.eventBus.on("runSucceeded", (result) => { - event = result; - }); - + // complete the child run await engine.completeRunAttempt({ runId: childRun.id, snapshotId: childExecutionData.snapshot.id, @@ -329,13 +451,6 @@ describe("RunEngine", () => { }, }); - //event - assertNonNullable(event); - const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; - expect(completedEvent.run.spanId).toBe(childRun.spanId); - expect(completedEvent.run.output).toBe('{"foo":"bar"}'); - expect(completedEvent.run.outputType).toBe("application/json"); - //child snapshot const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); assertNonNullable(childExecutionDataAfter); @@ -440,7 +555,6 @@ describe("RunEngine", () => { masterQueue: run.masterQueue, maxRunCount: 10, }); - expect(dequeued.length).toBe(1); //create an attempt const attemptResult = await engine.startRunAttempt({ @@ -483,7 +597,7 @@ describe("RunEngine", () => { //todo cancelling a run //todo expiring a run - containerTest("Run expiring", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + containerTest("Run expiring (ttl)", { timeout: 
15_000 }, async ({ prisma, redisContainer }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -546,8 +660,6 @@ describe("RunEngine", () => { }, prisma ); - expect(run).toBeDefined(); - expect(run.friendlyId).toBe("run_1234"); const executionData = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData); From cb269572328aed540c0a91296dead7d7e1738f45 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 5 Nov 2024 18:06:22 +0000 Subject: [PATCH 098/485] Failed run test passing --- .../run-engine/src/engine/index.test.ts | 17 ++++++++++++----- .../run-engine/src/engine/index.ts | 8 ++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 3ba77aba9c..a7b7f69e25 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -281,14 +281,19 @@ describe("RunEngine", () => { }); //fail the attempt + const error = { + type: "BUILT_IN_ERROR" as const, + name: "UserError", + message: "This is a user error", + stackTrace: "Error: This is a user error\n at :1:1", + }; const result = await engine.completeRunAttempt({ runId: dequeued[0].run.id, snapshotId: attemptResult.snapshot.id, completion: { - ok: true, + ok: false, id: dequeued[0].run.id, - output: `{ "foo": "bar" }`, - outputType: "application/json", + error, }, }); expect(result).toBe("COMPLETED"); @@ -298,7 +303,7 @@ describe("RunEngine", () => { assertNonNullable(executionData3); expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); expect(executionData3.run.attemptNumber).toBe(1); - expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); + expect(executionData3.run.status).toBe("COMPLETED_WITH_ERRORS"); //concurrency should have been released const envConcurrencyCompleted = await 
engine.runQueue.currentConcurrencyOfEnvironment( @@ -314,7 +319,9 @@ describe("RunEngine", () => { }); expect(runWaitpointAfter.length).toBe(1); expect(runWaitpointAfter[0].type).toBe("RUN"); - expect(runWaitpointAfter[0].output).toBe(`{ "foo": "bar" }`); + const output = JSON.parse(runWaitpointAfter[0].output as string); + expect(output.type).toBe(error.type); + expect(runWaitpointAfter[0].outputIsError).toBe(true); } finally { engine.quit(); } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index d98ee3fb7c..4d3203b8a0 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1608,6 +1608,14 @@ export class RunEngine { }, }); + await this.#createExecutionSnapshot(prisma, { + run, + snapshot: { + executionStatus: "FINISHED", + description: "Run failed", + }, + }); + this.eventBus.emit("runFailed", { time: failedAt, run: { From dc4a88cc030fcb02b77899c151c8ce1efb4ea8f3 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 18:18:38 +0000 Subject: [PATCH 099/485] hidden worker groups --- internal-packages/database/prisma/schema.prisma | 1 + 1 file changed, 1 insertion(+) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 6794654a67..c92d08ce68 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2123,6 +2123,7 @@ model WorkerInstanceGroup { masterQueue String @unique description String? 
+ hidden Boolean @default(false) token WorkerGroupToken @relation(fields: [tokenId], references: [id], onDelete: Cascade, onUpdate: Cascade) tokenId String @unique From 6bb7a4cefb3727a100435596c12666c900882716 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 18:31:32 +0000 Subject: [PATCH 100/485] feature flag helpers --- apps/webapp/app/v3/featureFlags.server.ts | 54 +++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 apps/webapp/app/v3/featureFlags.server.ts diff --git a/apps/webapp/app/v3/featureFlags.server.ts b/apps/webapp/app/v3/featureFlags.server.ts new file mode 100644 index 0000000000..1cc57ed48c --- /dev/null +++ b/apps/webapp/app/v3/featureFlags.server.ts @@ -0,0 +1,54 @@ +import { z } from "zod"; +import { prisma, PrismaClientOrTransaction } from "~/db.server"; + +const FeatureFlagCatalog = { + defaultWorkerInstanceGroupId: z.string(), +}; + +type FeatureFlagKey = keyof typeof FeatureFlagCatalog; + +export type FlagsOptions = { + key: FeatureFlagKey; +}; + +export function makeFlags(_prisma: PrismaClientOrTransaction = prisma) { + return async function flags( + opts: FlagsOptions + ): Promise | undefined> { + const value = await _prisma.featureFlag.findUnique({ + where: { + key: opts.key, + }, + }); + + const parsed = FeatureFlagCatalog[opts.key].safeParse(value?.value); + + if (!parsed.success) { + return; + } + + return parsed.data; + }; +} + +export function makeSetFlags(_prisma: PrismaClientOrTransaction = prisma) { + return async function setFlags( + opts: FlagsOptions & { value: z.infer<(typeof FeatureFlagCatalog)[T]> } + ): Promise { + await _prisma.featureFlag.upsert({ + where: { + key: opts.key, + }, + create: { + key: opts.key, + value: opts.value, + }, + update: { + value: opts.value, + }, + }); + }; +} + +export const flags = makeFlags(); +export const setFlags = makeSetFlags(); From a7ecfc928a34eeecebcd40236235326f46cee8ad Mon Sep 17 00:00:00 2001 From: nicktrn 
<55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 19:05:14 +0000 Subject: [PATCH 101/485] add basic worker package --- packages/worker/LICENSE | 21 ++++++ packages/worker/README.md | 3 + packages/worker/package.json | 71 +++++++++++++++++++ packages/worker/src/apiClient.ts | 111 ++++++++++++++++++++++++++++++ packages/worker/src/consts.ts | 4 ++ packages/worker/src/index.ts | 2 + packages/worker/src/schemas.ts | 19 +++++ packages/worker/src/version.ts | 1 + packages/worker/tsconfig.json | 8 +++ packages/worker/tsconfig.src.json | 10 +++ 10 files changed, 250 insertions(+) create mode 100644 packages/worker/LICENSE create mode 100644 packages/worker/README.md create mode 100644 packages/worker/package.json create mode 100644 packages/worker/src/apiClient.ts create mode 100644 packages/worker/src/consts.ts create mode 100644 packages/worker/src/index.ts create mode 100644 packages/worker/src/schemas.ts create mode 100644 packages/worker/src/version.ts create mode 100644 packages/worker/tsconfig.json create mode 100644 packages/worker/tsconfig.src.json diff --git a/packages/worker/LICENSE b/packages/worker/LICENSE new file mode 100644 index 0000000000..e51e7b10aa --- /dev/null +++ b/packages/worker/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Trigger.dev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/worker/README.md b/packages/worker/README.md new file mode 100644 index 0000000000..53b5a8a581 --- /dev/null +++ b/packages/worker/README.md @@ -0,0 +1,3 @@ +# @trigger.dev/worker + +This package provides shared worker functionality. diff --git a/packages/worker/package.json b/packages/worker/package.json new file mode 100644 index 0000000000..41182733ca --- /dev/null +++ b/packages/worker/package.json @@ -0,0 +1,71 @@ +{ + "name": "@trigger.dev/worker", + "version": "3.1.2", + "description": "trigger.dev worker", + "license": "MIT", + "publishConfig": { + "access": "public" + }, + "repository": { + "type": "git", + "url": "https://github.com/triggerdotdev/trigger.dev", + "directory": "packages/worker" + }, + "type": "module", + "files": [ + "dist" + ], + "tshy": { + "selfLink": false, + "main": true, + "module": true, + "project": "./tsconfig.src.json", + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts" + }, + "sourceDialects": [ + "@triggerdotdev/source" + ] + }, + "scripts": { + "clean": "rimraf dist", + "build": "tshy && pnpm run update-version", + "dev": "tshy --watch", + "typecheck": "tsc --noEmit -p tsconfig.src.json", + "update-version": "tsx ../../scripts/updateVersion.ts", + "check-exports": "attw --pack ." 
+ }, + "dependencies": { + "@trigger.dev/core": "workspace:*", + "zod": "3.22.3" + }, + "devDependencies": { + "@arethetypeswrong/cli": "^0.15.4", + "@types/node": "20.14.14", + "rimraf": "6.0.1", + "tshy": "^3.0.2", + "tsx": "4.17.0", + "typescript": "^5.5.4" + }, + "engines": { + "node": ">=18.20.0" + }, + "exports": { + "./package.json": "./package.json", + ".": { + "import": { + "@triggerdotdev/source": "./src/index.ts", + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" + } + } + }, + "main": "./dist/commonjs/index.js", + "types": "./dist/commonjs/index.d.ts", + "module": "./dist/esm/index.js" +} diff --git a/packages/worker/src/apiClient.ts b/packages/worker/src/apiClient.ts new file mode 100644 index 0000000000..c0b03d25d2 --- /dev/null +++ b/packages/worker/src/apiClient.ts @@ -0,0 +1,111 @@ +import { z } from "zod"; +import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; +import { WorkerApiHeartbeatRequestBody, WorkerApiHeartbeatResponseBody } from "./schemas.js"; +import { HEADER_NAME } from "./consts.js"; + +type WorkerApiClientOptions = { + apiURL: string; + workerToken: string; + instanceName: string; + deploymentId?: string; +}; + +export class WorkerApiClient { + private readonly apiURL: string; + private readonly workerToken: string; + private readonly instanceName: string; + private readonly deploymentId?: string; + + constructor(opts: WorkerApiClientOptions) { + this.apiURL = opts.apiURL.replace(/\/$/, ""); + this.workerToken = opts.workerToken; + this.instanceName = opts.instanceName; + this.deploymentId = opts.deploymentId; + + if (!this.apiURL) { + throw new Error("apiURL is required and needs to be a non-empty string"); + } + + if (!this.workerToken) { + throw new Error("workerToken is required and needs to be a non-empty string"); + } + + if (!this.instanceName) { + throw new Error("instanceName is required and 
needs to be a non-empty string"); + } + } + + async heartbeat(body: WorkerApiHeartbeatRequestBody) { + return wrapZodFetch(WorkerApiHeartbeatResponseBody, `${this.apiURL}/api/v1/worker/heartbeat`, { + method: "POST", + headers: { + ...this.defaultHeaders, + "Content-Type": "application/json", + }, + body: JSON.stringify(body), + }); + } + + async dequeue() { + return wrapZodFetch(WorkerApiHeartbeatResponseBody, `${this.apiURL}/api/v1/worker/heartbeat`, { + headers: { + ...this.defaultHeaders, + }, + }); + } + + private get defaultHeaders(): HeadersInit { + return { + Authorization: `Bearer ${this.workerToken}`, + [HEADER_NAME.WORKER_INSTANCE_NAME]: this.instanceName, + ...(this.deploymentId && { [HEADER_NAME.WORKER_DEPLOYMENT_ID]: this.deploymentId }), + }; + } +} + +type ApiResult = + | { success: true; data: TSuccessResult } + | { + success: false; + error: string; + }; + +async function wrapZodFetch( + schema: T, + url: string, + requestInit?: RequestInit +): Promise>> { + try { + const response = await zodfetch(schema, url, requestInit, { + retry: { + minTimeoutInMs: 500, + maxTimeoutInMs: 5000, + maxAttempts: 5, + factor: 2, + randomize: false, + }, + }); + + return { + success: true, + data: response, + }; + } catch (error) { + if (error instanceof ApiError) { + return { + success: false, + error: error.message, + }; + } else if (error instanceof Error) { + return { + success: false, + error: error.message, + }; + } else { + return { + success: false, + error: String(error), + }; + } + } +} diff --git a/packages/worker/src/consts.ts b/packages/worker/src/consts.ts new file mode 100644 index 0000000000..b935b1a710 --- /dev/null +++ b/packages/worker/src/consts.ts @@ -0,0 +1,4 @@ +export const HEADER_NAME = { + WORKER_INSTANCE_NAME: "x-trigger-worker-instance-name", + WORKER_DEPLOYMENT_ID: "x-trigger-worker-deployment-id", +}; diff --git a/packages/worker/src/index.ts b/packages/worker/src/index.ts new file mode 100644 index 0000000000..22a2121276 --- /dev/null 
+++ b/packages/worker/src/index.ts @@ -0,0 +1,2 @@ +export { VERSION as WORKER_VERSION } from "./version.js"; +export * from "./consts.js"; diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/schemas.ts new file mode 100644 index 0000000000..3a519ac8c5 --- /dev/null +++ b/packages/worker/src/schemas.ts @@ -0,0 +1,19 @@ +import { z } from "zod"; + +export const WorkerApiHeartbeatRequestBody = z.object({ + cpu: z.object({ + used: z.number(), + available: z.number(), + }), + memory: z.object({ + used: z.number(), + available: z.number(), + }), + tasks: z.array(z.string()), +}); +export type WorkerApiHeartbeatRequestBody = z.infer; + +export const WorkerApiHeartbeatResponseBody = z.object({ + ok: z.literal(true), +}); +export type WorkerApiHeartbeatResponseBody = z.infer; diff --git a/packages/worker/src/version.ts b/packages/worker/src/version.ts new file mode 100644 index 0000000000..2e47a88682 --- /dev/null +++ b/packages/worker/src/version.ts @@ -0,0 +1 @@ +export const VERSION = "0.0.0"; diff --git a/packages/worker/tsconfig.json b/packages/worker/tsconfig.json new file mode 100644 index 0000000000..16881b51b6 --- /dev/null +++ b/packages/worker/tsconfig.json @@ -0,0 +1,8 @@ +{ + "extends": "../../.configs/tsconfig.base.json", + "references": [ + { + "path": "./tsconfig.src.json" + } + ] +} diff --git a/packages/worker/tsconfig.src.json b/packages/worker/tsconfig.src.json new file mode 100644 index 0000000000..db06c53317 --- /dev/null +++ b/packages/worker/tsconfig.src.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig.json", + "include": ["./src/**/*.ts"], + "compilerOptions": { + "isolatedDeclarations": false, + "composite": true, + "sourceMap": true, + "customConditions": ["@triggerdotdev/source"] + } +} From 41eb240bb3f9518172dc24f7b63f7afc80753992 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 19:06:46 +0000 Subject: [PATCH 102/485] add worker group services --- 
.../worker/workerGroupService.server.ts | 243 +++++++++++ .../worker/workerGroupTokenService.server.ts | 408 ++++++++++++++++++ apps/webapp/package.json | 5 +- apps/webapp/tsconfig.json | 2 + pnpm-lock.yaml | 31 ++ 5 files changed, 687 insertions(+), 2 deletions(-) create mode 100644 apps/webapp/app/v3/services/worker/workerGroupService.server.ts create mode 100644 apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts diff --git a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts new file mode 100644 index 0000000000..3416b4cde3 --- /dev/null +++ b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts @@ -0,0 +1,243 @@ +import { WorkerInstanceGroup, WorkerInstanceGroupType } from "@trigger.dev/database"; +import { WithRunEngine } from "../baseService.server"; +import { WorkerGroupTokenService } from "./workerGroupTokenService.server"; +import { logger } from "~/services/logger.server"; +import { makeFlags } from "~/v3/featureFlags.server"; + +export class WorkerGroupService extends WithRunEngine { + private readonly defaultNamePrefix = "worker_group"; + + async createWorkerGroup({ + projectId, + organizationId, + name, + description, + type, + }: { + projectId?: string; + organizationId?: string; + name?: string; + description?: string; + type?: WorkerInstanceGroupType; + }) { + name = name ?? (await this.generateWorkerName({ projectId })); + + const tokenService = new WorkerGroupTokenService({ + prisma: this._prisma, + engine: this._engine, + }); + const token = await tokenService.createToken(); + + const workerGroup = await this._prisma.workerInstanceGroup.create({ + data: { + projectId, + organizationId, + type: projectId + ? WorkerInstanceGroupType.UNMANAGED + : type ?? 
WorkerInstanceGroupType.SHARED, + masterQueue: this.generateMasterQueueName({ projectId, name }), + tokenId: token.id, + description, + name, + }, + }); + + return { + workerGroup, + token, + }; + } + + async updateWorkerGroup({ + projectId, + workerGroupId, + name, + description, + }: { + projectId: string; + workerGroupId: string; + name?: string; + description?: string; + }) { + const workerGroup = await this._prisma.workerInstanceGroup.findUnique({ + where: { + id: workerGroupId, + projectId, + }, + }); + + if (!workerGroup) { + logger.error("[WorkerGroupService] No worker group found for update", { + workerGroupId, + name, + description, + }); + return; + } + + await this._prisma.workerInstanceGroup.update({ + where: { + id: workerGroup.id, + }, + data: { + description, + name, + }, + }); + } + + async listWorkerGroups({ projectId }: { projectId?: string }) { + const workerGroups = await this._prisma.workerInstanceGroup.findMany({ + where: { + OR: [ + { + type: WorkerInstanceGroupType.SHARED, + }, + { + projectId, + }, + ], + }, + }); + + return workerGroups; + } + + async deleteWorkerGroup({ + projectId, + workerGroupId, + }: { + projectId: string; + workerGroupId: string; + }) { + const workerGroup = await this._prisma.workerInstanceGroup.findUnique({ + where: { + id: workerGroupId, + }, + }); + + if (!workerGroup) { + logger.error("[WorkerGroupService] WorkerGroup not found for deletion", { + workerGroupId, + projectId, + }); + return; + } + + if (workerGroup.projectId !== projectId) { + logger.error("[WorkerGroupService] WorkerGroup does not belong to project", { + workerGroupId, + projectId, + }); + return; + } + + await this._prisma.workerInstanceGroup.delete({ + where: { + id: workerGroupId, + }, + }); + } + + async getGlobalDefaultWorkerGroup() { + const flags = makeFlags(this._prisma); + + const defaultWorkerInstanceGroupId = await flags({ + key: "defaultWorkerInstanceGroupId", + }); + + if (!defaultWorkerInstanceGroupId) { + 
logger.error("[WorkerGroupService] Default worker group not found in feature flags"); + return; + } + + const workerGroup = await this._prisma.workerInstanceGroup.findUnique({ + where: { + id: defaultWorkerInstanceGroupId, + }, + }); + + if (!workerGroup) { + logger.error("[WorkerGroupService] Default worker group not found", { + defaultWorkerInstanceGroupId, + }); + return; + } + + return workerGroup; + } + + async getDefaultWorkerGroupForProject({ + projectId, + }: { + projectId: string; + }): Promise { + const project = await this._prisma.project.findUnique({ + where: { + id: projectId, + }, + include: { + defaultWorkerGroup: true, + }, + }); + + if (!project) { + logger.error("[WorkerGroupService] Project not found", { projectId }); + return; + } + + if (project.defaultWorkerGroup) { + return project.defaultWorkerGroup; + } + + return await this.getGlobalDefaultWorkerGroup(); + } + + async setDefaultWorkerGroupForProject({ + projectId, + workerGroupId, + }: { + projectId: string; + workerGroupId: string; + }) { + const workerGroup = await this._prisma.workerInstanceGroup.findUnique({ + where: { + id: workerGroupId, + }, + }); + + if (!workerGroup) { + logger.error("[WorkerGroupService] WorkerGroup not found", { + workerGroupId, + }); + return; + } + + await this._prisma.project.update({ + where: { + id: projectId, + }, + data: { + defaultWorkerGroupId: workerGroupId, + }, + }); + } + + private async generateWorkerName({ projectId }: { projectId?: string }) { + const workerGroups = await this._prisma.workerInstanceGroup.count({ + where: { + projectId: projectId ?? 
null, + }, + }); + + return `${this.defaultNamePrefix}_${workerGroups + 1}`; + } + + private generateMasterQueueName({ projectId, name }: { projectId?: string; name: string }) { + if (!projectId) { + return name; + } + + return `${projectId}-${name}`; + } +} diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts new file mode 100644 index 0000000000..f3666bb934 --- /dev/null +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -0,0 +1,408 @@ +import { customAlphabet } from "nanoid"; +import { WithRunEngine, WithRunEngineOptions } from "../baseService.server"; +import { createHash } from "crypto"; +import { logger } from "~/services/logger.server"; +import { WorkerInstanceGroup, WorkerInstanceGroupType } from "@trigger.dev/database"; +import { z } from "zod"; +import { HEADER_NAME } from "@trigger.dev/worker"; +import { DequeuedMessage } from "@internal/run-engine/engine/messages"; + +export class WorkerGroupTokenService extends WithRunEngine { + private readonly tokenPrefix = "tr_wgt_"; + private readonly tokenLength = 40; + private readonly tokenChars = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; + private readonly tokenGenerator = customAlphabet(this.tokenChars, this.tokenLength); + + async createToken() { + const rawToken = await this.generateToken(); + + const workerGroupToken = await this._prisma.workerGroupToken.create({ + data: { + tokenHash: rawToken.hash, + }, + }); + + return { + id: workerGroupToken.id, + tokenHash: workerGroupToken.tokenHash, + plaintext: rawToken.plaintext, + }; + } + + async findWorkerGroup({ token }: { token: string }) { + const tokenHash = await this.hashToken(token); + + const workerGroupToken = await this._prisma.workerGroupToken.findFirst({ + where: { + workerGroup: { + isNot: null, + }, + tokenHash, + }, + include: { + workerGroup: true, + }, + }); + + if (!workerGroupToken) { + 
logger.warn("[WorkerGroupTokenService] Token not found", { token }); + return; + } + + return workerGroupToken.workerGroup; + } + + async rotateToken({ workerGroupId }: { workerGroupId: string }) { + const workerGroup = await this._prisma.workerInstanceGroup.findUnique({ + where: { + id: workerGroupId, + }, + }); + + if (!workerGroup) { + logger.error("[WorkerGroupTokenService] WorkerGroup not found", { workerGroupId }); + return; + } + + const rawToken = await this.generateToken(); + + const workerGroupToken = await this._prisma.workerGroupToken.update({ + where: { + id: workerGroup.tokenId, + }, + data: { + tokenHash: rawToken.hash, + }, + }); + + if (!workerGroupToken) { + logger.error("[WorkerGroupTokenService] WorkerGroupToken not found", { workerGroupId }); + return; + } + + return { + id: workerGroupToken.id, + tokenHash: workerGroupToken.tokenHash, + plaintext: rawToken.plaintext, + }; + } + + private async hashToken(token: string) { + return createHash("sha256").update(token).digest("hex"); + } + + private async generateToken() { + const plaintext = `${this.tokenPrefix}${this.tokenGenerator()}`; + const hash = await this.hashToken(plaintext); + + return { + plaintext, + hash, + }; + } + + async authenticate(request: Request): Promise { + const token = request.headers.get("Authorization")?.replace("Bearer ", "").trim(); + + if (!token) { + logger.error("[WorkerGroupTokenService] Token not found in request", { + headers: this.sanitizeHeaders(request), + }); + return; + } + + if (!token.startsWith(this.tokenPrefix)) { + logger.error("[WorkerGroupTokenService] Token does not start with expected prefix", { + token, + prefix: this.tokenPrefix, + }); + return; + } + + const instanceName = request.headers.get(HEADER_NAME.WORKER_INSTANCE_NAME); + + if (!instanceName) { + logger.error("[WorkerGroupTokenService] Instance name not found in request", { + headers: this.sanitizeHeaders(request), + }); + return; + } + + const workerGroup = await this.findWorkerGroup({ 
token }); + + if (!workerGroup) { + logger.warn("[WorkerGroupTokenService] Worker group not found", { token }); + return; + } + + const workerInstance = await this.getOrCreateWorkerInstance({ + workerGroup, + instanceName, + deploymentId: request.headers.get(HEADER_NAME.WORKER_DEPLOYMENT_ID) ?? undefined, + }); + + if (!workerInstance) { + logger.error("[WorkerGroupTokenService] Unable to get or create worker instance", { + workerGroup, + instanceName, + }); + return; + } + + if (workerGroup.type === WorkerInstanceGroupType.SHARED) { + return new AuthenticatedWorkerInstance({ + prisma: this._prisma, + engine: this._engine, + type: WorkerInstanceGroupType.SHARED, + workerGroupId: workerGroup.id, + workerInstanceId: workerInstance.id, + masterQueue: workerGroup.masterQueue, + }); + } + + if (!workerInstance.environmentId) { + logger.error( + "[WorkerGroupTokenService] Non-shared worker instance not linked to environment", + { workerGroup, workerInstance } + ); + return; + } + + if (!workerInstance.deployment) { + logger.error( + "[WorkerGroupTokenService] Non-shared worker instance not linked to deployment", + { workerGroup, workerInstance } + ); + return; + } + + if (!workerInstance.deployment.workerId) { + logger.error( + "[WorkerGroupTokenService] Non-shared worker instance deployment not linked to background worker", + { workerGroup, workerInstance } + ); + return; + } + + return new AuthenticatedWorkerInstance({ + prisma: this._prisma, + engine: this._engine, + type: WorkerInstanceGroupType.UNMANAGED, + workerGroupId: workerGroup.id, + workerInstanceId: workerInstance.id, + masterQueue: workerGroup.masterQueue, + environmentId: workerInstance.environmentId, + deploymentId: workerInstance.deployment.id, + backgroundWorkerId: workerInstance.deployment.workerId, + }); + } + + private async getOrCreateWorkerInstance({ + workerGroup, + instanceName, + deploymentId, + }: { + workerGroup: WorkerInstanceGroup; + instanceName: string; + deploymentId?: string; + }) { + 
const workerInstance = await this._prisma.workerInstance.findUnique({ + where: { + workerGroupId_name: { + workerGroupId: workerGroup.id, + name: instanceName, + }, + }, + include: { + deployment: true, + }, + }); + + if (workerInstance) { + return workerInstance; + } + + if (workerGroup.type === WorkerInstanceGroupType.SHARED) { + return this._prisma.workerInstance.create({ + data: { + workerGroupId: workerGroup.id, + name: instanceName, + }, + include: { + deployment: true, + }, + }); + } + + if (!workerGroup.projectId || !workerGroup.organizationId) { + logger.error( + "[WorkerGroupTokenService] Non-shared worker group missing project or organization", + workerGroup + ); + return; + } + + // Unmanaged workers instances are locked to a specific deployment version + + const deployment = await this._prisma.workerDeployment.findUnique({ + where: { + id: deploymentId, + }, + }); + + if (!deployment) { + logger.error("[WorkerGroupTokenService] Deployment not found", { deploymentId }); + return; + } + + if (deployment.projectId !== workerGroup.projectId) { + logger.error("[WorkerGroupTokenService] Deployment does not match worker group project", { + deployment, + workerGroup, + }); + return; + } + + const nonSharedWorkerInstance = this._prisma.workerInstance.create({ + data: { + workerGroupId: workerGroup.id, + name: instanceName, + environmentId: deployment.environmentId, + deploymentId: deployment.id, + }, + include: { + deployment: true, + }, + }); + + return nonSharedWorkerInstance; + } + + private sanitizeHeaders(request: Request, skipHeaders = ["authorization"]) { + const sanitizedHeaders: Partial> = {}; + + for (const [key, value] of request.headers.entries()) { + if (!skipHeaders.includes(key.toLowerCase())) { + sanitizedHeaders[key] = value; + } + } + + return sanitizedHeaders; + } +} + +export const WorkerInstanceEnv = z.enum(["dev", "staging", "prod"]).default("prod"); +export type WorkerInstanceEnv = z.infer; + +export type 
AuthenticatedWorkerInstanceOptions = WithRunEngineOptions<{ + type: WorkerInstanceGroupType; + workerGroupId: string; + workerInstanceId: string; + masterQueue: string; + environmentId?: string; + deploymentId?: string; + backgroundWorkerId?: string; +}>; + +export class AuthenticatedWorkerInstance extends WithRunEngine { + readonly type: WorkerInstanceGroupType; + readonly workerGroupId: string; + readonly workerInstanceId: string; + readonly masterQueue: string; + readonly environmentId?: string; + readonly deploymentId?: string; + readonly backgroundWorkerId?: string; + + // FIXME + readonly isLatestDeployment = true; + + constructor(opts: AuthenticatedWorkerInstanceOptions) { + super({ prisma: opts.prisma, engine: opts.engine }); + + this.type = opts.type; + this.workerGroupId = opts.workerGroupId; + this.workerInstanceId = opts.workerInstanceId; + this.masterQueue = opts.masterQueue; + this.environmentId = opts.environmentId; + this.deploymentId = opts.deploymentId; + this.backgroundWorkerId = opts.backgroundWorkerId; + } + + async dequeue(maxRunCount = 10): Promise { + if (this.type === WorkerInstanceGroupType.SHARED) { + return await this._engine.dequeueFromMasterQueue({ + consumerId: this.workerInstanceId, + masterQueue: this.masterQueue, + maxRunCount, + }); + } + + if (!this.environmentId || !this.deploymentId || !this.backgroundWorkerId) { + logger.error("[AuthenticatedWorkerInstance] Missing environment or deployment", { + ...this.toJSON(), + }); + return []; + } + + if (this.isLatestDeployment) { + return await this._engine.dequeueFromEnvironmentMasterQueue({ + consumerId: this.workerInstanceId, + environmentId: this.environmentId, + maxRunCount, + }); + } + + return await this._engine.dequeueFromBackgroundWorkerMasterQueue({ + consumerId: this.workerInstanceId, + backgroundWorkerId: this.deploymentId, + maxRunCount, + }); + } + + async heartbeat() { + await this._prisma.workerInstance.update({ + where: { + id: this.workerInstanceId, + }, + data: { + 
lastHeartbeatAt: new Date(), + }, + }); + } + + toJSON(): WorkerGroupTokenAuthenticationResponse { + if (this.type === WorkerInstanceGroupType.SHARED) { + return { + type: WorkerInstanceGroupType.SHARED, + workerGroupId: this.workerGroupId, + workerInstanceId: this.workerInstanceId, + masterQueue: this.masterQueue, + }; + } + + return { + type: WorkerInstanceGroupType.UNMANAGED, + workerGroupId: this.workerGroupId, + workerInstanceId: this.workerInstanceId, + masterQueue: this.masterQueue, + environmentId: this.environmentId!, + deploymentId: this.deploymentId!, + }; + } +} + +export type WorkerGroupTokenAuthenticationResponse = + | { + type: typeof WorkerInstanceGroupType.SHARED; + workerGroupId: string; + workerInstanceId: string; + masterQueue: string; + } + | { + type: typeof WorkerInstanceGroupType.UNMANAGED; + workerGroupId: string; + workerInstanceId: string; + masterQueue: string; + environmentId: string; + deploymentId: string; + }; diff --git a/apps/webapp/package.json b/apps/webapp/package.json index da90a73bd0..c56f0ea1d8 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -49,8 +49,8 @@ "@electric-sql/react": "^0.3.5", "@headlessui/react": "^1.7.8", "@heroicons/react": "^2.0.12", - "@internal/zod-worker": "workspace:*", "@internal/run-engine": "workspace:*", + "@internal/zod-worker": "workspace:*", "@internationalized/date": "^3.5.1", "@lezer/highlight": "^1.1.6", "@opentelemetry/api": "1.9.0", @@ -100,6 +100,7 @@ "@trigger.dev/otlp-importer": "workspace:*", "@trigger.dev/platform": "1.0.13", "@trigger.dev/sdk": "workspace:*", + "@trigger.dev/worker": "workspace:*", "@trigger.dev/yalt": "npm:@trigger.dev/yalt", "@types/pg": "8.6.6", "@uiw/react-codemirror": "^4.19.5", @@ -255,4 +256,4 @@ "engines": { "node": ">=16.0.0" } -} \ No newline at end of file +} diff --git a/apps/webapp/tsconfig.json b/apps/webapp/tsconfig.json index bb6a03acc1..b402860b1b 100644 --- a/apps/webapp/tsconfig.json +++ b/apps/webapp/tsconfig.json @@ -31,6 +31,8 
@@ "@trigger.dev/yalt/*": ["../../packages/yalt/src/*"], "@trigger.dev/otlp-importer": ["../../internal-packages/otlp-importer/src/index"], "@trigger.dev/otlp-importer/*": ["../../internal-packages/otlp-importer/src/*"], + "@trigger.dev/worker": ["../../packages/worker/src/index"], + "@trigger.dev/worker/*": ["../../packages/worker/src/*"], "emails": ["../../internal-packages/emails/src/index"], "emails/*": ["../../internal-packages/emails/src/*"], "@internal/zod-worker": ["../../internal-packages/zod-worker/src/index"], diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3d15d9ec45..8601d37634 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -393,6 +393,9 @@ importers: '@trigger.dev/sdk': specifier: workspace:* version: link:../../packages/trigger-sdk + '@trigger.dev/worker': + specifier: workspace:* + version: link:../../packages/worker '@trigger.dev/yalt': specifier: npm:@trigger.dev/yalt version: 2.3.19 @@ -1543,6 +1546,34 @@ importers: specifier: ^5.5.4 version: 5.5.4 + packages/worker: + dependencies: + '@trigger.dev/core': + specifier: workspace:* + version: link:../core + zod: + specifier: 3.22.3 + version: 3.22.3 + devDependencies: + '@arethetypeswrong/cli': + specifier: ^0.15.4 + version: 0.15.4 + '@types/node': + specifier: 20.14.14 + version: 20.14.14 + rimraf: + specifier: 6.0.1 + version: 6.0.1 + tshy: + specifier: ^3.0.2 + version: 3.0.2 + tsx: + specifier: 4.17.0 + version: 4.17.0 + typescript: + specifier: ^5.5.4 + version: 5.5.4 + references/bun-catalog: dependencies: '@trigger.dev/sdk': From accee542324dfa529c19b0ff2cd6704a7dd49606 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 19:07:38 +0000 Subject: [PATCH 103/485] get master queue from default worker group on trigger --- .../app/v3/services/baseService.server.ts | 2 +- .../app/v3/services/triggerTaskV2.server.ts | 20 +++++++++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git 
a/apps/webapp/app/v3/services/baseService.server.ts b/apps/webapp/app/v3/services/baseService.server.ts index 2b5396c8be..7686f41b6f 100644 --- a/apps/webapp/app/v3/services/baseService.server.ts +++ b/apps/webapp/app/v3/services/baseService.server.ts @@ -46,7 +46,7 @@ export type WithRunEngineOptions = T & { export class WithRunEngine extends BaseService { protected readonly _engine: RunEngine; - constructor(opts: { prisma?: PrismaClientOrTransaction; engine?: RunEngine }) { + constructor(opts: { prisma?: PrismaClientOrTransaction; engine?: RunEngine } = {}) { super(opts.prisma); this._engine = opts.engine ?? engine; } diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index a3db88f2c5..2ec75c8c9c 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -20,6 +20,7 @@ import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; import { handleMetadataPacket } from "~/utils/packets"; +import { WorkerGroupService } from "./worker/workerGroupService.server"; export type TriggerTaskServiceOptions = { idempotencyKey?: string; @@ -307,6 +308,22 @@ export class TriggerTaskServiceV2 extends WithRunEngine { event.setAttribute("runId", runFriendlyId); span.setAttribute("runId", runFriendlyId); + const workerGroupService = new WorkerGroupService({ + prisma: this._prisma, + engine: this._engine, + }); + const workerGroup = await workerGroupService.getDefaultWorkerGroupForProject({ + projectId: environment.projectId, + }); + + if (!workerGroup) { + logger.error("Default worker group not found", { + projectId: environment.projectId, + }); + + return; + } + const taskRun = await this._engine.trigger( { number: num, @@ -326,8 +343,7 @@ export class TriggerTaskServiceV2 extends 
WithRunEngine { concurrencyKey: body.options?.concurrencyKey, queueName, queue: body.options?.queue, - //todo multiple worker pools support - masterQueue: "main", + masterQueue: workerGroup.masterQueue, isTest: body.options?.test ?? false, delayUntil, queuedAt: delayUntil ? undefined : new Date(), From a6a7b40fb806e518ea0d29e3e5f5ebe722b861bd Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 19:07:57 +0000 Subject: [PATCH 104/485] basic worker api routes --- .../app/routes/api.v1.worker.dequeue.ts | 6 ++ .../app/routes/api.v1.worker.heartbeat.ts | 7 ++ .../routeBuiilders/apiBuilder.server.ts | 84 +++++++++++++++++++ 3 files changed, 97 insertions(+) create mode 100644 apps/webapp/app/routes/api.v1.worker.dequeue.ts create mode 100644 apps/webapp/app/routes/api.v1.worker.heartbeat.ts diff --git a/apps/webapp/app/routes/api.v1.worker.dequeue.ts b/apps/webapp/app/routes/api.v1.worker.dequeue.ts new file mode 100644 index 0000000000..8213ac6cec --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker.dequeue.ts @@ -0,0 +1,6 @@ +import { json } from "@remix-run/server-runtime"; +import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const loader = createLoaderWorkerApiRoute({}, async ({ authenticatedWorker }) => { + return json(await authenticatedWorker.dequeue()); +}); diff --git a/apps/webapp/app/routes/api.v1.worker.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker.heartbeat.ts new file mode 100644 index 0000000000..a10c1e9cc8 --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker.heartbeat.ts @@ -0,0 +1,7 @@ +import { json } from "@remix-run/server-runtime"; +import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const loader = createLoaderWorkerApiRoute({}, async ({ authenticatedWorker }) => { + await authenticatedWorker.heartbeat(); + return json({ ok: true }); +}); diff --git 
a/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts b/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts index a0561cecb3..978b6b260c 100644 --- a/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts +++ b/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts @@ -13,6 +13,10 @@ import { authenticateApiRequestWithPersonalAccessToken, PersonalAccessTokenAuthenticationResult, } from "../personalAccessToken.server"; +import { + AuthenticatedWorkerInstance, + WorkerGroupTokenService, +} from "~/v3/services/worker/workerGroupTokenService.server"; type ApiKeyRouteBuilderOptions< TParamsSchema extends z.AnyZodObject | undefined = undefined, @@ -258,3 +262,83 @@ export function createLoaderPATApiRoute< function wrapResponse(request: Request, response: Response, useCors: boolean) { return useCors ? apiCors(request, response) : response; } + +type WorkerRouteBuilderOptions< + TParamsSchema extends z.AnyZodObject | undefined = undefined, + TSearchParamsSchema extends z.AnyZodObject | undefined = undefined +> = { + params?: TParamsSchema; + searchParams?: TSearchParamsSchema; +}; + +type WorkerHandlerFunction< + TParamsSchema extends z.AnyZodObject | undefined, + TSearchParamsSchema extends z.AnyZodObject | undefined +> = (args: { + params: TParamsSchema extends z.AnyZodObject ? z.infer : undefined; + searchParams: TSearchParamsSchema extends z.AnyZodObject + ? 
z.infer + : undefined; + authenticatedWorker: AuthenticatedWorkerInstance; + request: Request; +}) => Promise; + +export function createLoaderWorkerApiRoute< + TParamsSchema extends z.AnyZodObject | undefined = undefined, + TSearchParamsSchema extends z.AnyZodObject | undefined = undefined +>( + options: WorkerRouteBuilderOptions, + handler: WorkerHandlerFunction +) { + return async function loader({ request, params }: LoaderFunctionArgs) { + const { params: paramsSchema, searchParams: searchParamsSchema } = options; + + const service = new WorkerGroupTokenService(); + const authenticationResult = await service.authenticate(request); + + if (!authenticationResult) { + return json({ error: "Invalid or missing worker token" }, { status: 401 }); + } + + let parsedParams: any = undefined; + if (paramsSchema) { + const parsed = paramsSchema.safeParse(params); + if (!parsed.success) { + return json( + { error: "Params Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ); + } + parsedParams = parsed.data; + } + + let parsedSearchParams: any = undefined; + if (searchParamsSchema) { + const searchParams = Object.fromEntries(new URL(request.url).searchParams); + const parsed = searchParamsSchema.safeParse(searchParams); + if (!parsed.success) { + return json( + { error: "Query Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ); + } + parsedSearchParams = parsed.data; + } + + try { + const result = await handler({ + params: parsedParams, + searchParams: parsedSearchParams, + authenticatedWorker: authenticationResult, + request, + }); + return result; + } catch (error) { + console.error("Error in API route:", error); + if (error instanceof Response) { + return error; + } + return json({ error: "Internal Server Error" }, { status: 500 }); + } + }; +} From a035ab1bfda7f6750ab1d74a028db25619827686 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 5 Nov 2024 19:08:06 +0000 Subject: [PATCH 
105/485] add worker group tests --- apps/webapp/test/workerGroup.test.ts | 228 +++++++++++++++++++++++++++ 1 file changed, 228 insertions(+) create mode 100644 apps/webapp/test/workerGroup.test.ts diff --git a/apps/webapp/test/workerGroup.test.ts b/apps/webapp/test/workerGroup.test.ts new file mode 100644 index 0000000000..ced58be84d --- /dev/null +++ b/apps/webapp/test/workerGroup.test.ts @@ -0,0 +1,228 @@ +import { describe, expect, assert } from "vitest"; +import { + AuthenticatedEnvironment, + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + StartedRedisContainer, +} from "@internal/testcontainers"; +import { WorkerGroupTokenService } from "~/v3/services/worker/workerGroupTokenService.server"; +import { WorkerGroupService } from "~/v3/services/worker/workerGroupService.server"; +import { + PrismaClient, + PrismaClientOrTransaction, + TaskRunStatus, + WorkerInstanceGroupType, +} from "@trigger.dev/database"; +import { HEADER_NAME } from "@trigger.dev/worker"; +import { RunEngine } from "@internal/run-engine"; +import { trace } from "@opentelemetry/api"; +import { TriggerTaskServiceV2 } from "~/v3/services/triggerTaskV2.server"; + +describe("worker", () => { + const defaultInstanceName = "test_worker"; + + describe("auth", { concurrent: true, timeout: 10000 }, () => { + containerTest("should fail", async ({ prisma }) => { + const { workerGroup, token } = await setupWorkerGroup({ prisma }); + expect(workerGroup.type).toBe(WorkerInstanceGroupType.SHARED); + + const missingToken = new Request("https://example.com", { + headers: { + [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, + }, + }); + + const badToken = new Request("https://example.com", { + headers: { + Authorization: `Bearer foo`, + [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, + }, + }); + + const emptyToken = new Request("https://example.com", { + headers: { + Authorization: `Bearer `, + [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, + }, + }); + + 
const missingInstanceName = new Request("https://example.com", { + headers: { + Authorization: `Bearer ${token.plaintext}`, + }, + }); + + const tokenService = new WorkerGroupTokenService({ prisma }); + expect(await tokenService.authenticate(missingToken)).toBeUndefined(); + expect(await tokenService.authenticate(badToken)).toBeUndefined(); + expect(await tokenService.authenticate(emptyToken)).toBeUndefined(); + expect(await tokenService.authenticate(missingInstanceName)).toBeUndefined(); + }); + + containerTest("should succeed", async ({ prisma }) => { + const { workerGroup, token } = await setupWorkerGroup({ prisma }); + expect(workerGroup.type).toBe(WorkerInstanceGroupType.SHARED); + + const request = new Request("https://example.com", { + headers: { + Authorization: `Bearer ${token.plaintext}`, + [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, + }, + }); + + const tokenService = new WorkerGroupTokenService({ prisma }); + const authentication = await tokenService.authenticate(request); + + expect(authentication).toBeDefined(); + expect(authentication?.workerGroupId).toBe(workerGroup.id); + + const identicalAuth = await tokenService.authenticate(request); + expect(identicalAuth).toEqual(authentication); + + const secondInstanceName = "test_worker_2"; + const secondRequest = new Request("https://example.com", { + headers: { + Authorization: `Bearer ${token.plaintext}`, + [HEADER_NAME.WORKER_INSTANCE_NAME]: secondInstanceName, + }, + }); + const secondAuth = await tokenService.authenticate(secondRequest); + expect(secondAuth).toBeDefined(); + expect(secondAuth?.workerGroupId).toBe(workerGroup.id); + expect(secondAuth?.workerInstanceId).not.toBe(authentication?.workerInstanceId); + }); + }); + + describe("trigger", { timeout: 10000 }, () => { + containerTest("dequeue - unmanaged", async ({ prisma, redisContainer }) => { + const taskIdentifier = "test-task"; + + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + 
const { deployment } = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + assert(deployment, "deployment should be defined"); + + const engine = setupRunEngine(prisma, redisContainer); + const triggerService = new TriggerTaskServiceV2({ prisma, engine }); + + const { token, workerGroupService, workerGroup } = await setupWorkerGroup({ + prisma, + engine, + authenticatedEnvironment, + }); + + // Promote worker group to project default + await workerGroupService.setDefaultWorkerGroupForProject({ + projectId: authenticatedEnvironment.projectId, + workerGroupId: workerGroup.id, + }); + + const request = new Request("https://example.com", { + headers: { + Authorization: `Bearer ${token.plaintext}`, + [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, + [HEADER_NAME.WORKER_DEPLOYMENT_ID]: deployment.id, + }, + }); + + try { + const tokenService = new WorkerGroupTokenService({ prisma, engine }); + const authenticatedInstance = await tokenService.authenticate(request); + + assert(authenticatedInstance, "authenticationInstance should be defined"); + expect(authenticatedInstance.type).toBe(WorkerInstanceGroupType.UNMANAGED); + assert( + authenticatedInstance.type === WorkerInstanceGroupType.UNMANAGED, + "type should be unmanaged" + ); + + // Trigger + const run = await triggerService.call({ + environment: authenticatedEnvironment, + taskId: taskIdentifier, + body: {}, + }); + assert(run, "run should be defined"); + + const queueLengthBefore = await engine.runQueue.lengthOfQueue( + authenticatedEnvironment, + run.queue + ); + expect(queueLengthBefore).toBe(1); + + const runBeforeDequeue = await prisma.taskRun.findUnique({ + where: { + id: run.id, + }, + }); + expect(runBeforeDequeue?.status).toBe(TaskRunStatus.PENDING); + + // Dequeue + const dequeued = await authenticatedInstance.dequeue(); + expect(dequeued.length).toBe(1); + expect(dequeued[0].run.id).toBe(run.id); + expect(dequeued[0].run.attemptNumber).toBe(1); + } finally { 
+ engine.quit(); + } + }); + }); +}); + +async function setupWorkerGroup({ + prisma, + engine, + authenticatedEnvironment, +}: { + prisma: PrismaClientOrTransaction; + engine?: RunEngine; + authenticatedEnvironment?: AuthenticatedEnvironment; +}) { + const workerGroupService = new WorkerGroupService({ prisma, engine }); + const { workerGroup, token } = await workerGroupService.createWorkerGroup({ + projectId: authenticatedEnvironment?.projectId, + organizationId: authenticatedEnvironment?.organizationId, + }); + + return { + workerGroupService, + workerGroup, + token, + }; +} + +function setupRunEngine(prisma: PrismaClient, redisContainer: StartedRedisContainer) { + return new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); +} From d36b2d5ceeef1e2491e677d9bf68dfecee3eadb9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 5 Nov 2024 19:34:28 +0000 Subject: [PATCH 106/485] Improved attempt completing and made a test for retries --- .../run-engine/src/engine/index.test.ts | 486 +++++++++++------- .../run-engine/src/engine/index.ts | 12 +- 2 files changed, 317 insertions(+), 181 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index a7b7f69e25..254c6697d3 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -15,194 +15,310 @@ function assertNonNullable(value: T): asserts value is NonNullable { } describe("RunEngine", () => { - containerTest( - "Single run 
(trigger to success)", - { timeout: 15_000 }, - async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Single run (success)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, }, - tracer: trace.getTracer("test", "0.0.0"), + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + 
prisma + ); + expect(run).toBeDefined(); + expect(run.friendlyId).toBe("run_1234"); + + //check it's actually in the db + const runFromDb = await prisma.taskRun.findUnique({ + where: { + friendlyId: "run_1234", + }, }); + expect(runFromDb).toBeDefined(); + expect(runFromDb?.id).toBe(run.id); - try { - const taskIdentifier = "test-task"; + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("QUEUED"); - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); + //check the waitpoint is created + const runWaitpoint = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpoint.length).toBe(1); + expect(runWaitpoint[0].type).toBe("RUN"); - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - expect(run).toBeDefined(); - expect(run.friendlyId).toBe("run_1234"); + //check the queue length + const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); + expect(queueLength).toBe(1); - //check it's actually in the db - const runFromDb = await prisma.taskRun.findUnique({ - where: { - friendlyId: "run_1234", - }, - }); - expect(runFromDb).toBeDefined(); - expect(runFromDb?.id).toBe(run.id); + //concurrency before + const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyBefore).toBe(0); - const executionData = await engine.getRunExecutionData({ runId: run.id }); - 
assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("QUEUED"); + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued.length).toBe(1); + expect(dequeued[0].run.id).toBe(run.id); + expect(dequeued[0].run.attemptNumber).toBe(1); - //check the waitpoint is created - const runWaitpoint = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpoint.length).toBe(1); - expect(runWaitpoint[0].type).toBe("RUN"); + const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyAfter).toBe(1); - //check the queue length - const queueLength = await engine.runQueue.lengthOfQueue( - authenticatedEnvironment, - run.queue - ); - expect(queueLength).toBe(1); + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].execution.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - //concurrency before - const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyBefore).toBe(0); + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData2.run.attemptNumber).toBe(1); + expect(executionData2.run.status).toBe("EXECUTING"); - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - expect(dequeued.length).toBe(1); - expect(dequeued[0].run.id).toBe(run.id); - 
expect(dequeued[0].run.attemptNumber).toBe(1); + let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; + engine.eventBus.on("runSucceeded", (result) => { + event = result; + }); - const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyAfter).toBe(1); + //complete the run + const result = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: attemptResult.snapshot.id, + completion: { + ok: true, + id: dequeued[0].run.id, + output: `{"foo":"bar"}`, + outputType: "application/json", + }, + }); + expect(result).toBe("COMPLETED"); + + //state should be completed + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData3.run.attemptNumber).toBe(1); + expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); + + //event + assertNonNullable(event); + const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; + expect(completedEvent.run.spanId).toBe(run.spanId); + expect(completedEvent.run.output).toBe('{"foo":"bar"}'); + expect(completedEvent.run.outputType).toBe("application/json"); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].execution.id, - }); - expect(attemptResult.run.id).toBe(run.id); - expect(attemptResult.run.status).toBe("EXECUTING"); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); - 
expect(executionData2.run.attemptNumber).toBe(1); - expect(executionData2.run.status).toBe("EXECUTING"); - - let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; - engine.eventBus.on("runSucceeded", (result) => { - event = result; - }); + //waitpoint should have been completed, with the output + const runWaitpointAfter = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpointAfter.length).toBe(1); + expect(runWaitpointAfter[0].type).toBe("RUN"); + expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); + } finally { + engine.quit(); + } + }); - //complete the run - const result = await engine.completeRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: attemptResult.snapshot.id, - completion: { - ok: true, - id: dequeued[0].run.id, - output: `{"foo":"bar"}`, - outputType: "application/json", + containerTest("Single run (failed)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - }); - expect(result).toBe("COMPLETED"); + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); - //state should be completed - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData3.run.attemptNumber).toBe(1); - expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); - - 
//event - assertNonNullable(event); - const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; - expect(completedEvent.run.spanId).toBe(run.spanId); - expect(completedEvent.run.output).toBe('{"foo":"bar"}'); - expect(completedEvent.run.outputType).toBe("application/json"); - - //concurrency should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); + try { + const taskIdentifier = "test-task"; - //waitpoint should have been completed, with the output - const runWaitpointAfter = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpointAfter.length).toBe(1); - expect(runWaitpointAfter[0].type).toBe("RUN"); - expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); - } finally { - engine.quit(); - } + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].execution.id, + }); + + //fail the attempt + const error = { + type: "BUILT_IN_ERROR" as const, + name: "UserError", + message: "This is a user error", + stackTrace: "Error: This is a user error\n at :1:1", + }; + const result = await engine.completeRunAttempt({ 
+ runId: dequeued[0].run.id, + snapshotId: attemptResult.snapshot.id, + completion: { + ok: false, + id: dequeued[0].run.id, + error, + }, + }); + expect(result).toBe("COMPLETED"); + + //state should be completed + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData3.run.attemptNumber).toBe(1); + expect(executionData3.run.status).toBe("COMPLETED_WITH_ERRORS"); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + + //waitpoint should have been completed, with the output + const runWaitpointAfter = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpointAfter.length).toBe(1); + expect(runWaitpointAfter[0].type).toBe("RUN"); + const output = JSON.parse(runWaitpointAfter[0].output as string); + expect(output.type).toBe(error.type); + expect(runWaitpointAfter[0].outputIsError).toBe(true); + } finally { + engine.quit(); } - ); + }); containerTest( - "Single run (trigger to user-failure)", + "Single run (retry attempt, then succeed)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { //create environment @@ -294,22 +410,32 @@ describe("RunEngine", () => { ok: false, id: dequeued[0].run.id, error, + retry: { + timestamp: Date.now(), + delay: 0, + }, }, }); - expect(result).toBe("COMPLETED"); + expect(result).toBe("RETRY_IMMEDIATELY"); //state should be completed const executionData3 = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData3.run.attemptNumber).toBe(1); - expect(executionData3.run.status).toBe("COMPLETED_WITH_ERRORS"); + 
expect(executionData3.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData3.run.attemptNumber).toBe(2); + expect(executionData3.run.status).toBe("RETRYING_AFTER_FAILURE"); - //concurrency should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); + //now complete it successfully + const result2 = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: executionData3.snapshot.id, + completion: { + ok: true, + id: dequeued[0].run.id, + output: `{"foo":"bar"}`, + outputType: "application/json", + }, + }); //waitpoint should have been completed, with the output const runWaitpointAfter = await prisma.waitpoint.findMany({ @@ -319,9 +445,15 @@ describe("RunEngine", () => { }); expect(runWaitpointAfter.length).toBe(1); expect(runWaitpointAfter[0].type).toBe("RUN"); - const output = JSON.parse(runWaitpointAfter[0].output as string); - expect(output.type).toBe(error.type); - expect(runWaitpointAfter[0].outputIsError).toBe(true); + expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); + expect(runWaitpointAfter[0].outputIsError).toBe(false); + + //state should be completed + const executionData4 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData4); + expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData4.run.attemptNumber).toBe(2); + expect(executionData4.run.status).toBe("COMPLETED_SUCCESSFULLY"); } finally { engine.quit(); } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 4d3203b8a0..dbee9fe49c 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -105,6 +105,8 @@ type TriggerParams = { seedMetadataType?: string; }; +type FailedAttemptResult = "COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"; + 
const workerCatalog = { waitpointCompleteDateTime: { schema: z.object({ @@ -1024,7 +1026,7 @@ export class RunEngine { runId: string; snapshotId: string; completion: TaskRunExecutionResult; - }): Promise<"COMPLETED" | "RETRIED"> { + }): Promise { switch (completion.ok) { case true: { return this.#attemptSucceeded({ runId, snapshotId, completion, tx: this.prisma }); @@ -1486,7 +1488,7 @@ export class RunEngine { snapshotId: string; completion: TaskRunFailedExecutionResult; tx: PrismaClientOrTransaction; - }): Promise<"COMPLETED" | "RETRIED"> { + }): Promise<"COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"> { const prisma = this.prisma; return this.#trace("completeRunAttemptFailure", { runId, snapshotId }, async (span) => { @@ -1575,19 +1577,21 @@ export class RunEngine { timestamp: retryAt.getTime(), tx: prisma, }); + + return "RETRY_QUEUED" as const; } else { //it will continue running because the retry delay is short await this.#createExecutionSnapshot(prisma, { run, snapshot: { - executionStatus: "PENDING_EXECUTING", + executionStatus: "EXECUTING", description: "Attempt failed wth a short delay, starting a new attempt.", }, }); await this.#sendRunChangedNotificationToWorker({ runId }); } - return "RETRIED" as const; + return "RETRY_IMMEDIATELY" as const; } const status = runStatusFromError(completion.error); From cca3a5be30282c019cd9287b575079e4610b0fa1 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 6 Nov 2024 12:14:59 +0000 Subject: [PATCH 107/485] Add a delayed run test --- .../run-engine/src/engine/index.test.ts | 88 ++++++++++++++++++- 1 file changed, 86 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 254c6697d3..50f5c6054e 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -735,6 +735,92 @@ describe("RunEngine", () => { //todo cancelling a run + //todo crashed 
run + + //todo system failure run + + //todo delaying a run + containerTest("Run delayed", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + delayUntil: new Date(Date.now() + 500), + }, + prisma + ); + + //should be created but not queued yet + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); + + //wait for 1 seconds + await setTimeout(1_000); + + //should now be queued + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + } finally { + engine.quit(); + } + }); + //todo expiring a run containerTest("Run expiring 
(ttl)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { //create environment @@ -825,6 +911,4 @@ describe("RunEngine", () => { engine.quit(); } }); - - //todo delaying a run }); From 8cda15c299f5d3bc8966caa3217cf28345d0cff9 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 6 Nov 2024 11:42:33 +0000 Subject: [PATCH 108/485] rename and implement lastDequeueAt --- .../v3/services/worker/workerGroupTokenService.server.ts | 9 +++++++++ internal-packages/database/prisma/schema.prisma | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index f3666bb934..5e1273e6c4 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -344,6 +344,15 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { return []; } + await this._prisma.workerInstance.update({ + where: { + id: this.workerInstanceId, + }, + data: { + lastDequeueAt: new Date(), + }, + }); + if (this.isLatestDeployment) { return await this._engine.dequeueFromEnvironmentMasterQueue({ consumerId: this.workerInstanceId, diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 8b1982b33f..cc4fd13e68 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2102,7 +2102,7 @@ model WorkerInstance { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - lastPullAt DateTime? + lastDequeueAt DateTime? lastHeartbeatAt DateTime? 
@@unique([workerGroupId, name]) From 0160c97834f072ebca2fb83265104b4d254aa647 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 6 Nov 2024 12:57:25 +0000 Subject: [PATCH 109/485] Renamed checkpointThresholdMs, added a stub for createCheckpoint, and improved some types --- .../run-engine/src/engine/index.ts | 39 +++++++++++++++++-- .../run-engine/src/engine/messages.ts | 2 +- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index dbee9fe49c..92870e1b2d 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -61,7 +61,7 @@ type Options = { baseCostInCents: number; }; /** If not set then checkpoints won't ever be used */ - checkpointThresholdMs?: number; + retryWarmStartThresholdMs?: number; tracer: Tracer; }; @@ -679,7 +679,7 @@ export class RunEngine { return { version: "1" as const, - execution: { + snapshot: { id: newSnapshot.id, }, image: result.deployment?.imageReference ?? undefined, @@ -1316,6 +1316,37 @@ export class RunEngine { ); } + async createCheckpoint({ + runId, + snapshotId, + checkpoint, + tx, + }: { + runId: string; + snapshotId: string; + //todo + checkpoint: Record; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? 
this.prisma; + + return await this.runLock.lock([runId], 5_000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + if (snapshot.id !== snapshotId) { + return { + ok: false as const, + error: "Not the latest snapshot", + }; + } + + //we know it's the latest snapshot, so we can checkpoint + + //todo check the status is checkpointable + + //todo return a Result, which will determine if the server is allowed to shutdown + }); + } + /** Get required data to execute the run */ async getRunExecutionData({ runId, @@ -1567,8 +1598,8 @@ export class RunEngine { //if it's a long delay and we support checkpointing, put it back in the queue if ( - this.options.checkpointThresholdMs !== undefined && - completion.retry.delay >= this.options.checkpointThresholdMs + this.options.retryWarmStartThresholdMs !== undefined && + completion.retry.delay >= this.options.retryWarmStartThresholdMs ) { //long delay for retry, so requeue await this.#enqueueRun({ diff --git a/internal-packages/run-engine/src/engine/messages.ts b/internal-packages/run-engine/src/engine/messages.ts index 47ee64d72b..fc1720384e 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/internal-packages/run-engine/src/engine/messages.ts @@ -20,7 +20,7 @@ const CompletedWaitpoint = z.object({ /** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ const DequeuedMessage = z.object({ version: z.literal("1"), - execution: z.object({ + snapshot: z.object({ id: z.string(), }), image: z.string().optional(), From dbc28a889e0b8dd3f57185aa24e87ab7e7cc66d7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 6 Nov 2024 12:59:51 +0000 Subject: [PATCH 110/485] Fix tests --- internal-packages/run-engine/src/engine/index.test.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 50f5c6054e..53fdb97f1d 100644 
--- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -130,7 +130,7 @@ describe("RunEngine", () => { //create an attempt const attemptResult = await engine.startRunAttempt({ runId: dequeued[0].run.id, - snapshotId: dequeued[0].execution.id, + snapshotId: dequeued[0].snapshot.id, }); expect(attemptResult.run.id).toBe(run.id); expect(attemptResult.run.status).toBe("EXECUTING"); @@ -267,7 +267,7 @@ describe("RunEngine", () => { //create an attempt const attemptResult = await engine.startRunAttempt({ runId: dequeued[0].run.id, - snapshotId: dequeued[0].execution.id, + snapshotId: dequeued[0].snapshot.id, }); //fail the attempt @@ -393,7 +393,7 @@ describe("RunEngine", () => { //create an attempt const attemptResult = await engine.startRunAttempt({ runId: dequeued[0].run.id, - snapshotId: dequeued[0].execution.id, + snapshotId: dequeued[0].snapshot.id, }); //fail the attempt @@ -698,7 +698,7 @@ describe("RunEngine", () => { //create an attempt const attemptResult = await engine.startRunAttempt({ runId: dequeued[0].run.id, - snapshotId: dequeued[0].execution.id, + snapshotId: dequeued[0].snapshot.id, }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); From 176407b1609f8ca8cdf1dcc4804a33d6c10d0b8a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 6 Nov 2024 13:07:19 +0000 Subject: [PATCH 111/485] rename complete attempt result --- internal-packages/run-engine/src/engine/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 92870e1b2d..9029f3a118 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -105,7 +105,7 @@ type TriggerParams = { seedMetadataType?: string; }; -type FailedAttemptResult = "COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"; 
+type CompleteAttemptResult = "COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"; const workerCatalog = { waitpointCompleteDateTime: { @@ -1026,7 +1026,7 @@ export class RunEngine { runId: string; snapshotId: string; completion: TaskRunExecutionResult; - }): Promise { + }): Promise { switch (completion.ok) { case true: { return this.#attemptSucceeded({ runId, snapshotId, completion, tx: this.prisma }); From 5cb1fef6cd1809181d819cf2ae8d4c8a92a6e5f8 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 6 Nov 2024 13:10:48 +0000 Subject: [PATCH 112/485] =?UTF-8?q?When=20an=20attempt=20fails=20snapshot?= =?UTF-8?q?=20goes=20to=20=E2=80=9CPENDING=5FEXECUTING=E2=80=9D.=20Fixes?= =?UTF-8?q?=20for=20the=20attempt=20number?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../run-engine/src/engine/index.test.ts | 14 +++++++++++--- internal-packages/run-engine/src/engine/index.ts | 11 +++++------ 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 53fdb97f1d..70dc0c5df3 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -421,14 +421,22 @@ describe("RunEngine", () => { //state should be completed const executionData3 = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("EXECUTING"); - expect(executionData3.run.attemptNumber).toBe(2); + expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + //only when the new attempt is created, should the attempt be increased + expect(executionData3.run.attemptNumber).toBe(1); expect(executionData3.run.status).toBe("RETRYING_AFTER_FAILURE"); + //create a second attempt + const attemptResult2 = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: 
executionData3.snapshot.id, + }); + expect(attemptResult2.run.attemptNumber).toBe(2); + //now complete it successfully const result2 = await engine.completeRunAttempt({ runId: dequeued[0].run.id, - snapshotId: executionData3.snapshot.id, + snapshotId: attemptResult2.snapshot.id, completion: { ok: true, id: dequeued[0].run.id, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 9029f3a118..462c2ab11d 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1559,16 +1559,12 @@ export class RunEngine { ) { const retryAt = new Date(completion.retry.timestamp); - const attemptNumber = - latestSnapshot.attemptNumber === null ? 1 : latestSnapshot.attemptNumber + 1; - const run = await prisma.taskRun.update({ where: { id: runId, }, data: { status: "RETRYING_AFTER_FAILURE", - attemptNumber, }, include: { runtimeEnvironment: { @@ -1581,11 +1577,14 @@ export class RunEngine { }, }); + const nextAttemptNumber = + latestSnapshot.attemptNumber === null ? 1 : latestSnapshot.attemptNumber + 1; + this.eventBus.emit("runRetryScheduled", { time: failedAt, run: { id: run.id, - attemptNumber, + attemptNumber: nextAttemptNumber, queue: run.queue, taskIdentifier: run.taskIdentifier, traceContext: run.traceContext as Record, @@ -1615,7 +1614,7 @@ export class RunEngine { await this.#createExecutionSnapshot(prisma, { run, snapshot: { - executionStatus: "EXECUTING", + executionStatus: "PENDING_EXECUTING", description: "Attempt failed wth a short delay, starting a new attempt.", }, }); From 318d3e0749731de8ed9425a62a8b93fa1de13d49 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 6 Nov 2024 18:02:54 +0000 Subject: [PATCH 113/485] Cancelling a run. 
Heartbeat function exposed --- .../run-engine/src/engine/eventBus.ts | 10 + .../run-engine/src/engine/index.test.ts | 142 ++++++++++- .../run-engine/src/engine/index.ts | 238 +++++++++++++----- 3 files changed, 329 insertions(+), 61 deletions(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 460ef9617d..3d541c924c 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -49,6 +49,16 @@ export type EventBusEvents = { retryAt: Date; }, ]; + runCancelled: [ + { + time: Date; + run: { + id: string; + spanId: string; + error: TaskRunError; + }; + }, + ]; executionSnapshotCreated: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 70dc0c5df3..e48b0cd993 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -739,16 +739,142 @@ describe("RunEngine", () => { //todo heartbeats - //todo failing a run + containerTest( + "Cancelling a run with children (that is executing)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + //create background worker + await 
setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); - //todo cancelling a run + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //cancel + const result = await engine.cancelRun({ + runId: run.id, + completedAt: new Date(), + reason: "Cancelled by the user", + }); + expect(result).toBe("PENDING_CANCEL"); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("PENDING_CANCEL"); + expect(executionData?.run.status).toBe("CANCELED"); + + let cancelledEventData: EventBusEventArgs<"runCancelled">[0] | undefined = undefined; + engine.eventBus.on("runCancelled", (result) => { + cancelledEventData = result; + }); + + //todo call completeAttempt (this will happen from the worker) + const completeResult = await engine.completeRunAttempt({ + runId: run.id, + snapshotId: executionData!.snapshot.id, + completion: { + ok: false, + id: executionData!.run.id, + error: { + type: "INTERNAL_ERROR" as const, + code: "TASK_RUN_CANCELLED" as const, + }, + }, + }); + + //should now be fully cancelled + const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); + expect(executionDataAfter?.snapshot.executionStatus).toBe("FINISHED"); + 
expect(executionDataAfter?.run.status).toBe("CANCELED"); + + //check emitted event + assertNonNullable(cancelledEventData); + const assertedExpiredEventData = cancelledEventData as EventBusEventArgs<"runCancelled">[0]; + expect(assertedExpiredEventData.run.spanId).toBe(run.spanId); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + } finally { + engine.quit(); + } + } + ); + + //todo cancelling a run (not executing) + + //todo bulk cancelling runs //todo crashed run //todo system failure run - //todo delaying a run - containerTest("Run delayed", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + containerTest("Run start delayed", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -829,6 +955,8 @@ describe("RunEngine", () => { } }); + //todo extending the delay of a run + //todo expiring a run containerTest("Run expiring (ttl)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { //create environment @@ -915,6 +1043,12 @@ describe("RunEngine", () => { expect(executionData2.snapshot.executionStatus).toBe("FINISHED"); expect(executionData2.run.attemptNumber).toBe(undefined); expect(executionData2.run.status).toBe("EXPIRED"); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); } finally { engine.quit(); } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 462c2ab11d..b1386018ce 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -28,6 +28,7 @@ import { PrismaClient, 
PrismaClientOrTransaction, TaskRun, + TaskRunExecutionSnapshot, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, @@ -46,7 +47,7 @@ import { EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { DequeuedMessage, RunExecutionData } from "./messages"; -import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; +import { isCancellable, isDequeueableExecutionStatus, isExecuting } from "./statuses"; import { runStatusFromError } from "./errors"; type Options = { @@ -127,6 +128,14 @@ const workerCatalog = { }), visibilityTimeoutMs: 5000, }, + cancelRun: { + schema: z.object({ + runId: z.string(), + completedAt: z.coerce.date(), + reason: z.string().optional(), + }), + visibilityTimeoutMs: 5000, + }, }; type EngineWorker = Worker; @@ -175,6 +184,13 @@ export class RunEngine { expireRun: async ({ payload }) => { await this.expireRun({ runId: payload.runId }); }, + cancelRun: async ({ payload }) => { + await this.cancelRun({ + runId: payload.runId, + completedAt: payload.completedAt, + reason: payload.reason, + }); + }, }, }); @@ -1191,26 +1207,126 @@ export class RunEngine { }); } + /** + Call this to cancel a run. + If the run is in-progress it will change it's state to PENDING_CANCEL and notify the worker. + If the run is not in-progress it will finish it. + You can pass `finalizeRun` in if you know it's no longer running, e.g. the worker has messaged to say it's done. + */ async cancelRun({ runId, completedAt, reason, + finalizeRun, tx, }: { runId: string; - completedAt: Date; - reason: string; + completedAt?: Date; + reason?: string; + finalizeRun?: boolean; tx?: PrismaClientOrTransaction; - }) { + }): Promise<"FINISHED" | "PENDING_CANCEL"> { const prisma = tx ?? this.prisma; + reason = reason ?? 
"Cancelled by user"; return this.#trace("cancelRun", { runId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(this.prisma, runId); + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + + //already finished, do nothing + if (latestSnapshot.executionStatus === "FINISHED") { + return "FINISHED" as const; + } + + //is pending cancellation and we're not finalizing, alert the worker again + if (latestSnapshot.executionStatus === "PENDING_CANCEL" && !finalizeRun) { + await this.#sendRunChangedNotificationToWorker({ runId }); + return "PENDING_CANCEL" as const; + } + + //set the run to cancelled immediately + const error: TaskRunError = { + type: "STRING_ERROR", + raw: reason, + }; + + const run = await prisma.taskRun.update({ + where: { id: runId }, + data: { + status: "CANCELED", + completedAt: finalizeRun ? completedAt ?? new Date() : completedAt, + error, + }, + include: { + runtimeEnvironment: true, + associatedWaitpoint: true, + childRuns: { + select: { + id: true, + }, + }, + }, + }); + + //remove it from the queue and release concurrency + await this.runQueue.acknowledgeMessage(run.runtimeEnvironment.organizationId, runId); + + //if executing, we need to message the worker to cancel the run and put it into `PENDING_CANCEL` status + if (isExecuting(latestSnapshot.executionStatus)) { + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run, + snapshot: { + executionStatus: "PENDING_CANCEL", + description: "Run was cancelled", + }, + }); + + await this.#sendRunChangedNotificationToWorker({ runId }); + return "PENDING_CANCEL" as const; + } + + //not executing, so we will actually finish the run + await this.#createExecutionSnapshot(prisma, { + run, + snapshot: { + executionStatus: "FINISHED", + description: "Run was cancelled, not finished", + }, + }); + + if (!run.associatedWaitpoint) { + throw new ServiceValidationError("No 
associated waitpoint found", 400); + } + + //complete the waitpoint so the parent run can continue + await this.completeWaitpoint({ + id: run.associatedWaitpoint.id, + output: { value: JSON.stringify(error), isError: true }, + }); + + this.eventBus.emit("runCancelled", { + time: new Date(), + run: { + id: run.id, + spanId: run.spanId, + error, + }, + }); + + //schedule the cancellation of all the child runs + //it will call this function for each child, + //which will recursively cancel all children if they need to be + if (run.childRuns.length > 0) { + for (const childRun of run.childRuns) { + await this.worker.enqueue({ + id: `cancelRun:${childRun.id}`, + job: "cancelRun", + payload: { runId: childRun.id, completedAt: run.completedAt ?? new Date(), reason }, + }); + } + } - //todo - //ack - //complete waitpoint + return "FINISHED" as const; }); }); } @@ -1347,6 +1463,36 @@ export class RunEngine { }); } + /** + Send a heartbeat to signal the the run is still executing. + If a heartbeat isn't received, after a while the run is considered "stalled" + and some logic will be run to try recover it + */ + async heartbeatRun({ + runId, + snapshotId, + tx, + }: { + runId: string; + snapshotId: string; + tx?: PrismaClientOrTransaction; + }) { + const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? 
this.prisma, runId); + if (latestSnapshot.id !== snapshotId) { + this.logger.log("heartbeatRun no longer the latest snapshot, stopping the heartbeat.", { + runId, + snapshotId, + latestSnapshot: latestSnapshot, + }); + + await this.worker.ack(`heartbeatSnapshot.${snapshotId}`); + return; + } + + //it's the same as creating a new heartbeat + await this.#setHeartbeatDeadline({ runId, snapshotId, status: latestSnapshot.executionStatus }); + } + /** Get required data to execute the run */ async getRunExecutionData({ runId, @@ -1542,7 +1688,8 @@ export class RunEngine { await this.cancelRun({ runId, completedAt: failedAt, - reason: "Cancelled by user", + reason: completion.error.message, + finalizeRun: true, tx: prisma, }); return "COMPLETED" as const; @@ -1936,31 +2083,29 @@ export class RunEngine { runId: string; snapshotId: string; }) { + await this.#setHeartbeatDeadline({ + runId, + snapshotId, + status, + }); + } + + #getHeartbeatInterval(status: TaskRunExecutionStatus): number | null { switch (status) { case "RUN_CREATED": case "FINISHED": case "BLOCKED_BY_WAITPOINTS": case "QUEUED": { //we don't need to heartbeat these statuses - break; + return null; } case "PENDING_EXECUTING": case "PENDING_CANCEL": { - await this.#startHeartbeating({ - runId, - snapshotId, - intervalSeconds: 60, - }); - break; + return 60; } case "EXECUTING": case "EXECUTING_WITH_WAITPOINTS": { - await this.#startHeartbeating({ - runId, - snapshotId, - intervalSeconds: 60 * 15, - }); - break; + return 60 * 15; } default: { assertNever(status); @@ -2028,15 +2173,21 @@ export class RunEngine { //#endregion //#region Heartbeat - async #startHeartbeating({ + async #setHeartbeatDeadline({ runId, snapshotId, - intervalSeconds, + status, }: { runId: string; snapshotId: string; - intervalSeconds: number; + status: TaskRunExecutionStatus; }) { + const intervalSeconds = this.#getHeartbeatInterval(status); + + if (intervalSeconds === null) { + return; + } + await this.worker.enqueue({ id: 
`heartbeatSnapshot.${snapshotId}`, job: "heartbeatSnapshot", @@ -2045,36 +2196,6 @@ export class RunEngine { }); } - async #extendHeartbeatTimeout({ - runId, - snapshotId, - intervalSeconds, - tx, - }: { - runId: string; - snapshotId: string; - intervalSeconds: number; - tx?: PrismaClientOrTransaction; - }) { - const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? this.prisma, runId); - if (latestSnapshot.id !== snapshotId) { - this.logger.log( - "RunEngine.#extendHeartbeatTimeout() no longer the latest snapshot, stopping the heartbeat.", - { - runId, - snapshotId, - latestSnapshot: latestSnapshot, - } - ); - - await this.worker.ack(`heartbeatSnapshot.${snapshotId}`); - return; - } - - //it's the same as creating a new heartbeat - await this.#startHeartbeating({ runId, snapshotId, intervalSeconds }); - } - async #handleStalledSnapshot({ runId, snapshotId, @@ -2133,8 +2254,11 @@ export class RunEngine { throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); } case "PENDING_CANCEL": { - //we need to check if the run is still pending cancel - throw new NotImplementedError("Not implemented PENDING_CANCEL"); + await this.cancelRun({ + runId: latestSnapshot.runId, + finalizeRun: true, + tx, + }); } case "FINISHED": { //we need to check if the run is still finished From 29e8228eee9e9634636d5579f338a075641374af Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 6 Nov 2024 18:46:31 +0000 Subject: [PATCH 114/485] Cancelling test now includes a running child task --- .../run-engine/src/engine/index.test.ts | 121 +++++++++++++++--- 1 file changed, 103 insertions(+), 18 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index e48b0cd993..2a4c49bf30 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -586,10 +586,23 @@ describe("RunEngine", () => { 
expect(runWaitpoint.waitpoint.type).toBe("RUN"); expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + //dequeue the child run + const dequeuedChild = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: childRun.masterQueue, + maxRunCount: 10, + }); + + //start the child run + const childAttempt = await engine.startRunAttempt({ + runId: childRun.id, + snapshotId: dequeuedChild[0].snapshot.id, + }); + // complete the child run await engine.completeRunAttempt({ runId: childRun.id, - snapshotId: childExecutionData.snapshot.id, + snapshotId: childAttempt.snapshot.id, completion: { id: childRun.id, ok: true, @@ -775,17 +788,19 @@ describe("RunEngine", () => { }); try { - const taskIdentifier = "test-task"; + const parentTask = "parent-task"; + const childTask = "child-task"; + //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); //trigger the run - const run = await engine.trigger( + const parentRun = await engine.trigger( { number: 1, friendlyId: "run_p1234", environment: authenticatedEnvironment, - taskIdentifier, + taskIdentifier: parentTask, payload: "{}", payloadType: "application/json", context: {}, @@ -793,7 +808,7 @@ describe("RunEngine", () => { traceId: "t12345", spanId: "s12345", masterQueue: "main", - queueName: "task/test-task", + queueName: `task/${parentTask}`, isTest: false, tags: [], }, @@ -803,7 +818,7 @@ describe("RunEngine", () => { //dequeue the run const dequeued = await engine.dequeueFromMasterQueue({ consumerId: "test_12345", - masterQueue: run.masterQueue, + masterQueue: parentRun.masterQueue, maxRunCount: 10, }); @@ -814,26 +829,62 @@ describe("RunEngine", () => { }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - //cancel + //start child run + const childRun = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + 
environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + }, + prisma + ); + + //dequeue the child run + const dequeuedChild = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: childRun.masterQueue, + maxRunCount: 10, + }); + + //start the child run + const childAttempt = await engine.startRunAttempt({ + runId: childRun.id, + snapshotId: dequeuedChild[0].snapshot.id, + }); + + //cancel the parent run const result = await engine.cancelRun({ - runId: run.id, + runId: parentRun.id, completedAt: new Date(), reason: "Cancelled by the user", }); expect(result).toBe("PENDING_CANCEL"); - const executionData = await engine.getRunExecutionData({ runId: run.id }); + const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); expect(executionData?.snapshot.executionStatus).toBe("PENDING_CANCEL"); expect(executionData?.run.status).toBe("CANCELED"); - let cancelledEventData: EventBusEventArgs<"runCancelled">[0] | undefined = undefined; + let cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; engine.eventBus.on("runCancelled", (result) => { - cancelledEventData = result; + cancelledEventData.push(result); }); //todo call completeAttempt (this will happen from the worker) const completeResult = await engine.completeRunAttempt({ - runId: run.id, + runId: parentRun.id, snapshotId: executionData!.snapshot.id, completion: { ok: false, @@ -845,15 +896,49 @@ describe("RunEngine", () => { }, }); - //should now be fully cancelled - const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); + //parent should now be fully cancelled + const executionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); 
expect(executionDataAfter?.snapshot.executionStatus).toBe("FINISHED"); expect(executionDataAfter?.run.status).toBe("CANCELED"); + const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); + assertNonNullable(parentEvent); + expect(parentEvent.run.spanId).toBe(parentRun.spanId); + + //cancelling children is async, so we need to wait a brief moment + await setTimeout(200); + + //child should now be pending cancel + const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); + expect(childExecutionDataAfter?.snapshot.executionStatus).toBe("PENDING_CANCEL"); + expect(childExecutionDataAfter?.run.status).toBe("CANCELED"); + + //cancel the child (this will come from the worker) + const completeChildResult = await engine.completeRunAttempt({ + runId: childRun.id, + snapshotId: childExecutionDataAfter!.snapshot.id, + completion: { + ok: false, + id: childRun.id, + error: { + type: "INTERNAL_ERROR" as const, + code: "TASK_RUN_CANCELLED" as const, + }, + }, + }); + + //child should now be pending cancel + const childExecutionDataCancelled = await engine.getRunExecutionData({ + runId: childRun.id, + }); + expect(childExecutionDataCancelled?.snapshot.executionStatus).toBe("FINISHED"); + expect(childExecutionDataCancelled?.run.status).toBe("CANCELED"); + //check emitted event - assertNonNullable(cancelledEventData); - const assertedExpiredEventData = cancelledEventData as EventBusEventArgs<"runCancelled">[0]; - expect(assertedExpiredEventData.run.spanId).toBe(run.spanId); + expect(cancelledEventData.length).toBe(2); + const childEvent = cancelledEventData.find((r) => r.run.id === childRun.id); + assertNonNullable(childEvent); + expect(childEvent.run.spanId).toBe(childRun.spanId); //concurrency should have been released const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( From 3668cf516bcfcad68da79965cd5986375b512585 Mon Sep 17 00:00:00 2001 From: nicktrn 
<55853254+nicktrn@users.noreply.github.com> Date: Wed, 6 Nov 2024 15:09:58 +0000 Subject: [PATCH 115/485] don't await the unawaitable --- packages/cli-v3/src/entryPoints/deploy-index-worker.ts | 2 +- packages/cli-v3/src/entryPoints/dev-index-worker.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/cli-v3/src/entryPoints/deploy-index-worker.ts b/packages/cli-v3/src/entryPoints/deploy-index-worker.ts index 73de86535d..491f8973ae 100644 --- a/packages/cli-v3/src/entryPoints/deploy-index-worker.ts +++ b/packages/cli-v3/src/entryPoints/deploy-index-worker.ts @@ -169,7 +169,7 @@ await sendMessageInCatalog( "TASKS_FAILED_TO_PARSE", { zodIssues: err.error.issues, tasks }, async (msg) => { - await process.send?.(msg); + process.send?.(msg); } ); } else { diff --git a/packages/cli-v3/src/entryPoints/dev-index-worker.ts b/packages/cli-v3/src/entryPoints/dev-index-worker.ts index 9e6e8e05e9..2ef18444eb 100644 --- a/packages/cli-v3/src/entryPoints/dev-index-worker.ts +++ b/packages/cli-v3/src/entryPoints/dev-index-worker.ts @@ -153,7 +153,7 @@ await sendMessageInCatalog( "TASKS_FAILED_TO_PARSE", { zodIssues: err.error.issues, tasks }, async (msg) => { - await process.send?.(msg); + process.send?.(msg); } ); } else { From a0dcd785fc16b46396cffc532b11f6ae22ca2382 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 6 Nov 2024 17:22:57 +0000 Subject: [PATCH 116/485] simplify image build and add node-22 runtime --- packages/cli-v3/src/deploy/buildImage.ts | 71 +++++++++++++++--------- packages/core/src/v3/schemas/build.ts | 2 +- 2 files changed, 46 insertions(+), 27 deletions(-) diff --git a/packages/cli-v3/src/deploy/buildImage.ts b/packages/cli-v3/src/deploy/buildImage.ts index 62c2da2844..fa626f29ff 100644 --- a/packages/cli-v3/src/deploy/buildImage.ts +++ b/packages/cli-v3/src/deploy/buildImage.ts @@ -434,11 +434,19 @@ export type GenerateContainerfileOptions = { entrypoint: string; }; +const BASE_IMAGE: 
Record = { + bun: "imbios/bun-node:1.1.24-22-slim@sha256:9cfb7cd87529261c482fe17d8894c0986263f3a5ccf84ad65c00ec0e1ed539c6", + node: "node:21-bookworm-slim@sha256:99afef5df7400a8d118e0504576d32ca700de5034c4f9271d2ff7c91cc12d170", + "node-22": + "node:22-bookworm-slim@sha256:f73e9c70d4279d5e7b7cc1fe307c5de18b61089ffa2235230408dfb14e2f09a0", +}; + const DEFAULT_PACKAGES = ["busybox", "ca-certificates", "dumb-init", "git", "openssl"]; export async function generateContainerfile(options: GenerateContainerfileOptions) { switch (options.runtime) { - case "node": { + case "node": + case "node-22": { return await generateNodeContainerfile(options); } case "bun": { @@ -447,7 +455,7 @@ export async function generateContainerfile(options: GenerateContainerfileOption } } -async function generateBunContainerfile(options: GenerateContainerfileOptions) { +const parseGenerateOptions = (options: GenerateContainerfileOptions) => { const buildArgs = Object.entries(options.build.env || {}) .flatMap(([key]) => `ARG ${key}`) .join("\n"); @@ -463,19 +471,38 @@ async function generateBunContainerfile(options: GenerateContainerfileOptions) { " " ); + return { + baseImage: BASE_IMAGE[options.runtime], + baseInstructions, + buildArgs, + buildEnvVars, + packages, + postInstallCommands, + }; +}; + +async function generateBunContainerfile(options: GenerateContainerfileOptions) { + const { baseImage, buildArgs, buildEnvVars, postInstallCommands, baseInstructions, packages } = + parseGenerateOptions(options); + return `# syntax=docker/dockerfile:1 -FROM imbios/bun-node:1.1.24-22-slim@sha256:9cfb7cd87529261c482fe17d8894c0986263f3a5ccf84ad65c00ec0e1ed539c6 AS base +FROM ${baseImage} AS base ${baseInstructions} ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get --fix-broken install -y && apt-get install -y --no-install-recommends ${packages} && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && \ + apt-get --fix-broken install -y && \ + apt-get install -y 
--no-install-recommends ${packages} && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* FROM base AS build -RUN apt-get update && apt-get install -y --no-install-recommends \ - python3 make g++ && \ - apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && \ + apt-get install -y --no-install-recommends python3 make g++ && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* USER bun WORKDIR /app @@ -553,35 +580,27 @@ CMD [] } async function generateNodeContainerfile(options: GenerateContainerfileOptions) { - const buildArgs = Object.entries(options.build.env || {}) - .flatMap(([key]) => `ARG ${key}`) - .join("\n"); - - const buildEnvVars = Object.entries(options.build.env || {}) - .flatMap(([key]) => `ENV ${key}=$${key}`) - .join("\n"); - - const postInstallCommands = (options.build.commands || []).map((cmd) => `RUN ${cmd}`).join("\n"); - - const baseInstructions = (options.image?.instructions || []).join("\n"); - const packages = Array.from(new Set(DEFAULT_PACKAGES.concat(options.image?.pkgs || []))).join( - " " - ); + const { baseImage, buildArgs, buildEnvVars, postInstallCommands, baseInstructions, packages } = + parseGenerateOptions(options); return `# syntax=docker/dockerfile:1 -FROM node:21-bookworm-slim@sha256:99afef5df7400a8d118e0504576d32ca700de5034c4f9271d2ff7c91cc12d170 AS base +FROM ${baseImage} AS base ${baseInstructions} ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get --fix-broken install -y && apt-get install -y --no-install-recommends ${packages} && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && \ + apt-get --fix-broken install -y && \ + apt-get install -y --no-install-recommends ${packages} && \ + apt-get clean && rm -rf /var/lib/apt/lists/* FROM base AS build # Install build dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - python3 make g++ && \ - apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && \ + apt-get install -y --no-install-recommends 
python3 make g++ && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* USER node WORKDIR /app diff --git a/packages/core/src/v3/schemas/build.ts b/packages/core/src/v3/schemas/build.ts index 83518b5341..de1a1b35f7 100644 --- a/packages/core/src/v3/schemas/build.ts +++ b/packages/core/src/v3/schemas/build.ts @@ -13,7 +13,7 @@ export const BuildTarget = z.enum(["dev", "deploy"]); export type BuildTarget = z.infer; -export const BuildRuntime = z.enum(["node", "bun"]); +export const BuildRuntime = z.enum(["node", "node-22", "bun"]); export type BuildRuntime = z.infer; From cba4fcabc7062a051149996e764d1e1f503ad986 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:06:42 +0000 Subject: [PATCH 117/485] new worker action api routes --- .../routes/api.v1.worker.attempt.complete.ts | 23 +++++ .../app/routes/api.v1.worker.attempt.start.ts | 17 ++++ .../app/routes/api.v1.worker.heartbeat.ts | 2 +- .../routeBuiilders/apiBuilder.server.ts | 86 ++++++++++++++++++- .../worker/workerGroupTokenService.server.ts | 23 ++++- 5 files changed, 146 insertions(+), 5 deletions(-) create mode 100644 apps/webapp/app/routes/api.v1.worker.attempt.complete.ts create mode 100644 apps/webapp/app/routes/api.v1.worker.attempt.start.ts diff --git a/apps/webapp/app/routes/api.v1.worker.attempt.complete.ts b/apps/webapp/app/routes/api.v1.worker.attempt.complete.ts new file mode 100644 index 0000000000..22b89338ad --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker.attempt.complete.ts @@ -0,0 +1,23 @@ +import { json } from "@remix-run/server-runtime"; +import { TaskRunExecutionResult } from "@trigger.dev/core/v3"; +import { z } from "zod"; +import { createActionWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const loader = createActionWorkerApiRoute( + { + body: z.object({ + runId: z.string(), + snapshotId: z.string(), + completion: TaskRunExecutionResult, + }), + }, + async ({ authenticatedWorker, body }) => 
{ + const { runId, snapshotId, completion } = body; + const completeResult = await authenticatedWorker.completeRunAttempt({ + runId, + snapshotId, + completion, + }); + return json({ completeResult }); + } +); diff --git a/apps/webapp/app/routes/api.v1.worker.attempt.start.ts b/apps/webapp/app/routes/api.v1.worker.attempt.start.ts new file mode 100644 index 0000000000..abd18b864f --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker.attempt.start.ts @@ -0,0 +1,17 @@ +import { json } from "@remix-run/server-runtime"; +import { z } from "zod"; +import { createActionWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const loader = createActionWorkerApiRoute( + { + body: z.object({ + runId: z.string(), + snapshotId: z.string(), + }), + }, + async ({ authenticatedWorker, body }) => { + const { runId, snapshotId } = body; + const runExecutionData = await authenticatedWorker.startRunAttempt({ runId, snapshotId }); + return json(runExecutionData); + } +); diff --git a/apps/webapp/app/routes/api.v1.worker.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker.heartbeat.ts index a10c1e9cc8..cf7358e19f 100644 --- a/apps/webapp/app/routes/api.v1.worker.heartbeat.ts +++ b/apps/webapp/app/routes/api.v1.worker.heartbeat.ts @@ -2,6 +2,6 @@ import { json } from "@remix-run/server-runtime"; import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; export const loader = createLoaderWorkerApiRoute({}, async ({ authenticatedWorker }) => { - await authenticatedWorker.heartbeat(); + await authenticatedWorker.heartbeatWorkerInstance(); return json({ ok: true }); }); diff --git a/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts b/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts index 978b6b260c..d47f7f7eec 100644 --- a/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts +++ b/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts @@ -1,6 +1,6 @@ import { z } from "zod"; import { 
ApiAuthenticationResult, authenticateApiRequest } from "../apiAuth.server"; -import { json, LoaderFunctionArgs } from "@remix-run/server-runtime"; +import { json, LoaderFunctionArgs, ActionFunctionArgs } from "@remix-run/server-runtime"; import { fromZodError } from "zod-validation-error"; import { apiCors } from "~/utils/apiCors"; import { @@ -265,15 +265,18 @@ function wrapResponse(request: Request, response: Response, useCors: boolean) { type WorkerRouteBuilderOptions< TParamsSchema extends z.AnyZodObject | undefined = undefined, - TSearchParamsSchema extends z.AnyZodObject | undefined = undefined + TSearchParamsSchema extends z.AnyZodObject | undefined = undefined, + TBodySchema extends z.AnyZodObject | undefined = undefined > = { params?: TParamsSchema; searchParams?: TSearchParamsSchema; + body?: TBodySchema; }; type WorkerHandlerFunction< TParamsSchema extends z.AnyZodObject | undefined, - TSearchParamsSchema extends z.AnyZodObject | undefined + TSearchParamsSchema extends z.AnyZodObject | undefined, + TBodySchema extends z.AnyZodObject | undefined = undefined > = (args: { params: TParamsSchema extends z.AnyZodObject ? z.infer : undefined; searchParams: TSearchParamsSchema extends z.AnyZodObject @@ -281,6 +284,7 @@ type WorkerHandlerFunction< : undefined; authenticatedWorker: AuthenticatedWorkerInstance; request: Request; + body: TBodySchema extends z.AnyZodObject ? 
z.infer : undefined; }) => Promise; export function createLoaderWorkerApiRoute< @@ -331,6 +335,82 @@ export function createLoaderWorkerApiRoute< searchParams: parsedSearchParams, authenticatedWorker: authenticationResult, request, + body: undefined, + }); + return result; + } catch (error) { + console.error("Error in API route:", error); + if (error instanceof Response) { + return error; + } + return json({ error: "Internal Server Error" }, { status: 500 }); + } + }; +} + +export function createActionWorkerApiRoute< + TParamsSchema extends z.AnyZodObject | undefined = undefined, + TSearchParamsSchema extends z.AnyZodObject | undefined = undefined, + TBodySchema extends z.AnyZodObject | undefined = undefined +>( + options: WorkerRouteBuilderOptions, + handler: WorkerHandlerFunction +) { + return async function action({ request, params }: ActionFunctionArgs) { + const { params: paramsSchema, searchParams: searchParamsSchema, body: bodySchema } = options; + + const service = new WorkerGroupTokenService(); + const authenticationResult = await service.authenticate(request); + + if (!authenticationResult) { + return json({ error: "Invalid or missing worker token" }, { status: 401 }); + } + + let parsedParams: any = undefined; + if (paramsSchema) { + const parsed = paramsSchema.safeParse(params); + if (!parsed.success) { + return json( + { error: "Params Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ); + } + parsedParams = parsed.data; + } + + let parsedSearchParams: any = undefined; + if (searchParamsSchema) { + const searchParams = Object.fromEntries(new URL(request.url).searchParams); + const parsed = searchParamsSchema.safeParse(searchParams); + if (!parsed.success) { + return json( + { error: "Query Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ); + } + parsedSearchParams = parsed.data; + } + + let parsedBody: any = undefined; + if (bodySchema) { + const body = await request.clone().json(); + const parsed = 
bodySchema.safeParse(body); + if (!parsed.success) { + return json( + { error: "Body Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ); + } + parsedBody = parsed.data; + } + + try { + const result = await handler({ + params: parsedParams, + searchParams: parsedSearchParams, + authenticatedWorker: authenticationResult, + request, + body: parsedBody, }); return result; } catch (error) { diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 5e1273e6c4..e00a928b07 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -6,6 +6,7 @@ import { WorkerInstanceGroup, WorkerInstanceGroupType } from "@trigger.dev/datab import { z } from "zod"; import { HEADER_NAME } from "@trigger.dev/worker"; import { DequeuedMessage } from "@internal/run-engine/engine/messages"; +import { TaskRunExecutionResult } from "@trigger.dev/core/v3"; export class WorkerGroupTokenService extends WithRunEngine { private readonly tokenPrefix = "tr_wgt_"; @@ -368,7 +369,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { }); } - async heartbeat() { + async heartbeatWorkerInstance() { await this._prisma.workerInstance.update({ where: { id: this.workerInstanceId, @@ -379,6 +380,26 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { }); } + async heartbeatRun({ runId, snapshotId }: { runId: string; snapshotId: string }) { + // await this._engine.heartbeatRun({ runId, snapshotId }); + } + + async startRunAttempt({ runId, snapshotId }: { runId: string; snapshotId: string }) { + return await this._engine.startRunAttempt({ runId, snapshotId }); + } + + async completeRunAttempt({ + runId, + snapshotId, + completion, + }: { + runId: string; + snapshotId: string; + completion: TaskRunExecutionResult; + }) { + return await 
this._engine.completeRunAttempt({ runId, snapshotId, completion }); + } + toJSON(): WorkerGroupTokenAuthenticationResponse { if (this.type === WorkerInstanceGroupType.SHARED) { return { From b10094fe84026bed113e96a8128f2c40b9debae3 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:46:57 +0000 Subject: [PATCH 118/485] deprecate several context props --- .../run-engine/src/engine/index.ts | 25 ++++++++++------- packages/core/src/v3/schemas/common.ts | 27 +++++++++++++++---- 2 files changed, 38 insertions(+), 14 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index b1386018ce..4da75361ca 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -973,29 +973,36 @@ export class RunEngine { attempt: { number: nextAttemptNumber, startedAt: latestSnapshot.updatedAt, - //todo deprecate everything below - id: generateFriendlyId("attempt"), - backgroundWorkerId: run.lockedBy!.worker.id, - backgroundWorkerTaskId: run.lockedBy!.id, - status: "EXECUTING" as const, + /** @deprecated */ + id: "deprecated", + /** @deprecated */ + backgroundWorkerId: "deprecated", + /** @deprecated */ + backgroundWorkerTaskId: "deprecated", + /** @deprecated */ + status: "deprecated", }, run: { id: run.friendlyId, payload: run.payload, payloadType: run.payloadType, - context: run.context, createdAt: run.createdAt, tags: run.tags.map((tag) => tag.name), isTest: run.isTest, idempotencyKey: run.idempotencyKey ?? undefined, startedAt: run.startedAt ?? run.createdAt, - durationMs: run.usageDurationMs, - costInCents: run.costInCents, - baseCostInCents: run.baseCostInCents, maxAttempts: run.maxAttempts ?? undefined, version: run.lockedBy!.worker.version, metadata, maxDuration: run.maxDurationInSeconds ?? 
undefined, + /** @deprecated */ + context: undefined, + /** @deprecated */ + durationMs: run.usageDurationMs, + /** @deprecated */ + costInCents: run.costInCents, + /** @deprecated */ + baseCostInCents: run.baseCostInCents, }, queue: { id: queue.friendlyId, diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 7bf584d14f..66b48ef9da 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -130,19 +130,32 @@ export const TaskRun = z.object({ id: z.string(), payload: z.string(), payloadType: z.string(), - context: z.any(), tags: z.array(z.string()), isTest: z.boolean().default(false), createdAt: z.coerce.date(), startedAt: z.coerce.date().default(() => new Date()), idempotencyKey: z.string().optional(), maxAttempts: z.number().optional(), - durationMs: z.number().default(0), - costInCents: z.number().default(0), - baseCostInCents: z.number().default(0), version: z.string().optional(), metadata: z.record(DeserializedJsonSchema).optional(), maxDuration: z.number().optional(), + /** @deprecated */ + context: z.any(), + /** + * @deprecated For live values use the `usage` SDK functions + * @link https://trigger.dev/docs/run-usage + */ + durationMs: z.number().default(0), + /** + * @deprecated For live values use the `usage` SDK functions + * @link https://trigger.dev/docs/run-usage + */ + costInCents: z.number().default(0), + /** + * @deprecated For live values use the `usage` SDK functions + * @link https://trigger.dev/docs/run-usage + */ + baseCostInCents: z.number().default(0), }); export type TaskRun = z.infer; @@ -156,11 +169,15 @@ export const TaskRunExecutionTask = z.object({ export type TaskRunExecutionTask = z.infer; export const TaskRunExecutionAttempt = z.object({ - id: z.string(), number: z.number(), startedAt: z.coerce.date(), + /** @deprecated */ + id: z.string(), + /** @deprecated */ backgroundWorkerId: z.string(), + /** @deprecated */ backgroundWorkerTaskId: z.string(), 
+ /** @deprecated */ status: z.string(), }); From f14aa044e1a69ba1fcb4a506205e4f31d8e6ac72 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 10:32:43 +0000 Subject: [PATCH 119/485] Fix for bad import --- internal-packages/run-engine/src/engine/index.test.ts | 2 ++ internal-packages/run-engine/src/engine/index.ts | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 2a4c49bf30..c18b97a362 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -901,6 +901,8 @@ describe("RunEngine", () => { expect(executionDataAfter?.snapshot.executionStatus).toBe("FINISHED"); expect(executionDataAfter?.run.status).toBe("CANCELED"); + //check emitted event + expect(cancelledEventData.length).toBe(1); const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); assertNonNullable(parentEvent); expect(parentEvent.run.spanId).toBe(parentRun.spanId); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 4da75361ca..303095a336 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -47,7 +47,7 @@ import { EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { DequeuedMessage, RunExecutionData } from "./messages"; -import { isCancellable, isDequeueableExecutionStatus, isExecuting } from "./statuses"; +import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; import { runStatusFromError } from "./errors"; type Options = { From 9741512fec261877a3861bd0a3243aba25aab97c Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 10:38:49 +0000 Subject: [PATCH 120/485] Test for cancelling a non-executing run --- 
.../run-engine/src/engine/index.test.ts | 103 +++++++++++++++++- 1 file changed, 102 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index c18b97a362..15f1cd41e4 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -953,7 +953,108 @@ describe("RunEngine", () => { } ); - //todo cancelling a run (not executing) + containerTest( + "Cancelling a run (not executing)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask]); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, + }); + + let 
cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; + engine.eventBus.on("runCancelled", (result) => { + cancelledEventData.push(result); + }); + + //cancel the parent run + const result = await engine.cancelRun({ + runId: parentRun.id, + completedAt: new Date(), + reason: "Cancelled by the user", + }); + expect(result).toBe("FINISHED"); + + const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); + expect(executionData?.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData?.run.status).toBe("CANCELED"); + + //check emitted event + expect(cancelledEventData.length).toBe(1); + const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); + assertNonNullable(parentEvent); + expect(parentEvent.run.spanId).toBe(parentRun.spanId); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + } finally { + engine.quit(); + } + } + ); //todo bulk cancelling runs From d6da8283b9dc5e167d42e2dad1737cf2157ebaf6 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 7 Nov 2024 11:30:19 +0000 Subject: [PATCH 121/485] move image ref helper to core --- apps/webapp/app/v3/registryProxy.server.ts | 68 +-------------------- packages/core/src/v3/index.ts | 2 + packages/core/src/v3/utils/imageRef.ts | 69 ++++++++++++++++++++++ 3 files changed, 72 insertions(+), 67 deletions(-) create mode 100644 packages/core/src/v3/utils/imageRef.ts diff --git a/apps/webapp/app/v3/registryProxy.server.ts b/apps/webapp/app/v3/registryProxy.server.ts index b06ee89304..df38d6e6ac 100644 --- a/apps/webapp/app/v3/registryProxy.server.ts +++ b/apps/webapp/app/v3/registryProxy.server.ts @@ -13,6 +13,7 @@ import { mkdtemp } from "fs/promises"; import { createReadStream, createWriteStream } from "node:fs"; import { pipeline } from "node:stream/promises"; 
import { unlinkSync } from "fs"; +import { parseDockerImageReference, rebuildDockerImageReference } from "@trigger.dev/core/v3"; const TokenResponseBody = z.object({ token: z.string(), @@ -461,70 +462,3 @@ async function streamRequestBodyToTempFile(request: IncomingMessage): Promise 1) { - parts.digest = atSplit[1]; - imageReference = atSplit[0]; - } - - // Splitting by ':' to separate the tag (if exists) and to ensure it's not part of a port - let colonSplit = imageReference.split(":"); - if (colonSplit.length > 2 || (colonSplit.length === 2 && !colonSplit[1].includes("/"))) { - // It's a tag if there's no '/' in the second part (after colon), or there are more than 2 parts (implying a port number in registry) - parts.tag = colonSplit.pop(); // The last part is the tag - imageReference = colonSplit.join(":"); // Join back in case it was a port number - } - - // Check for registry - let slashIndex = imageReference.indexOf("/"); - if (slashIndex !== -1) { - let potentialRegistry = imageReference.substring(0, slashIndex); - // Validate if the first part is a valid hostname-like string (registry), otherwise treat the entire string as the repo - if ( - potentialRegistry.includes(".") || - potentialRegistry === "localhost" || - potentialRegistry.includes(":") - ) { - parts.registry = potentialRegistry; - parts.repo = imageReference.substring(slashIndex + 1); - } else { - parts.repo = imageReference; // No valid registry found, treat as repo - } - } else { - parts.repo = imageReference; // Only repo is present - } - - return parts; -} - -function rebuildDockerImageReference(parts: DockerImageParts): string { - let imageReference = ""; - - if (parts.registry) { - imageReference += `${parts.registry}/`; - } - - imageReference += parts.repo; // Repo is now guaranteed to be defined - - if (parts.tag) { - imageReference += `:${parts.tag}`; - } - - if (parts.digest) { - imageReference += `@${parts.digest}`; - } - - return imageReference; -} diff --git 
a/packages/core/src/v3/index.ts b/packages/core/src/v3/index.ts index cb94f42c84..8899dfc22b 100644 --- a/packages/core/src/v3/index.ts +++ b/packages/core/src/v3/index.ts @@ -58,6 +58,8 @@ export { type IOPacket, } from "./utils/ioSerialization.js"; +export * from "./utils/imageRef.js"; + export * from "./config.js"; export { getSchemaParseFn, type AnySchemaParseFn, type SchemaParseFn } from "./types/schemas.js"; diff --git a/packages/core/src/v3/utils/imageRef.ts b/packages/core/src/v3/utils/imageRef.ts new file mode 100644 index 0000000000..88efba0fdf --- /dev/null +++ b/packages/core/src/v3/utils/imageRef.ts @@ -0,0 +1,69 @@ +export type DockerImageParts = { + registry?: string; + repo: string; + tag?: string; + digest?: string; +}; + +export function parseDockerImageReference(imageReference: string): DockerImageParts { + const parts: DockerImageParts = { repo: "" }; // Initialize with an empty repo + + // Splitting by '@' to separate the digest (if exists) + const atSplit = imageReference.split("@"); + if (atSplit.length > 1) { + parts.digest = atSplit[1]; + imageReference = atSplit[0] as string; + } + + // Splitting by ':' to separate the tag (if exists) and to ensure it's not part of a port + let colonSplit = imageReference.split(":"); + if ( + colonSplit.length > 2 || + (colonSplit.length === 2 && !(colonSplit[1] as string).includes("/")) + ) { + // It's a tag if there's no '/' in the second part (after colon), or there are more than 2 parts (implying a port number in registry) + parts.tag = colonSplit.pop(); // The last part is the tag + imageReference = colonSplit.join(":"); // Join back in case it was a port number + } + + // Check for registry + let slashIndex = imageReference.indexOf("/"); + if (slashIndex !== -1) { + let potentialRegistry = imageReference.substring(0, slashIndex); + // Validate if the first part is a valid hostname-like string (registry), otherwise treat the entire string as the repo + if ( + potentialRegistry.includes(".") || + 
potentialRegistry === "localhost" || + potentialRegistry.includes(":") + ) { + parts.registry = potentialRegistry; + parts.repo = imageReference.substring(slashIndex + 1); + } else { + parts.repo = imageReference; // No valid registry found, treat as repo + } + } else { + parts.repo = imageReference; // Only repo is present + } + + return parts; +} + +export function rebuildDockerImageReference(parts: DockerImageParts): string { + let imageReference = ""; + + if (parts.registry) { + imageReference += `${parts.registry}/`; + } + + imageReference += parts.repo; // Repo is now guaranteed to be defined + + if (parts.tag) { + imageReference += `:${parts.tag}`; + } + + if (parts.digest) { + imageReference += `@${parts.digest}`; + } + + return imageReference; +} From aa9538d8c04833ac6597352fd17320b93253380b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 7 Nov 2024 11:34:44 +0000 Subject: [PATCH 122/485] unify bundling more --- packages/cli-v3/src/build/buildWorker.ts | 39 +++---------- packages/cli-v3/src/build/bundle.ts | 73 +++++++++++++++++++++++- packages/cli-v3/src/deploy/buildImage.ts | 71 ++++++++++++----------- packages/cli-v3/src/dev/devSession.ts | 63 +++----------------- packages/core/src/v3/build/runtime.ts | 2 +- 5 files changed, 125 insertions(+), 123 deletions(-) diff --git a/packages/cli-v3/src/build/buildWorker.ts b/packages/cli-v3/src/build/buildWorker.ts index 0ab03f1e61..6800f5a69b 100644 --- a/packages/cli-v3/src/build/buildWorker.ts +++ b/packages/cli-v3/src/build/buildWorker.ts @@ -3,7 +3,7 @@ import { DEFAULT_RUNTIME, ResolvedConfig } from "@trigger.dev/core/v3/build"; import { BuildManifest, BuildTarget } from "@trigger.dev/core/v3/schemas"; import { resolveFileSources } from "../utilities/sourceFiles.js"; import { VERSION } from "../version.js"; -import { BundleResult, bundleWorker } from "./bundle.js"; +import { BundleResult, bundleWorker, createBuildManifestFromBundle } from "./bundle.js"; import { 
createBuildContext, notifyExtensionOnBuildComplete, @@ -56,7 +56,7 @@ export async function buildWorker(options: BuildWorkerOptions) { resolvedConfig, options.forcedExternals ); - const buildContext = createBuildContext("deploy", resolvedConfig); + const buildContext = createBuildContext(options.target, resolvedConfig); buildContext.prependExtension(externalsExtension); await notifyExtensionOnBuildStart(buildContext); const pluginsFromExtensions = resolvePluginsForContext(buildContext); @@ -77,35 +77,14 @@ export async function buildWorker(options: BuildWorkerOptions) { options.listener?.onBundleComplete?.(bundleResult); - let buildManifest: BuildManifest = { - contentHash: bundleResult.contentHash, - runtime: resolvedConfig.runtime ?? DEFAULT_RUNTIME, + let buildManifest = await createBuildManifestFromBundle({ + bundle: bundleResult, + destination: options.destination, + resolvedConfig, environment: options.environment, - packageVersion: CORE_VERSION, - cliPackageVersion: VERSION, - target: "deploy", - files: bundleResult.files, - sources: await resolveFileSources(bundleResult.files, resolvedConfig), - config: { - project: resolvedConfig.project, - dirs: resolvedConfig.dirs, - }, - outputPath: options.destination, - runControllerEntryPoint: bundleResult.runControllerEntryPoint ?? deployRunController, - runWorkerEntryPoint: bundleResult.runWorkerEntryPoint ?? deployRunWorker, - indexControllerEntryPoint: bundleResult.indexControllerEntryPoint ?? deployIndexController, - indexWorkerEntryPoint: bundleResult.indexWorkerEntryPoint ?? deployIndexWorker, - loaderEntryPoint: bundleResult.loaderEntryPoint, - configPath: bundleResult.configPath, - customConditions: resolvedConfig.build.conditions ?? [], - deploy: { - env: options.envVars ? options.envVars : {}, - }, - build: {}, - otelImportHook: { - include: resolvedConfig.instrumentedPackageNames ?? 
[], - }, - }; + target: options.target, + envVars: options.envVars, + }); buildManifest = await notifyExtensionOnBuildComplete(buildContext, buildManifest); diff --git a/packages/cli-v3/src/build/bundle.ts b/packages/cli-v3/src/build/bundle.ts index 251e975e22..280c090dae 100644 --- a/packages/cli-v3/src/build/bundle.ts +++ b/packages/cli-v3/src/build/bundle.ts @@ -1,5 +1,5 @@ -import { ResolvedConfig } from "@trigger.dev/core/v3/build"; -import { BuildTarget, TaskFile } from "@trigger.dev/core/v3/schemas"; +import { DEFAULT_RUNTIME, ResolvedConfig } from "@trigger.dev/core/v3/build"; +import { BuildManifest, BuildTarget, TaskFile } from "@trigger.dev/core/v3/schemas"; import * as esbuild from "esbuild"; import { createHash } from "node:crypto"; import { join, relative, resolve } from "node:path"; @@ -7,7 +7,13 @@ import { createFile } from "../utilities/fileSystem.js"; import { logger } from "../utilities/logger.js"; import { deployEntryPoints, + deployIndexController, + deployIndexWorker, + deployRunController, + deployRunWorker, devEntryPoints, + devIndexWorker, + devRunWorker, isIndexControllerForTarget, isIndexWorkerForTarget, isLoaderEntryPoint, @@ -17,6 +23,10 @@ import { telemetryEntryPoint, } from "./packageModules.js"; import { buildPlugins } from "./plugins.js"; +import { VERSION } from "../../../core/src/version.js"; +import { CORE_VERSION } from "@trigger.dev/core/v3"; +import { resolveFileSources } from "../utilities/sourceFiles.js"; +import { copyManifestToDir } from "./manifests.js"; export interface BundleOptions { target: BuildTarget; @@ -268,3 +278,62 @@ export function logBuildFailure(errors: esbuild.Message[], warnings: esbuild.Mes } logBuildWarnings(warnings); } + +export async function createBuildManifestFromBundle({ + bundle, + destination, + resolvedConfig, + workerDir, + environment, + target, + envVars, +}: { + bundle: BundleResult; + destination: string; + resolvedConfig: ResolvedConfig; + workerDir?: string; + environment: string; + 
target: BuildTarget; + envVars?: Record; +}): Promise { + const buildManifest: BuildManifest = { + contentHash: bundle.contentHash, + runtime: resolvedConfig.runtime ?? DEFAULT_RUNTIME, + environment: environment, + packageVersion: CORE_VERSION, + cliPackageVersion: VERSION, + target: target, + files: bundle.files, + sources: await resolveFileSources(bundle.files, resolvedConfig), + externals: [], + config: { + project: resolvedConfig.project, + dirs: resolvedConfig.dirs, + }, + outputPath: destination, + indexControllerEntryPoint: + bundle.indexControllerEntryPoint ?? target === "deploy" ? deployIndexController : undefined, + indexWorkerEntryPoint: + bundle.indexWorkerEntryPoint ?? target === "deploy" ? deployIndexWorker : devIndexWorker, + runControllerEntryPoint: + bundle.runControllerEntryPoint ?? target === "deploy" ? deployRunController : undefined, + runWorkerEntryPoint: + bundle.runWorkerEntryPoint ?? target === "deploy" ? deployRunWorker : devRunWorker, + loaderEntryPoint: bundle.loaderEntryPoint, + configPath: bundle.configPath, + customConditions: resolvedConfig.build.conditions ?? [], + deploy: { + env: envVars ?? {}, + }, + build: {}, + otelImportHook: { + include: resolvedConfig.instrumentedPackageNames ?? 
[], + }, + }; + + if (!workerDir) { + return buildManifest; + } + + return copyManifestToDir(buildManifest, destination, workerDir); +} diff --git a/packages/cli-v3/src/deploy/buildImage.ts b/packages/cli-v3/src/deploy/buildImage.ts index fa626f29ff..646f1d3cc2 100644 --- a/packages/cli-v3/src/deploy/buildImage.ts +++ b/packages/cli-v3/src/deploy/buildImage.ts @@ -41,51 +41,50 @@ export interface BuildImageOptions { deploymentSpinner?: any; // Replace 'any' with the actual type if known } -export async function buildImage(options: BuildImageOptions) { - const { - selfHosted, - buildPlatform, - noCache, - push, - registry, - loadImage, - registryHost, - authAccessToken, - imageTag, - deploymentId, - deploymentVersion, - contentHash, - externalBuildId, - externalBuildToken, - externalBuildProjectId, - compilationPath, - projectId, - projectRef, - extraCACerts, - apiUrl, - apiKey, - buildEnvVars, - } = options; - +export async function buildImage({ + selfHosted, + buildPlatform, + noCache, + push, + registry, + loadImage, + registryHost, + authAccessToken, + imageTag, + deploymentId, + deploymentVersion, + contentHash, + externalBuildId, + externalBuildToken, + externalBuildProjectId, + compilationPath, + projectId, + projectRef, + extraCACerts, + apiUrl, + apiKey, + buildEnvVars, + network, +}: BuildImageOptions) { if (selfHosted) { return selfHostedBuildImage({ - registryHost: registryHost, - imageTag: imageTag, + registryHost, + imageTag, cwd: compilationPath, - projectId: projectId, - deploymentId: deploymentId, - deploymentVersion: deploymentVersion, - contentHash: contentHash, - projectRef: projectRef, + projectId, + deploymentId, + deploymentVersion, + contentHash, + projectRef, buildPlatform: buildPlatform, pushImage: push, selfHostedRegistry: !!registry, - noCache: noCache, - extraCACerts: extraCACerts, + noCache, + extraCACerts, apiUrl, apiKey, buildEnvVars, - network: options.network, + network, }); } diff --git a/packages/cli-v3/src/dev/devSession.ts 
b/packages/cli-v3/src/dev/devSession.ts index 2de079ffbc..6d9e1529fe 100644 --- a/packages/cli-v3/src/dev/devSession.ts +++ b/packages/cli-v3/src/dev/devSession.ts @@ -1,11 +1,10 @@ -import { CORE_VERSION } from "@trigger.dev/core/v3"; -import { DEFAULT_RUNTIME, ResolvedConfig } from "@trigger.dev/core/v3/build"; -import { BuildManifest } from "@trigger.dev/core/v3/schemas"; +import { ResolvedConfig } from "@trigger.dev/core/v3/build"; import * as esbuild from "esbuild"; import { CliApiClient } from "../apiClient.js"; import { BundleResult, bundleWorker, + createBuildManifestFromBundle, getBundleResultFromBuild, logBuildFailure, logBuildWarnings, @@ -17,14 +16,10 @@ import { resolvePluginsForContext, } from "../build/extensions.js"; import { createExternalsBuildExtension, resolveAlwaysExternal } from "../build/externals.js"; -import { copyManifestToDir } from "../build/manifests.js"; -import { devIndexWorker, devRunWorker, telemetryEntryPoint } from "../build/packageModules.js"; import { type DevCommandOptions } from "../commands/dev.js"; import { eventBus } from "../utilities/eventBus.js"; import { logger } from "../utilities/logger.js"; -import { resolveFileSources } from "../utilities/sourceFiles.js"; import { EphemeralDirectory, getTmpDir } from "../utilities/tempDirectories.js"; -import { VERSION } from "../version.js"; import { startDevOutput } from "./devOutput.js"; import { startWorkerRuntime } from "./workerRuntime.js"; @@ -82,12 +77,14 @@ export async function startDevSession({ const pluginsFromExtensions = resolvePluginsForContext(buildContext); async function updateBundle(bundle: BundleResult, workerDir?: EphemeralDirectory) { - let buildManifest = await createBuildManifestFromBundle( + let buildManifest = await createBuildManifestFromBundle({ bundle, - destination.path, - rawConfig, - workerDir?.path - ); + destination: destination.path, + resolvedConfig: rawConfig, + workerDir: workerDir?.path, + environment: "dev", + target: "dev", + }); 
logger.debug("Created build manifest from bundle", { buildManifest }); @@ -182,45 +179,3 @@ export async function startDevSession({ }, }; } - -async function createBuildManifestFromBundle( - bundle: BundleResult, - destination: string, - resolvedConfig: ResolvedConfig, - workerDir: string | undefined -): Promise { - const buildManifest: BuildManifest = { - contentHash: bundle.contentHash, - runtime: resolvedConfig.runtime ?? DEFAULT_RUNTIME, - cliPackageVersion: VERSION, - packageVersion: CORE_VERSION, - environment: "dev", - target: "dev", - files: bundle.files, - sources: await resolveFileSources(bundle.files, resolvedConfig), - externals: [], - config: { - project: resolvedConfig.project, - dirs: resolvedConfig.dirs, - }, - outputPath: destination, - runWorkerEntryPoint: bundle.runWorkerEntryPoint ?? devRunWorker, - indexWorkerEntryPoint: bundle.indexWorkerEntryPoint ?? devIndexWorker, - loaderEntryPoint: bundle.loaderEntryPoint, - configPath: bundle.configPath, - customConditions: resolvedConfig.build.conditions ?? [], - deploy: { - env: {}, - }, - build: {}, - otelImportHook: { - include: resolvedConfig.instrumentedPackageNames ?? 
[], - }, - }; - - if (!workerDir) { - return buildManifest; - } - - return copyManifestToDir(buildManifest, destination, workerDir); -} diff --git a/packages/core/src/v3/build/runtime.ts b/packages/core/src/v3/build/runtime.ts index d473f0f7f8..3fc7126e21 100644 --- a/packages/core/src/v3/build/runtime.ts +++ b/packages/core/src/v3/build/runtime.ts @@ -2,7 +2,7 @@ import { join } from "node:path"; import { pathToFileURL } from "url"; import { BuildRuntime } from "../schemas/build.js"; -export const DEFAULT_RUNTIME: BuildRuntime = "node"; +export const DEFAULT_RUNTIME = "node" satisfies BuildRuntime; export function binaryForRuntime(runtime: BuildRuntime): string { switch (runtime) { From f7d2c4c0258c7ebccc9f25b56a46860418f8de8a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 7 Nov 2024 11:35:43 +0000 Subject: [PATCH 123/485] add missing node-22 case --- packages/core/src/v3/build/runtime.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/core/src/v3/build/runtime.ts b/packages/core/src/v3/build/runtime.ts index 3fc7126e21..2504d1e3a0 100644 --- a/packages/core/src/v3/build/runtime.ts +++ b/packages/core/src/v3/build/runtime.ts @@ -41,7 +41,8 @@ export type ExecOptions = { export function execOptionsForRuntime(runtime: BuildRuntime, options: ExecOptions): string { switch (runtime) { - case "node": { + case "node": + case "node-22": { const importEntryPoint = options.loaderEntryPoint ? 
`--import=${pathToFileURL(options.loaderEntryPoint).href}` : undefined; From 9c93498efdcd26a613314aa89ea9467aac97b1b5 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 11:55:35 +0000 Subject: [PATCH 124/485] Rescheduling test --- .../run-engine/src/engine/index.test.ts | 95 +++++++++++++++- .../run-engine/src/engine/index.ts | 105 ++++++++++++++++++ 2 files changed, 199 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 15f1cd41e4..b9d1a0ae68 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -1143,7 +1143,100 @@ describe("RunEngine", () => { } }); - //todo extending the delay of a run + containerTest( + "Rescheduling a delayed run", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: 
"t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + delayUntil: new Date(Date.now() + 200), + }, + prisma + ); + + //should be created but not queued yet + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); + + await engine.rescheduleRun({ runId: run.id, delayUntil: new Date(Date.now() + 1_000) }); + + //wait for 1.5 seconds + await setTimeout(500); + + //should still be created + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("RUN_CREATED"); + + //wait for 1.5 seconds + await setTimeout(1_500); + + //should now be queued + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); + } finally { + engine.quit(); + } + } + ); //todo expiring a run containerTest("Run expiring (ttl)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 303095a336..fcc91395ac 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -6,6 +6,7 @@ import { MachinePresetName, parsePacket, QueueOptions, + RescheduleRunRequestBody, sanitizeError, shouldRetryError, TaskRunError, @@ -1338,6 +1339,69 @@ export class RunEngine { }); } + /** + * Reschedules a delayed run where the run hasn't been queued yet + */ + async rescheduleRun({ + runId, + delayUntil, + tx, + }: { + runId: string; + delayUntil: Date; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? 
this.prisma; + return this.#trace("rescheduleRun", { runId }, async (span) => { + await this.runLock.lock([runId], 5_000, async (signal) => { + const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + + //if the run isn't just created then we can't reschedule it + if (snapshot.executionStatus !== "RUN_CREATED") { + throw new ServiceValidationError("Cannot reschedule a run that is not delayed"); + } + + const updatedRun = await prisma.taskRun.update({ + where: { + id: runId, + }, + data: { + delayUntil: delayUntil, + executionSnapshots: { + create: { + engine: "V2", + executionStatus: "RUN_CREATED", + description: "Delayed run was rescheduled to a future date", + runStatus: "EXPIRED", + }, + }, + }, + include: { + blockedByWaitpoints: true, + }, + }); + + if (updatedRun.blockedByWaitpoints.length === 0) { + throw new ServiceValidationError( + "Cannot reschedule a run that is not blocked by a waitpoint" + ); + } + + const result = await this.#rescheduleDateTimeWaitpoint( + prisma, + updatedRun.blockedByWaitpoints[0].waitpointId, + delayUntil + ); + + if (!result.success) { + throw new ServiceValidationError("Failed to reschedule waitpoint, too late.", 400); + } + + return updatedRun; + }); + }); + } + /** This completes a waitpoint and updates all entries so the run isn't blocked, * if they're no longer blocked. This doesn't suffer from race conditions. 
*/ async completeWaitpoint({ @@ -1978,6 +2042,47 @@ export class RunEngine { return waitpoint; } + async #rescheduleDateTimeWaitpoint( + tx: PrismaClientOrTransaction, + waitpointId: string, + completedAfter: Date + ): Promise<{ success: true } | { success: false; error: string }> { + try { + const updatedWaitpoint = await tx.waitpoint.update({ + where: { id: waitpointId, status: "PENDING" }, + data: { + completedAfter, + }, + }); + } catch (error) { + if (error instanceof Prisma.PrismaClientKnownRequestError && error.code === "P2025") { + return { + success: false, + error: "Waitpoint doesn't exist or is already completed", + }; + } + + this.logger.error("Error rescheduling waitpoint", { error }); + + return { + success: false, + error: "An unknown error occurred", + }; + } + + //reschedule completion + await this.worker.enqueue({ + id: `waitpointCompleteDateTime.${waitpointId}`, + job: "waitpointCompleteDateTime", + payload: { waitpointId: waitpointId }, + availableAt: completedAfter, + }); + + return { + success: true, + }; + } + async #blockRunWithWaitpoint( tx: PrismaClientOrTransaction, { orgId, runId, waitpoint }: { orgId: string; runId: string; waitpoint: Waitpoint } From a67a8743bf9d37cdeaa69fd8207f753cebe22c5e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 11:58:41 +0000 Subject: [PATCH 125/485] Made the rescheduling test a bit more robust/harder to fluke --- internal-packages/run-engine/src/engine/index.test.ts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index b9d1a0ae68..978b8d09a7 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -1215,18 +1215,18 @@ describe("RunEngine", () => { assertNonNullable(executionData); expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); - await engine.rescheduleRun({ 
runId: run.id, delayUntil: new Date(Date.now() + 1_000) }); + await engine.rescheduleRun({ runId: run.id, delayUntil: new Date(Date.now() + 1_500) }); - //wait for 1.5 seconds - await setTimeout(500); + //wait so the initial delay passes + await setTimeout(1_000); //should still be created const executionData2 = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData2); expect(executionData2.snapshot.executionStatus).toBe("RUN_CREATED"); - //wait for 1.5 seconds - await setTimeout(1_500); + //wait so the updated delay passes + await setTimeout(1_750); //should now be queued const executionData3 = await engine.getRunExecutionData({ runId: run.id }); From 915e359be98b2355b68245b466ea32b6459a0f07 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 7 Nov 2024 13:06:33 +0000 Subject: [PATCH 126/485] fix cli version import --- packages/cli-v3/src/build/bundle.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/cli-v3/src/build/bundle.ts b/packages/cli-v3/src/build/bundle.ts index 280c090dae..2de7941969 100644 --- a/packages/cli-v3/src/build/bundle.ts +++ b/packages/cli-v3/src/build/bundle.ts @@ -23,10 +23,10 @@ import { telemetryEntryPoint, } from "./packageModules.js"; import { buildPlugins } from "./plugins.js"; -import { VERSION } from "../../../core/src/version.js"; import { CORE_VERSION } from "@trigger.dev/core/v3"; import { resolveFileSources } from "../utilities/sourceFiles.js"; import { copyManifestToDir } from "./manifests.js"; +import { VERSION } from "../version.js"; export interface BundleOptions { target: BuildTarget; From df14586a02d449fa5fc7fc50b6c062fb901397af Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 16:02:45 +0000 Subject: [PATCH 127/485] Use the eventBus to send worker notifications out of the run engine --- .../run-engine/src/engine/eventBus.ts | 8 ++++++++ .../run-engine/src/engine/index.test.ts | 14 ++++++++++++- 
.../run-engine/src/engine/index.ts | 20 ++++++++++++------- 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 3d541c924c..9f6e2a70cb 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -59,6 +59,14 @@ export type EventBusEvents = { }; }, ]; + workerNotification: [ + { + time: Date; + run: { + id: string; + }; + }, + ]; executionSnapshotCreated: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts index 978b8d09a7..2367bef60f 100644 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ b/internal-packages/run-engine/src/engine/index.test.ts @@ -865,6 +865,11 @@ describe("RunEngine", () => { snapshotId: dequeuedChild[0].snapshot.id, }); + let workerNotifications: EventBusEventArgs<"workerNotification">[0][] = []; + engine.eventBus.on("workerNotification", (result) => { + workerNotifications.push(result); + }); + //cancel the parent run const result = await engine.cancelRun({ runId: parentRun.id, @@ -873,6 +878,10 @@ describe("RunEngine", () => { }); expect(result).toBe("PENDING_CANCEL"); + //check a worker notification was sent for the running parent + expect(workerNotifications).toHaveLength(1); + expect(workerNotifications[0].run.id).toBe(parentRun.id); + const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); expect(executionData?.snapshot.executionStatus).toBe("PENDING_CANCEL"); expect(executionData?.run.status).toBe("CANCELED"); @@ -910,6 +919,10 @@ describe("RunEngine", () => { //cancelling children is async, so we need to wait a brief moment await setTimeout(200); + //check a worker notification was sent for the running parent + expect(workerNotifications).toHaveLength(2); + expect(workerNotifications[1].run.id).toBe(childRun.id); + //child 
should now be pending cancel const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); expect(childExecutionDataAfter?.snapshot.executionStatus).toBe("PENDING_CANCEL"); @@ -1238,7 +1251,6 @@ describe("RunEngine", () => { } ); - //todo expiring a run containerTest("Run expiring (ttl)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index fcc91395ac..84136b4f30 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1248,7 +1248,7 @@ export class RunEngine { //is pending cancellation and we're not finalizing, alert the worker again if (latestSnapshot.executionStatus === "PENDING_CANCEL" && !finalizeRun) { - await this.#sendRunChangedNotificationToWorker({ runId }); + await this.#sendNotificationToWorker({ runId }); return "PENDING_CANCEL" as const; } @@ -1289,7 +1289,8 @@ export class RunEngine { }, }); - await this.#sendRunChangedNotificationToWorker({ runId }); + //the worker needs to be notified so it can kill the run and complete the attempt + await this.#sendNotificationToWorker({ runId }); return "PENDING_CANCEL" as const; } @@ -1772,8 +1773,8 @@ export class RunEngine { if ( retriableError && completion.retry !== undefined && - (latestSnapshot.attemptNumber === null || - latestSnapshot.attemptNumber < MAX_TASK_RUN_ATTEMPTS) + latestSnapshot.attemptNumber !== null && + latestSnapshot.attemptNumber < MAX_TASK_RUN_ATTEMPTS ) { const retryAt = new Date(completion.retry.timestamp); @@ -1836,7 +1837,8 @@ export class RunEngine { description: "Attempt failed wth a short delay, starting a new attempt.", }, }); - await this.#sendRunChangedNotificationToWorker({ runId }); + //the worker can fetch the latest snapshot and should create a new 
attempt + await this.#sendNotificationToWorker({ runId }); } return "RETRY_IMMEDIATELY" as const; @@ -2384,8 +2386,12 @@ export class RunEngine { //#endregion - async #sendRunChangedNotificationToWorker({ runId }: { runId: string }) { - //todo: implement + /** + * Sends a notification that a run has changed and we need to fetch the latest run state. + * The worker will call `getRunExecutionData` via the API and act accordingly. + */ + async #sendNotificationToWorker({ runId }: { runId: string }) { + this.eventBus.emit("workerNotification", { time: new Date(), run: { id: runId } }); } async #getAuthenticatedEnvironmentFromRun(runId: string, tx?: PrismaClientOrTransaction) { From 7f2ef4295e0a3fb4f8ea9b8cae6ace449d268179 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 16:41:57 +0000 Subject: [PATCH 128/485] Update the snapshot with the heartbeat time when we receive one, make the heartbeat timeouts configurable via engine options --- .../run-engine/src/engine/index.ts | 54 ++++++++++++++----- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 84136b4f30..9bc46646bd 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -64,9 +64,17 @@ type Options = { }; /** If not set then checkpoints won't ever be used */ retryWarmStartThresholdMs?: number; + heartbeatTimeouts?: Partial; tracer: Tracer; }; +type HeartbeatTimeouts = { + PENDING_EXECUTING: number; + PENDING_CANCEL: number; + EXECUTING: number; + EXECUTING_WITH_WAITPOINTS: number; +}; + type MachineResources = { cpu: number; memory: number; @@ -149,6 +157,7 @@ export class RunEngine { private worker: EngineWorker; private logger = new Logger("RunEngine", "debug"); private tracer: Tracer; + private heartbeatTimeouts: HeartbeatTimeouts; eventBus = new EventEmitter(); constructor(private readonly options: Options) { @@ -196,6 
+205,17 @@ export class RunEngine { }); this.tracer = options.tracer; + + const defaultHeartbeatTimeouts: HeartbeatTimeouts = { + PENDING_EXECUTING: 60, + PENDING_CANCEL: 60, + EXECUTING: 60, + EXECUTING_WITH_WAITPOINTS: 60, + }; + this.heartbeatTimeouts = { + ...defaultHeartbeatTimeouts, + ...(options.heartbeatTimeouts ?? {}), + }; } //MARK: - Run functions @@ -1549,7 +1569,10 @@ export class RunEngine { snapshotId: string; tx?: PrismaClientOrTransaction; }) { - const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? this.prisma, runId); + const prisma = tx ?? this.prisma; + + //we don't need to acquire a run lock for any of this, it's not critical if it happens on an older version + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { this.logger.log("heartbeatRun no longer the latest snapshot, stopping the heartbeat.", { runId, @@ -1561,7 +1584,15 @@ export class RunEngine { return; } - //it's the same as creating a new heartbeat + //update the snapshot heartbeat time + await prisma.taskRunExecutionSnapshot.update({ + where: { id: latestSnapshot.id }, + data: { + lastHeartbeatAt: new Date(), + }, + }); + + //extending is the same as creating a new heartbeat await this.#setHeartbeatDeadline({ runId, snapshotId, status: latestSnapshot.executionStatus }); } @@ -2206,23 +2237,20 @@ export class RunEngine { #getHeartbeatInterval(status: TaskRunExecutionStatus): number | null { switch (status) { - case "RUN_CREATED": - case "FINISHED": - case "BLOCKED_BY_WAITPOINTS": - case "QUEUED": { - //we don't need to heartbeat these statuses - return null; + case "PENDING_EXECUTING": { + return this.heartbeatTimeouts.PENDING_EXECUTING; } - case "PENDING_EXECUTING": case "PENDING_CANCEL": { - return 60; + return this.heartbeatTimeouts.PENDING_CANCEL; + } + case "EXECUTING": { + return this.heartbeatTimeouts.EXECUTING; } - case "EXECUTING": case "EXECUTING_WITH_WAITPOINTS": { - return 60 * 15; + return 
this.heartbeatTimeouts.EXECUTING_WITH_WAITPOINTS; } default: { - assertNever(status); + return null; } } } From 66256fcc312b859b41464a9c79c144867672de62 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 17:23:19 +0000 Subject: [PATCH 129/485] Split the run engine tests into separate files, it was getting too crazy --- internal-packages/run-engine/package.json | 2 +- .../run-engine/src/engine/index.test.ts | 1349 ----------------- .../src/engine/tests/batchTrigger.test.ts | 16 + .../engine/tests/batchTriggerAndWait.test.ts | 16 + .../src/engine/tests/cancelling.test.ts | 332 ++++ .../src/engine/tests/checkpoints.test.ts | 17 + .../src/engine/tests/delays.test.ts | 188 +++ .../src/engine/tests/heartbeats.test.ts | 18 + .../src/engine/tests/trigger.test.ts | 465 ++++++ .../src/engine/tests/triggerAndWait.test.ts | 193 +++ .../run-engine/src/engine/tests/ttl.test.ts | 109 ++ .../src/engine/tests/waitForDuration.test.ts | 107 ++ internal-packages/run-engine/vitest.config.ts | 6 + internal-packages/testcontainers/src/index.ts | 1 + internal-packages/testcontainers/src/utils.ts | 6 + 15 files changed, 1475 insertions(+), 1350 deletions(-) delete mode 100644 internal-packages/run-engine/src/engine/index.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/cancelling.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/checkpoints.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/delays.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/heartbeats.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/trigger.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/ttl.test.ts 
create mode 100644 internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index 767a4469c9..d0a220bf2a 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -23,6 +23,6 @@ }, "scripts": { "typecheck": "tsc --noEmit", - "test": "vitest" + "test": "vitest --sequence.concurrent=false" } } diff --git a/internal-packages/run-engine/src/engine/index.test.ts b/internal-packages/run-engine/src/engine/index.test.ts deleted file mode 100644 index 2367bef60f..0000000000 --- a/internal-packages/run-engine/src/engine/index.test.ts +++ /dev/null @@ -1,1349 +0,0 @@ -import { - containerTest, - setupAuthenticatedEnvironment, - setupBackgroundWorker, -} from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; -import { expect } from "vitest"; -import { RunEngine } from "./index.js"; -import { setTimeout } from "timers/promises"; -import { EventBusEventArgs } from "./eventBus.js"; - -function assertNonNullable(value: T): asserts value is NonNullable { - expect(value).toBeDefined(); - expect(value).not.toBeNull(); -} - -describe("RunEngine", () => { - containerTest("Single run (success)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const 
taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - expect(run).toBeDefined(); - expect(run.friendlyId).toBe("run_1234"); - - //check it's actually in the db - const runFromDb = await prisma.taskRun.findUnique({ - where: { - friendlyId: "run_1234", - }, - }); - expect(runFromDb).toBeDefined(); - expect(runFromDb?.id).toBe(run.id); - - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("QUEUED"); - - //check the waitpoint is created - const runWaitpoint = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpoint.length).toBe(1); - expect(runWaitpoint[0].type).toBe("RUN"); - - //check the queue length - const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); - expect(queueLength).toBe(1); - - //concurrency before - const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyBefore).toBe(0); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - expect(dequeued.length).toBe(1); - expect(dequeued[0].run.id).toBe(run.id); - expect(dequeued[0].run.attemptNumber).toBe(1); - - const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( - 
authenticatedEnvironment - ); - expect(envConcurrencyAfter).toBe(1); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - expect(attemptResult.run.id).toBe(run.id); - expect(attemptResult.run.status).toBe("EXECUTING"); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); - expect(executionData2.run.attemptNumber).toBe(1); - expect(executionData2.run.status).toBe("EXECUTING"); - - let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; - engine.eventBus.on("runSucceeded", (result) => { - event = result; - }); - - //complete the run - const result = await engine.completeRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: attemptResult.snapshot.id, - completion: { - ok: true, - id: dequeued[0].run.id, - output: `{"foo":"bar"}`, - outputType: "application/json", - }, - }); - expect(result).toBe("COMPLETED"); - - //state should be completed - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData3.run.attemptNumber).toBe(1); - expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); - - //event - assertNonNullable(event); - const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; - expect(completedEvent.run.spanId).toBe(run.spanId); - expect(completedEvent.run.output).toBe('{"foo":"bar"}'); - expect(completedEvent.run.outputType).toBe("application/json"); - - //concurrency should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); - - //waitpoint should 
have been completed, with the output - const runWaitpointAfter = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpointAfter.length).toBe(1); - expect(runWaitpointAfter[0].type).toBe("RUN"); - expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); - } finally { - engine.quit(); - } - }); - - containerTest("Single run (failed)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - - 
//fail the attempt - const error = { - type: "BUILT_IN_ERROR" as const, - name: "UserError", - message: "This is a user error", - stackTrace: "Error: This is a user error\n at :1:1", - }; - const result = await engine.completeRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: attemptResult.snapshot.id, - completion: { - ok: false, - id: dequeued[0].run.id, - error, - }, - }); - expect(result).toBe("COMPLETED"); - - //state should be completed - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData3.run.attemptNumber).toBe(1); - expect(executionData3.run.status).toBe("COMPLETED_WITH_ERRORS"); - - //concurrency should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); - - //waitpoint should have been completed, with the output - const runWaitpointAfter = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpointAfter.length).toBe(1); - expect(runWaitpointAfter[0].type).toBe("RUN"); - const output = JSON.parse(runWaitpointAfter[0].output as string); - expect(output.type).toBe(error.type); - expect(runWaitpointAfter[0].outputIsError).toBe(true); - } finally { - engine.quit(); - } - }); - - containerTest( - "Single run (retry attempt, then succeed)", - { timeout: 15_000 }, - async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: 
"small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - - //fail the attempt - const error = { - type: "BUILT_IN_ERROR" as const, - name: "UserError", - message: "This is a user error", - stackTrace: "Error: This is a user error\n at :1:1", - }; - const result = await engine.completeRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: attemptResult.snapshot.id, - completion: { - ok: false, - id: dequeued[0].run.id, - error, - retry: { - timestamp: Date.now(), - delay: 0, - }, - }, - }); - expect(result).toBe("RETRY_IMMEDIATELY"); - - //state should be completed - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); - //only when the new attempt is created, should the attempt be increased - expect(executionData3.run.attemptNumber).toBe(1); - 
expect(executionData3.run.status).toBe("RETRYING_AFTER_FAILURE"); - - //create a second attempt - const attemptResult2 = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: executionData3.snapshot.id, - }); - expect(attemptResult2.run.attemptNumber).toBe(2); - - //now complete it successfully - const result2 = await engine.completeRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: attemptResult2.snapshot.id, - completion: { - ok: true, - id: dequeued[0].run.id, - output: `{"foo":"bar"}`, - outputType: "application/json", - }, - }); - - //waitpoint should have been completed, with the output - const runWaitpointAfter = await prisma.waitpoint.findMany({ - where: { - completedByTaskRunId: run.id, - }, - }); - expect(runWaitpointAfter.length).toBe(1); - expect(runWaitpointAfter[0].type).toBe("RUN"); - expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); - expect(runWaitpointAfter[0].outputIsError).toBe(false); - - //state should be completed - const executionData4 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData4); - expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData4.run.attemptNumber).toBe(2); - expect(executionData4.run.status).toBe("COMPLETED_SUCCESSFULLY"); - } finally { - engine.quit(); - } - } - ); - - containerTest("triggerAndWait", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - 
baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const parentTask = "parent-task"; - const childTask = "child-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); - - //trigger the run - const parentRun = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier: parentTask, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${parentTask}`, - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue parent - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: parentRun.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const initialExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(initialExecutionData); - const attemptResult = await engine.startRunAttempt({ - runId: parentRun.id, - snapshotId: initialExecutionData.snapshot.id, - }); - - const childRun = await engine.trigger( - { - number: 1, - friendlyId: "run_c1234", - environment: authenticatedEnvironment, - taskIdentifier: childTask, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${childTask}`, - isTest: false, - tags: [], - resumeParentOnCompletion: true, - parentTaskRunId: parentRun.id, - }, - prisma - ); - - const childExecutionData = await engine.getRunExecutionData({ runId: childRun.id }); - assertNonNullable(childExecutionData); - expect(childExecutionData.snapshot.executionStatus).toBe("QUEUED"); - - const parentExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(parentExecutionData); - 
expect(parentExecutionData.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - - //check the waitpoint blocking the parent run - const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, - }, - }); - assertNonNullable(runWaitpoint); - expect(runWaitpoint.waitpoint.type).toBe("RUN"); - expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); - - //dequeue the child run - const dequeuedChild = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: childRun.masterQueue, - maxRunCount: 10, - }); - - //start the child run - const childAttempt = await engine.startRunAttempt({ - runId: childRun.id, - snapshotId: dequeuedChild[0].snapshot.id, - }); - - // complete the child run - await engine.completeRunAttempt({ - runId: childRun.id, - snapshotId: childAttempt.snapshot.id, - completion: { - id: childRun.id, - ok: true, - output: '{"foo":"bar"}', - outputType: "application/json", - }, - }); - - //child snapshot - const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); - assertNonNullable(childExecutionDataAfter); - expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); - - const waitpointAfter = await prisma.waitpoint.findFirst({ - where: { - id: runWaitpoint.waitpointId, - }, - }); - expect(waitpointAfter?.completedAt).not.toBeNull(); - expect(waitpointAfter?.status).toBe("COMPLETED"); - expect(waitpointAfter?.output).toBe('{"foo":"bar"}'); - - const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpointAfter).toBeNull(); - - //parent snapshot - const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(parentExecutionDataAfter); - expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("EXECUTING"); - 
expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); - expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); - expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( - childRun.id - ); - expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); - } finally { - engine.quit(); - } - }); - - containerTest("waitForDuration", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - 
}); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - //waitForDuration - const date = new Date(Date.now() + 1000); - const result = await engine.waitForDuration({ - runId: run.id, - snapshotId: attemptResult.snapshot.id, - date, - releaseConcurrency: false, - }); - - expect(result.willWaitUntil.toISOString()).toBe(date.toISOString()); - - const executionData = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - - await setTimeout(1_500); - - const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); - expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING"); - } finally { - engine.quit(); - } - }); - - //todo batchTriggerAndWait - - //todo checkpoints - - //todo heartbeats - - containerTest( - "Cancelling a run with children (that is executing)", - { timeout: 15_000 }, - async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const parentTask = "parent-task"; - const childTask = "child-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); - - //trigger the run - const parentRun = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier: parentTask, - payload: 
"{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${parentTask}`, - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: parentRun.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - //start child run - const childRun = await engine.trigger( - { - number: 1, - friendlyId: "run_c1234", - environment: authenticatedEnvironment, - taskIdentifier: childTask, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${childTask}`, - isTest: false, - tags: [], - resumeParentOnCompletion: true, - parentTaskRunId: parentRun.id, - }, - prisma - ); - - //dequeue the child run - const dequeuedChild = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: childRun.masterQueue, - maxRunCount: 10, - }); - - //start the child run - const childAttempt = await engine.startRunAttempt({ - runId: childRun.id, - snapshotId: dequeuedChild[0].snapshot.id, - }); - - let workerNotifications: EventBusEventArgs<"workerNotification">[0][] = []; - engine.eventBus.on("workerNotification", (result) => { - workerNotifications.push(result); - }); - - //cancel the parent run - const result = await engine.cancelRun({ - runId: parentRun.id, - completedAt: new Date(), - reason: "Cancelled by the user", - }); - expect(result).toBe("PENDING_CANCEL"); - - //check a worker notification was sent for the running parent - expect(workerNotifications).toHaveLength(1); - expect(workerNotifications[0].run.id).toBe(parentRun.id); - - const 
executionData = await engine.getRunExecutionData({ runId: parentRun.id }); - expect(executionData?.snapshot.executionStatus).toBe("PENDING_CANCEL"); - expect(executionData?.run.status).toBe("CANCELED"); - - let cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; - engine.eventBus.on("runCancelled", (result) => { - cancelledEventData.push(result); - }); - - //todo call completeAttempt (this will happen from the worker) - const completeResult = await engine.completeRunAttempt({ - runId: parentRun.id, - snapshotId: executionData!.snapshot.id, - completion: { - ok: false, - id: executionData!.run.id, - error: { - type: "INTERNAL_ERROR" as const, - code: "TASK_RUN_CANCELLED" as const, - }, - }, - }); - - //parent should now be fully cancelled - const executionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); - expect(executionDataAfter?.snapshot.executionStatus).toBe("FINISHED"); - expect(executionDataAfter?.run.status).toBe("CANCELED"); - - //check emitted event - expect(cancelledEventData.length).toBe(1); - const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); - assertNonNullable(parentEvent); - expect(parentEvent.run.spanId).toBe(parentRun.spanId); - - //cancelling children is async, so we need to wait a brief moment - await setTimeout(200); - - //check a worker notification was sent for the running parent - expect(workerNotifications).toHaveLength(2); - expect(workerNotifications[1].run.id).toBe(childRun.id); - - //child should now be pending cancel - const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); - expect(childExecutionDataAfter?.snapshot.executionStatus).toBe("PENDING_CANCEL"); - expect(childExecutionDataAfter?.run.status).toBe("CANCELED"); - - //cancel the child (this will come from the worker) - const completeChildResult = await engine.completeRunAttempt({ - runId: childRun.id, - snapshotId: childExecutionDataAfter!.snapshot.id, - completion: { - ok: false, - 
id: childRun.id, - error: { - type: "INTERNAL_ERROR" as const, - code: "TASK_RUN_CANCELLED" as const, - }, - }, - }); - - //child should now be pending cancel - const childExecutionDataCancelled = await engine.getRunExecutionData({ - runId: childRun.id, - }); - expect(childExecutionDataCancelled?.snapshot.executionStatus).toBe("FINISHED"); - expect(childExecutionDataCancelled?.run.status).toBe("CANCELED"); - - //check emitted event - expect(cancelledEventData.length).toBe(2); - const childEvent = cancelledEventData.find((r) => r.run.id === childRun.id); - assertNonNullable(childEvent); - expect(childEvent.run.spanId).toBe(childRun.spanId); - - //concurrency should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); - } finally { - engine.quit(); - } - } - ); - - containerTest( - "Cancelling a run (not executing)", - { timeout: 15_000 }, - async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const parentTask = "parent-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask]); - - //trigger the run - const parentRun = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier: parentTask, - 
payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${parentTask}`, - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: parentRun.masterQueue, - maxRunCount: 10, - }); - - let cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; - engine.eventBus.on("runCancelled", (result) => { - cancelledEventData.push(result); - }); - - //cancel the parent run - const result = await engine.cancelRun({ - runId: parentRun.id, - completedAt: new Date(), - reason: "Cancelled by the user", - }); - expect(result).toBe("FINISHED"); - - const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); - expect(executionData?.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData?.run.status).toBe("CANCELED"); - - //check emitted event - expect(cancelledEventData.length).toBe(1); - const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); - assertNonNullable(parentEvent); - expect(parentEvent.run.spanId).toBe(parentRun.spanId); - - //concurrency should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); - } finally { - engine.quit(); - } - } - ); - - //todo bulk cancelling runs - - //todo crashed run - - //todo system failure run - - containerTest("Run start delayed", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - 
tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - delayUntil: new Date(Date.now() + 500), - }, - prisma - ); - - //should be created but not queued yet - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); - - //wait for 1 seconds - await setTimeout(1_000); - - //should now be queued - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); - } finally { - engine.quit(); - } - }); - - containerTest( - "Rescheduling a delayed run", - { timeout: 15_000 }, - async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", 
- machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - delayUntil: new Date(Date.now() + 200), - }, - prisma - ); - - //should be created but not queued yet - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); - - await engine.rescheduleRun({ runId: run.id, delayUntil: new Date(Date.now() + 1_500) }); - - //wait so the initial delay passes - await setTimeout(1_000); - - //should still be created - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("RUN_CREATED"); - - //wait so the updated delay passes - await setTimeout(1_750); - - //should now be queued - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); - } finally { - engine.quit(); - } - } - ); - - containerTest("Run expiring (ttl)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ 
- prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - ttl: "1s", - }, - prisma - ); - - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("QUEUED"); - - let expiredEventData: EventBusEventArgs<"runExpired">[0] | undefined = undefined; - engine.eventBus.on("runExpired", (result) => { - expiredEventData = result; - }); - - //wait for 1 seconds - await setTimeout(1_000); - - assertNonNullable(expiredEventData); - const assertedExpiredEventData = expiredEventData as EventBusEventArgs<"runExpired">[0]; - expect(assertedExpiredEventData.run.spanId).toBe(run.spanId); - - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData2.run.attemptNumber).toBe(undefined); - expect(executionData2.run.status).toBe("EXPIRED"); - - //concurrency 
should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); - } finally { - engine.quit(); - } - }); -}); diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts new file mode 100644 index 0000000000..731c4964e6 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -0,0 +1,16 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; +import { EventBusEventArgs } from "../eventBus.js"; + +describe("RunEngine batchTrigger", () => { + //todo batchTrigger tests + test("empty test", async () => {}); +}); diff --git a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts new file mode 100644 index 0000000000..c7fc43a89e --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts @@ -0,0 +1,16 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; +import { EventBusEventArgs } from "../eventBus.js"; + +describe("RunEngine batchTriggerAndWait", () => { + //todo batchTriggerAndWait tests + test("empty test", async () => {}); +}); diff --git a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts new file mode 
100644 index 0000000000..cda76200cc --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts @@ -0,0 +1,332 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; +import { EventBusEventArgs } from "../eventBus.js"; + +describe("RunEngine cancelling", () => { + containerTest( + "Cancelling a run with children (that is executing)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + const childTask = "child-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await 
engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //start child run + const childRun = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + }, + prisma + ); + + //dequeue the child run + const dequeuedChild = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: childRun.masterQueue, + maxRunCount: 10, + }); + + //start the child run + const childAttempt = await engine.startRunAttempt({ + runId: childRun.id, + snapshotId: dequeuedChild[0].snapshot.id, + }); + + let workerNotifications: EventBusEventArgs<"workerNotification">[0][] = []; + engine.eventBus.on("workerNotification", (result) => { + workerNotifications.push(result); + }); + + //cancel the parent run + const result = await engine.cancelRun({ + runId: parentRun.id, + completedAt: new Date(), + reason: "Cancelled by the user", + }); + expect(result).toBe("PENDING_CANCEL"); + + //check a worker notification was sent for the running parent + expect(workerNotifications).toHaveLength(1); + expect(workerNotifications[0].run.id).toBe(parentRun.id); + + const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); + expect(executionData?.snapshot.executionStatus).toBe("PENDING_CANCEL"); + expect(executionData?.run.status).toBe("CANCELED"); + + let cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; + 
engine.eventBus.on("runCancelled", (result) => { + cancelledEventData.push(result); + }); + + //todo call completeAttempt (this will happen from the worker) + const completeResult = await engine.completeRunAttempt({ + runId: parentRun.id, + snapshotId: executionData!.snapshot.id, + completion: { + ok: false, + id: executionData!.run.id, + error: { + type: "INTERNAL_ERROR" as const, + code: "TASK_RUN_CANCELLED" as const, + }, + }, + }); + + //parent should now be fully cancelled + const executionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); + expect(executionDataAfter?.snapshot.executionStatus).toBe("FINISHED"); + expect(executionDataAfter?.run.status).toBe("CANCELED"); + + //check emitted event + expect(cancelledEventData.length).toBe(1); + const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); + assertNonNullable(parentEvent); + expect(parentEvent.run.spanId).toBe(parentRun.spanId); + + //cancelling children is async, so we need to wait a brief moment + await setTimeout(200); + + //check a worker notification was sent for the running parent + expect(workerNotifications).toHaveLength(2); + expect(workerNotifications[1].run.id).toBe(childRun.id); + + //child should now be pending cancel + const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); + expect(childExecutionDataAfter?.snapshot.executionStatus).toBe("PENDING_CANCEL"); + expect(childExecutionDataAfter?.run.status).toBe("CANCELED"); + + //cancel the child (this will come from the worker) + const completeChildResult = await engine.completeRunAttempt({ + runId: childRun.id, + snapshotId: childExecutionDataAfter!.snapshot.id, + completion: { + ok: false, + id: childRun.id, + error: { + type: "INTERNAL_ERROR" as const, + code: "TASK_RUN_CANCELLED" as const, + }, + }, + }); + + //child should now be pending cancel + const childExecutionDataCancelled = await engine.getRunExecutionData({ + runId: childRun.id, + }); + 
expect(childExecutionDataCancelled?.snapshot.executionStatus).toBe("FINISHED"); + expect(childExecutionDataCancelled?.run.status).toBe("CANCELED"); + + //check emitted event + expect(cancelledEventData.length).toBe(2); + const childEvent = cancelledEventData.find((r) => r.run.id === childRun.id); + assertNonNullable(childEvent); + expect(childEvent.run.spanId).toBe(childRun.spanId); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + } finally { + engine.quit(); + } + } + ); + + containerTest( + "Cancelling a run (not executing)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask]); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const 
dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, + }); + + let cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; + engine.eventBus.on("runCancelled", (result) => { + cancelledEventData.push(result); + }); + + //cancel the parent run + const result = await engine.cancelRun({ + runId: parentRun.id, + completedAt: new Date(), + reason: "Cancelled by the user", + }); + expect(result).toBe("FINISHED"); + + const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); + expect(executionData?.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData?.run.status).toBe("CANCELED"); + + //check emitted event + expect(cancelledEventData.length).toBe(1); + const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); + assertNonNullable(parentEvent); + expect(parentEvent.run.spanId).toBe(parentRun.spanId); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + } finally { + engine.quit(); + } + } + ); + + //todo bulk cancelling runs +}); diff --git a/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts new file mode 100644 index 0000000000..a62747ca0c --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts @@ -0,0 +1,17 @@ +//todo checkpoint tests +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; +import { EventBusEventArgs } from "../eventBus.js"; + +describe("RunEngine checkpoints", () => { + //todo checkpoint tests 
+ test("empty test", async () => {}); +}); diff --git a/internal-packages/run-engine/src/engine/tests/delays.test.ts b/internal-packages/run-engine/src/engine/tests/delays.test.ts new file mode 100644 index 0000000000..0f35b37385 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/delays.test.ts @@ -0,0 +1,188 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; + +describe("RunEngine delays", () => { + containerTest("Run start delayed", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + delayUntil: new 
Date(Date.now() + 500), + }, + prisma + ); + + //should be created but not queued yet + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); + + //wait for 1 seconds + await setTimeout(1_000); + + //should now be queued + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + } finally { + engine.quit(); + } + }); + + containerTest( + "Rescheduling a delayed run", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + delayUntil: new Date(Date.now() + 200), + }, + prisma + ); + + //should be created but not queued yet + 
const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); + + await engine.rescheduleRun({ runId: run.id, delayUntil: new Date(Date.now() + 1_500) }); + + //wait so the initial delay passes + await setTimeout(1_000); + + //should still be created + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("RUN_CREATED"); + + //wait so the updated delay passes + await setTimeout(1_750); + + //should now be queued + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); + } finally { + engine.quit(); + } + } + ); +}); diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts new file mode 100644 index 0000000000..05c7832029 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -0,0 +1,18 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; +import { EventBusEventArgs } from "../eventBus.js"; + +describe("RunEngine", () => { + //todo heartbeat coming through, updating the snapshot and then succeeding + test("empty test", async () => {}); + + //todo heartbeat failing and the run eventually failing with a system failure +}); diff --git a/internal-packages/run-engine/src/engine/tests/trigger.test.ts b/internal-packages/run-engine/src/engine/tests/trigger.test.ts new file mode 100644 index 0000000000..95d90a4a76 --- /dev/null +++ 
b/internal-packages/run-engine/src/engine/tests/trigger.test.ts @@ -0,0 +1,465 @@ +import { + assertNonNullable, + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { EventBusEventArgs } from "../eventBus.js"; +import { RunEngine } from "../index.js"; + +describe("RunEngine trigger()", () => { + containerTest("Single run (success)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + expect(run).toBeDefined(); + expect(run.friendlyId).toBe("run_1234"); + + //check it's actually in the db + const runFromDb = await prisma.taskRun.findUnique({ + where: { + friendlyId: "run_1234", + }, + }); + 
expect(runFromDb).toBeDefined(); + expect(runFromDb?.id).toBe(run.id); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("QUEUED"); + + //check the waitpoint is created + const runWaitpoint = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpoint.length).toBe(1); + expect(runWaitpoint[0].type).toBe("RUN"); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); + expect(queueLength).toBe(1); + + //concurrency before + const envConcurrencyBefore = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyBefore).toBe(0); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued.length).toBe(1); + expect(dequeued[0].run.id).toBe(run.id); + expect(dequeued[0].run.attemptNumber).toBe(1); + + const envConcurrencyAfter = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyAfter).toBe(1); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData2.run.attemptNumber).toBe(1); + expect(executionData2.run.status).toBe("EXECUTING"); + + let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; + engine.eventBus.on("runSucceeded", 
(result) => { + event = result; + }); + + //complete the run + const result = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: attemptResult.snapshot.id, + completion: { + ok: true, + id: dequeued[0].run.id, + output: `{"foo":"bar"}`, + outputType: "application/json", + }, + }); + expect(result).toBe("COMPLETED"); + + //state should be completed + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData3.run.attemptNumber).toBe(1); + expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); + + //event + assertNonNullable(event); + const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; + expect(completedEvent.run.spanId).toBe(run.spanId); + expect(completedEvent.run.output).toBe('{"foo":"bar"}'); + expect(completedEvent.run.outputType).toBe("application/json"); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + + //waitpoint should have been completed, with the output + const runWaitpointAfter = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpointAfter.length).toBe(1); + expect(runWaitpointAfter[0].type).toBe("RUN"); + expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); + } finally { + engine.quit(); + } + }); + + containerTest("Single run (failed)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + 
tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + + //fail the attempt + const error = { + type: "BUILT_IN_ERROR" as const, + name: "UserError", + message: "This is a user error", + stackTrace: "Error: This is a user error\n at :1:1", + }; + const result = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: attemptResult.snapshot.id, + completion: { + ok: false, + id: dequeued[0].run.id, + error, + }, + }); + expect(result).toBe("COMPLETED"); + + //state should be completed + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData3.run.attemptNumber).toBe(1); + expect(executionData3.run.status).toBe("COMPLETED_WITH_ERRORS"); + + //concurrency should have been released + 
const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + + //waitpoint should have been completed, with the output + const runWaitpointAfter = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpointAfter.length).toBe(1); + expect(runWaitpointAfter[0].type).toBe("RUN"); + const output = JSON.parse(runWaitpointAfter[0].output as string); + expect(output.type).toBe(error.type); + expect(runWaitpointAfter[0].outputIsError).toBe(true); + } finally { + engine.quit(); + } + }); + + containerTest( + "Single run (retry attempt, then succeed)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + 
//dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + + //fail the attempt + const error = { + type: "BUILT_IN_ERROR" as const, + name: "UserError", + message: "This is a user error", + stackTrace: "Error: This is a user error\n at :1:1", + }; + const result = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: attemptResult.snapshot.id, + completion: { + ok: false, + id: dequeued[0].run.id, + error, + retry: { + timestamp: Date.now(), + delay: 0, + }, + }, + }); + expect(result).toBe("RETRY_IMMEDIATELY"); + + //state should be completed + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + //only when the new attempt is created, should the attempt be increased + expect(executionData3.run.attemptNumber).toBe(1); + expect(executionData3.run.status).toBe("RETRYING_AFTER_FAILURE"); + + //create a second attempt + const attemptResult2 = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: executionData3.snapshot.id, + }); + expect(attemptResult2.run.attemptNumber).toBe(2); + + //now complete it successfully + const result2 = await engine.completeRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: attemptResult2.snapshot.id, + completion: { + ok: true, + id: dequeued[0].run.id, + output: `{"foo":"bar"}`, + outputType: "application/json", + }, + }); + + //waitpoint should have been completed, with the output + const runWaitpointAfter = await prisma.waitpoint.findMany({ + where: { + completedByTaskRunId: run.id, + }, + }); + expect(runWaitpointAfter.length).toBe(1); + expect(runWaitpointAfter[0].type).toBe("RUN"); + 
expect(runWaitpointAfter[0].output).toBe(`{"foo":"bar"}`); + expect(runWaitpointAfter[0].outputIsError).toBe(false); + + //state should be completed + const executionData4 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData4); + expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData4.run.attemptNumber).toBe(2); + expect(executionData4.run.status).toBe("COMPLETED_SUCCESSFULLY"); + } finally { + engine.quit(); + } + } + ); +}); diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts new file mode 100644 index 0000000000..97122eb3f7 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -0,0 +1,193 @@ +import { + assertNonNullable, + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; + +describe("RunEngine triggerAndWait", () => { + containerTest("triggerAndWait", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + const childTask = "child-task"; + + //create background worker + await 
setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue parent + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const initialExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(initialExecutionData); + const attemptResult = await engine.startRunAttempt({ + runId: parentRun.id, + snapshotId: initialExecutionData.snapshot.id, + }); + + const childRun = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + }, + prisma + ); + + const childExecutionData = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionData); + expect(childExecutionData.snapshot.executionStatus).toBe("QUEUED"); + + const parentExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentExecutionData); + expect(parentExecutionData.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //check the waitpoint blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + 
include: { + waitpoint: true, + }, + }); + assertNonNullable(runWaitpoint); + expect(runWaitpoint.waitpoint.type).toBe("RUN"); + expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + + //dequeue the child run + const dequeuedChild = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: childRun.masterQueue, + maxRunCount: 10, + }); + + //start the child run + const childAttempt = await engine.startRunAttempt({ + runId: childRun.id, + snapshotId: dequeuedChild[0].snapshot.id, + }); + + // complete the child run + await engine.completeRunAttempt({ + runId: childRun.id, + snapshotId: childAttempt.snapshot.id, + completion: { + id: childRun.id, + ok: true, + output: '{"foo":"bar"}', + outputType: "application/json", + }, + }); + + //child snapshot + const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionDataAfter); + expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); + + const waitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: runWaitpoint.waitpointId, + }, + }); + expect(waitpointAfter?.completedAt).not.toBeNull(); + expect(waitpointAfter?.status).toBe("COMPLETED"); + expect(waitpointAfter?.output).toBe('{"foo":"bar"}'); + + const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointAfter).toBeNull(); + + //parent snapshot + const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentExecutionDataAfter); + expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("EXECUTING"); + expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); + expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + childRun.id 
+ ); + expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); + } finally { + engine.quit(); + } + }); +}); diff --git a/internal-packages/run-engine/src/engine/tests/ttl.test.ts b/internal-packages/run-engine/src/engine/tests/ttl.test.ts new file mode 100644 index 0000000000..7f0d836672 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/ttl.test.ts @@ -0,0 +1,109 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; +import { EventBusEventArgs } from "../eventBus.js"; + +describe("RunEngine ttl", () => { + containerTest("Run expiring (ttl)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + 
traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + ttl: "1s", + }, + prisma + ); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("QUEUED"); + + let expiredEventData: EventBusEventArgs<"runExpired">[0] | undefined = undefined; + engine.eventBus.on("runExpired", (result) => { + expiredEventData = result; + }); + + //wait for 1 seconds + await setTimeout(1_000); + + assertNonNullable(expiredEventData); + const assertedExpiredEventData = expiredEventData as EventBusEventArgs<"runExpired">[0]; + expect(assertedExpiredEventData.run.spanId).toBe(run.spanId); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData2.run.attemptNumber).toBe(undefined); + expect(executionData2.run.status).toBe("EXPIRED"); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + } finally { + engine.quit(); + } + }); +}); diff --git a/internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts b/internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts new file mode 100644 index 0000000000..2fe410f82b --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts @@ -0,0 +1,107 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; + +describe("RunEngine waitForDuration", () => { + containerTest("waitForDuration", { 
timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //waitForDuration + const date = new Date(Date.now() + 1000); + const result = await engine.waitForDuration({ + runId: run.id, + snapshotId: attemptResult.snapshot.id, + date, + releaseConcurrency: false, + }); + + expect(result.willWaitUntil.toISOString()).toBe(date.toISOString()); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); 
+ expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + await setTimeout(1_500); + + const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); + expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING"); + } finally { + engine.quit(); + } + }); +}); diff --git a/internal-packages/run-engine/vitest.config.ts b/internal-packages/run-engine/vitest.config.ts index 4afd926425..0e8719d265 100644 --- a/internal-packages/run-engine/vitest.config.ts +++ b/internal-packages/run-engine/vitest.config.ts @@ -4,5 +4,11 @@ export default defineConfig({ test: { include: ["**/*.test.ts"], globals: true, + isolate: true, + poolOptions: { + threads: { + singleThread: true, + }, + }, }, }); diff --git a/internal-packages/testcontainers/src/index.ts b/internal-packages/testcontainers/src/index.ts index 9932149f64..acfe02b33e 100644 --- a/internal-packages/testcontainers/src/index.ts +++ b/internal-packages/testcontainers/src/index.ts @@ -8,6 +8,7 @@ import { Network, type StartedNetwork, type StartedTestContainer } from "testcon export { StartedRedisContainer }; export * from "./setup"; +export { assertNonNullable } from "./utils"; type NetworkContext = { network: StartedNetwork }; diff --git a/internal-packages/testcontainers/src/utils.ts b/internal-packages/testcontainers/src/utils.ts index 343c538754..4cee0f57f9 100644 --- a/internal-packages/testcontainers/src/utils.ts +++ b/internal-packages/testcontainers/src/utils.ts @@ -3,6 +3,7 @@ import { RedisContainer } from "@testcontainers/redis"; import path from "path"; import { GenericContainer, StartedNetwork } from "testcontainers"; import { x } from "tinyexec"; +import { expect } from "vitest"; export async function createPostgresContainer(network: StartedNetwork) { const container = await new PostgreSqlContainer("docker.io/postgres:14") @@ -68,3 +69,8 @@ export async function createElectricContainer( origin: 
`http://${container.getHost()}:${container.getMappedPort(3000)}`, }; } + +export function assertNonNullable(value: T): asserts value is NonNullable { + expect(value).toBeDefined(); + expect(value).not.toBeNull(); +} From 391613e93932cf8351c6498f6763fec6c8043cbb Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 19:00:22 +0000 Subject: [PATCH 130/485] Added some better comments to the prisma schema --- internal-packages/database/prisma/schema.prisma | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index cc4fd13e68..98937c2792 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1905,7 +1905,7 @@ enum RunEngineVersion { model TaskRunExecutionSnapshot { id String @id @default(cuid()) - /// This should never be V1 + /// This should always be 2+ (V1 didn't use the run engine or snapshots) engine RunEngineVersion @default(V2) /// The execution status @@ -1922,6 +1922,7 @@ model TaskRunExecutionSnapshot { run TaskRun @relation(fields: [runId], references: [id]) runStatus TaskRunStatus + /// This is the current run attempt number. Users can define how many attempts they want for a run. attemptNumber Int? 
/// Waitpoints that have been completed for this execution From 1640bb22220568a32fa8ed6eadb669532f2fc7f2 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 19:50:47 +0000 Subject: [PATCH 131/485] Use nacking when a heartbeat fails before an attempt is created --- .../run-engine/src/engine/errors.ts | 1 + .../run-engine/src/engine/index.ts | 126 +++++++++++++++--- .../src/engine/tests/heartbeats.test.ts | 101 +++++++++++++- packages/core/src/v3/errors.ts | 1 + packages/core/src/v3/schemas/common.ts | 1 + 5 files changed, 207 insertions(+), 23 deletions(-) diff --git a/internal-packages/run-engine/src/engine/errors.ts b/internal-packages/run-engine/src/engine/errors.ts index bdf5077ee0..94a9d8b94a 100644 --- a/internal-packages/run-engine/src/engine/errors.ts +++ b/internal-packages/run-engine/src/engine/errors.ts @@ -34,6 +34,7 @@ export function runStatusFromError(error: TaskRunError): TaskRunStatus { case "TASK_RUN_HEARTBEAT_TIMEOUT": case "TASK_DEQUEUED_INVALID_STATE": case "TASK_DEQUEUED_QUEUE_NOT_FOUND": + case "TASK_RUN_DEQUEUED_MAX_RETRIES": case "TASK_HAS_N0_EXECUTION_SNAPSHOT": case "GRACEFUL_EXIT_TIMEOUT": case "TASK_INPUT_ERROR": diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 9bc46646bd..5f7cf62202 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -64,7 +64,7 @@ type Options = { }; /** If not set then checkpoints won't ever be used */ retryWarmStartThresholdMs?: number; - heartbeatTimeouts?: Partial; + heartbeatTimeoutsMs?: Partial; tracer: Tracer; }; @@ -207,14 +207,14 @@ export class RunEngine { this.tracer = options.tracer; const defaultHeartbeatTimeouts: HeartbeatTimeouts = { - PENDING_EXECUTING: 60, - PENDING_CANCEL: 60, - EXECUTING: 60, - EXECUTING_WITH_WAITPOINTS: 60, + PENDING_EXECUTING: 60_000, + PENDING_CANCEL: 60_000, + EXECUTING: 60_000, + EXECUTING_WITH_WAITPOINTS: 60_000, }; 
this.heartbeatTimeouts = { ...defaultHeartbeatTimeouts, - ...(options.heartbeatTimeouts ?? {}), + ...(options.heartbeatTimeoutsMs ?? {}), }; } @@ -1850,15 +1850,24 @@ export class RunEngine { this.options.retryWarmStartThresholdMs !== undefined && completion.retry.delay >= this.options.retryWarmStartThresholdMs ) { - //long delay for retry, so requeue - await this.#enqueueRun({ + //we nack the message, this allows another work to pick up the run + const gotRequeued = await this.#tryNackAndRequeue({ run, - env: run.runtimeEnvironment, + orgId: run.runtimeEnvironment.organizationId, timestamp: retryAt.getTime(), + error: { + type: "INTERNAL_ERROR", + code: "TASK_RUN_DEQUEUED_MAX_RETRIES", + message: `We tried to dequeue the run the maximum number of times but it wouldn't start executing`, + }, tx: prisma, }); - return "RETRY_QUEUED" as const; + if (!gotRequeued) { + return "COMPLETED"; + } else { + return "RETRY_QUEUED"; + } } else { //it will continue running because the retry delay is short await this.#createExecutionSnapshot(prisma, { @@ -1872,7 +1881,7 @@ export class RunEngine { await this.#sendNotificationToWorker({ runId }); } - return "RETRY_IMMEDIATELY" as const; + return "RETRY_IMMEDIATELY"; } const status = runStatusFromError(completion.error); @@ -1943,6 +1952,7 @@ export class RunEngine { }) { const prisma = tx ?? this.prisma; + await this.runLock.lock([run.id], 5000, async (signal) => { const newSnapshot = await this.#createExecutionSnapshot(prisma, { run: run, snapshot: { @@ -1972,6 +1982,47 @@ export class RunEngine { attempt: 0, }, }); + }); + } + + async #tryNackAndRequeue({ + run, + orgId, + timestamp, + error, + tx, + }: { + run: TaskRun; + orgId: string; + timestamp?: number; + error: TaskRunInternalError; + tx?: PrismaClientOrTransaction; + }): Promise { + const prisma = tx ?? 
this.prisma; + + return await this.runLock.lock([run.id], 5000, async (signal) => { + //we nack the message, this allows another work to pick up the run + const gotRequeued = await this.runQueue.nackMessage(orgId, run.id, timestamp); + + if (!gotRequeued) { + await this.#systemFailure({ + runId: run.id, + error, + tx: prisma, + }); + return false; + } + + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: run, + snapshot: { + executionStatus: "QUEUED", + description: "Requeued the run after a failure", + }, + }); + + return true; + }); } async #continueRun( @@ -2235,7 +2286,7 @@ export class RunEngine { }); } - #getHeartbeatInterval(status: TaskRunExecutionStatus): number | null { + #getHeartbeatIntervalMs(status: TaskRunExecutionStatus): number | null { switch (status) { case "PENDING_EXECUTING": { return this.heartbeatTimeouts.PENDING_EXECUTING; @@ -2324,9 +2375,9 @@ export class RunEngine { snapshotId: string; status: TaskRunExecutionStatus; }) { - const intervalSeconds = this.#getHeartbeatInterval(status); + const intervalMs = this.#getHeartbeatIntervalMs(status); - if (intervalSeconds === null) { + if (intervalMs === null) { return; } @@ -2334,7 +2385,7 @@ export class RunEngine { id: `heartbeatSnapshot.${snapshotId}`, job: "heartbeatSnapshot", payload: { snapshotId, runId }, - availableAt: new Date(Date.now() + intervalSeconds * 1000), + availableAt: new Date(Date.now() + intervalMs), }); } @@ -2347,8 +2398,9 @@ export class RunEngine { snapshotId: string; tx?: PrismaClientOrTransaction; }) { - const latestSnapshot = await this.#getLatestExecutionSnapshot(tx ?? this.prisma, runId); - + const prisma = tx ?? 
this.prisma; + return await this.runLock.lock([runId], 5_000, async (signal) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { this.logger.log( "RunEngine.#handleStalledSnapshot() no longer the latest snapshot, stopping the heartbeat.", @@ -2368,8 +2420,6 @@ export class RunEngine { snapshot: latestSnapshot, }); - //todo fail attempt if there is one? - switch (latestSnapshot.executionStatus) { case "RUN_CREATED": { //we need to check if the run is still created @@ -2380,8 +2430,41 @@ export class RunEngine { throw new NotImplementedError("Not implemented QUEUED"); } case "PENDING_EXECUTING": { - //we need to check if the run is still dequeued - throw new NotImplementedError("Not implemented DEQUEUED_FOR_EXECUTION"); + //the run didn't start executing, we need to requeue it + const run = await prisma.taskRun.findFirst({ + where: { id: runId }, + include: { + runtimeEnvironment: { + include: { + organization: true, + }, + }, + }, + }); + + if (!run) { + this.logger.error( + "RunEngine.#handleStalledSnapshot() PENDING_EXECUTING run not found", + { + runId, + snapshot: latestSnapshot, + } + ); + + throw new Error(`Run ${runId} not found`); + } + + //it will automatically be requeued X times depending on the queue retry settings + const gotRequeued = await this.#tryNackAndRequeue({ + run, + orgId: run.runtimeEnvironment.organizationId, + error: { + type: "INTERNAL_ERROR", + code: "TASK_RUN_DEQUEUED_MAX_RETRIES", + message: `Trying to create an attempt failed multiple times, exceeding how many times we retry.`, + }, + tx: prisma, + }); } case "EXECUTING": { //we need to check if the run is still executing @@ -2410,6 +2493,7 @@ export class RunEngine { assertNever(latestSnapshot.executionStatus); } } + }); } //#endregion diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index 05c7832029..46d7a112e7 100644 
--- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -8,11 +8,108 @@ import { trace } from "@opentelemetry/api"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; -import { EventBusEventArgs } from "../eventBus.js"; describe("RunEngine", () => { //todo heartbeat coming through, updating the snapshot and then succeeding - test("empty test", async () => {}); + containerTest("Single run (success)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const pendingExecutingTimeout = 100; + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + PENDING_EXECUTING: pendingExecutingTimeout, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const 
dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //expect it to be pending with 0 consecutiveFailures + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + + await setTimeout(pendingExecutingTimeout * 2); + + //expect it to be pending with 3 consecutiveFailures + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + + //create an attempt + // const attemptResult = await engine.startRunAttempt({ + // runId: dequeued[0].run.id, + // snapshotId: dequeued[0].snapshot.id, + // }); + // expect(attemptResult.run.id).toBe(run.id); + // expect(attemptResult.run.status).toBe("EXECUTING"); + // expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + } finally { + engine.quit(); + } + }); //todo heartbeat failing and the run eventually failing with a system failure }); diff --git a/packages/core/src/v3/errors.ts b/packages/core/src/v3/errors.ts index 9578083140..b9e2cc3873 100644 --- a/packages/core/src/v3/errors.ts +++ b/packages/core/src/v3/errors.ts @@ -172,6 +172,7 @@ export function shouldRetryError(error: TaskRunError): boolean { case "TASK_DEQUEUED_INVALID_STATE": case "TASK_DEQUEUED_QUEUE_NOT_FOUND": case "TASK_HAS_N0_EXECUTION_SNAPSHOT": + case "TASK_RUN_DEQUEUED_MAX_RETRIES": return false; case "GRACEFUL_EXIT_TIMEOUT": diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 66b48ef9da..59fcfb71a9 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -107,6 +107,7 @@ export const TaskRunInternalError = z.object({ "TASK_HAS_N0_EXECUTION_SNAPSHOT", "TASK_DEQUEUED_INVALID_STATE", "TASK_DEQUEUED_QUEUE_NOT_FOUND", + 
"TASK_RUN_DEQUEUED_MAX_RETRIES", "OUTDATED_SDK_VERSION", ]), message: z.string().optional(), From bc01fdb0ebe5e166b4a7695010378f4476b2a962 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 7 Nov 2024 20:01:34 +0000 Subject: [PATCH 132/485] First heartbeat test working --- .../run-engine/src/engine/index.ts | 9 +- .../src/engine/tests/heartbeats.test.ts | 191 ++++++++++-------- 2 files changed, 111 insertions(+), 89 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 5f7cf62202..6d01e65e0a 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -6,7 +6,7 @@ import { MachinePresetName, parsePacket, QueueOptions, - RescheduleRunRequestBody, + RetryOptions, sanitizeError, shouldRetryError, TaskRunError, @@ -29,7 +29,6 @@ import { PrismaClient, PrismaClientOrTransaction, TaskRun, - TaskRunExecutionSnapshot, TaskRunExecutionStatus, TaskRunStatus, Waitpoint, @@ -44,12 +43,12 @@ import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorit import { MinimalAuthenticatedEnvironment } from "../shared"; import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; +import { runStatusFromError } from "./errors"; import { EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { DequeuedMessage, RunExecutionData } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; -import { runStatusFromError } from "./errors"; type Options = { redis: RedisOptions; @@ -62,6 +61,9 @@ type Options = { machines: Record; baseCostInCents: number; }; + queue?: { + retryOptions?: RetryOptions; + }; /** If not set then checkpoints won't ever be used */ retryWarmStartThresholdMs?: number; heartbeatTimeoutsMs?: Partial; @@ -175,6 +177,7 @@ export class RunEngine { 
enableRebalancing: false, logger: new Logger("RunQueue", "warn"), redis: options.redis, + retryOptions: options.queue?.retryOptions, }); this.worker = new Worker({ diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index 46d7a112e7..b7e29701a0 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -9,107 +9,126 @@ import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; -describe("RunEngine", () => { +describe("RunEngine heartbeats", () => { //todo heartbeat coming through, updating the snapshot and then succeeding - containerTest("Single run (success)", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest( + "Attempt timeout then successfully attempted", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const pendingExecutingTimeout = 100; + const pendingExecutingTimeout = 100; - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - 
centsPerMs: 0.0001, + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, }, + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + PENDING_EXECUTING: pendingExecutingTimeout, }, - baseCostInCents: 0.0001, - }, - heartbeatTimeoutsMs: { - PENDING_EXECUTING: pendingExecutingTimeout, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); + queue: { + retryOptions: { + maxTimeoutInMs: 50, + }, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); - try { - const taskIdentifier = "test-task"; + try { + const taskIdentifier = "test-task"; - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + 
maxRunCount: 10, + }); + + //expect it to be pending with 0 consecutiveFailures + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + + await setTimeout(pendingExecutingTimeout * 2); - //expect it to be pending with 0 consecutiveFailures - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + //expect it to be pending with 3 consecutiveFailures + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); - await setTimeout(pendingExecutingTimeout * 2); + await setTimeout(500); - //expect it to be pending with 3 consecutiveFailures - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + //have to dequeue again + const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued2.length).toBe(1); - //create an attempt - // const attemptResult = await engine.startRunAttempt({ - // runId: dequeued[0].run.id, - // snapshotId: dequeued[0].snapshot.id, - // }); - // expect(attemptResult.run.id).toBe(run.id); - // expect(attemptResult.run.status).toBe("EXECUTING"); - // expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - } finally { - engine.quit(); + // create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued2[0].run.id, + snapshotId: dequeued2[0].snapshot.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + 
expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + } finally { + engine.quit(); + } } - }); + ); //todo heartbeat failing and the run eventually failing with a system failure }); From 5c5312fd3503b6c120e7a6331c71005ded3193ad Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 8 Nov 2024 09:54:15 +0000 Subject: [PATCH 133/485] If something throws when dequeuing a run, nack it (ideally with a valid snapshot) --- .../run-engine/src/engine/index.ts | 191 ++++++++++-------- 1 file changed, 109 insertions(+), 82 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 6d01e65e0a..d566ddfeae 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -756,14 +756,41 @@ export class RunEngine { } } catch (error) { this.logger.error( - "RunEngine.dequeueFromMasterQueue(): Error while preparing run to be run", + "RunEngine.dequeueFromMasterQueue(): Thrown error while preparing run to be run", { error, runId, } ); - await this.runQueue.nackMessage(orgId, runId); + const run = await prisma.taskRun.findFirst({ where: { id: runId } }); + + if (!run) { + //this isn't ideal because we're not creating a snapshot… but we can't do much else + this.logger.error( + "RunEngine.dequeueFromMasterQueue(): Thrown error, then run not found. 
Nacking.", + { + runId, + orgId, + } + ); + await this.runQueue.nackMessage(orgId, runId); + continue; + } + + //this is an unknown error, we'll reattempt (with auto-backoff and eventually DLQ) + const gotRequeued = await this.#tryNackAndRequeue({ + run, + orgId, + error: { + type: "INTERNAL_ERROR", + code: "TASK_RUN_DEQUEUED_MAX_RETRIES", + message: `We tried to dequeue the run the maximum number of times but it wouldn't start executing`, + }, + tx: prisma, + }); + //we don't need this, but it makes it clear we're in a loop here + continue; } } @@ -1956,35 +1983,35 @@ export class RunEngine { const prisma = tx ?? this.prisma; await this.runLock.lock([run.id], 5000, async (signal) => { - const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run: run, - snapshot: { - executionStatus: "QUEUED", - description: "Run was QUEUED", - }, - }); + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run: run, + snapshot: { + executionStatus: "QUEUED", + description: "Run was QUEUED", + }, + }); - const masterQueues = [run.masterQueue]; - if (run.secondaryMasterQueue) { - masterQueues.push(run.secondaryMasterQueue); - } + const masterQueues = [run.masterQueue]; + if (run.secondaryMasterQueue) { + masterQueues.push(run.secondaryMasterQueue); + } - await this.runQueue.enqueueMessage({ - env, - masterQueues, - message: { - runId: run.id, - taskIdentifier: run.taskIdentifier, - orgId: env.organization.id, - projectId: env.project.id, - environmentId: env.id, - environmentType: env.type, - queue: run.queue, - concurrencyKey: run.concurrencyKey ?? undefined, - timestamp: timestamp ?? Date.now(), - attempt: 0, - }, - }); + await this.runQueue.enqueueMessage({ + env, + masterQueues, + message: { + runId: run.id, + taskIdentifier: run.taskIdentifier, + orgId: env.organization.id, + projectId: env.project.id, + environmentId: env.id, + environmentType: env.type, + queue: run.queue, + concurrencyKey: run.concurrencyKey ?? undefined, + timestamp: timestamp ?? 
Date.now(), + attempt: 0, + }, + }); }); } @@ -2404,35 +2431,35 @@ export class RunEngine { const prisma = tx ?? this.prisma; return await this.runLock.lock([runId], 5_000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); - if (latestSnapshot.id !== snapshotId) { - this.logger.log( - "RunEngine.#handleStalledSnapshot() no longer the latest snapshot, stopping the heartbeat.", - { - runId, - snapshotId, - latestSnapshot: latestSnapshot, - } - ); + if (latestSnapshot.id !== snapshotId) { + this.logger.log( + "RunEngine.#handleStalledSnapshot() no longer the latest snapshot, stopping the heartbeat.", + { + runId, + snapshotId, + latestSnapshot: latestSnapshot, + } + ); - await this.worker.ack(`heartbeatSnapshot.${snapshotId}`); - return; - } + await this.worker.ack(`heartbeatSnapshot.${snapshotId}`); + return; + } - this.logger.log("RunEngine.#handleStalledSnapshot() handling stalled snapshot", { - runId, - snapshot: latestSnapshot, - }); + this.logger.log("RunEngine.#handleStalledSnapshot() handling stalled snapshot", { + runId, + snapshot: latestSnapshot, + }); - switch (latestSnapshot.executionStatus) { - case "RUN_CREATED": { - //we need to check if the run is still created - throw new NotImplementedError("Not implemented RUN_CREATED"); - } - case "QUEUED": { - //we need to check if the run is still QUEUED - throw new NotImplementedError("Not implemented QUEUED"); - } - case "PENDING_EXECUTING": { + switch (latestSnapshot.executionStatus) { + case "RUN_CREATED": { + //we need to check if the run is still created + throw new NotImplementedError("Not implemented RUN_CREATED"); + } + case "QUEUED": { + //we need to check if the run is still QUEUED + throw new NotImplementedError("Not implemented QUEUED"); + } + case "PENDING_EXECUTING": { //the run didn't start executing, we need to requeue it const run = await prisma.taskRun.findFirst({ where: { id: runId }, @@ -2468,34 +2495,34 @@ export class RunEngine { }, tx: 
prisma, }); + } + case "EXECUTING": { + //we need to check if the run is still executing + throw new NotImplementedError("Not implemented EXECUTING"); + } + case "EXECUTING_WITH_WAITPOINTS": { + //we need to check if the run is still executing + throw new NotImplementedError("Not implemented EXECUTING_WITH_WAITPOINTS"); + } + case "BLOCKED_BY_WAITPOINTS": { + //we need to check if the waitpoints are still blocking the run + throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); + } + case "PENDING_CANCEL": { + await this.cancelRun({ + runId: latestSnapshot.runId, + finalizeRun: true, + tx, + }); + } + case "FINISHED": { + //we need to check if the run is still finished + throw new NotImplementedError("Not implemented FINISHED"); + } + default: { + assertNever(latestSnapshot.executionStatus); + } } - case "EXECUTING": { - //we need to check if the run is still executing - throw new NotImplementedError("Not implemented EXECUTING"); - } - case "EXECUTING_WITH_WAITPOINTS": { - //we need to check if the run is still executing - throw new NotImplementedError("Not implemented EXECUTING_WITH_WAITPOINTS"); - } - case "BLOCKED_BY_WAITPOINTS": { - //we need to check if the waitpoints are still blocking the run - throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); - } - case "PENDING_CANCEL": { - await this.cancelRun({ - runId: latestSnapshot.runId, - finalizeRun: true, - tx, - }); - } - case "FINISHED": { - //we need to check if the run is still finished - throw new NotImplementedError("Not implemented FINISHED"); - } - default: { - assertNever(latestSnapshot.executionStatus); - } - } }); } From d89c25a8916f445b0ad3dfaa71f9ec761673fd69 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 8 Nov 2024 10:39:08 +0000 Subject: [PATCH 134/485] A test when we fail to start runs until the run goes into a SystemFailure state --- .../run-engine/src/engine/index.ts | 28 +++- .../src/engine/tests/heartbeats.test.ts | 126 +++++++++++++++++- 2 files 
changed, 150 insertions(+), 4 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index d566ddfeae..03239048fe 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -488,7 +488,7 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? this.prisma; - return this.#trace("createRunAttempt", { consumerId, masterQueue }, async (span) => { + return this.#trace("dequeueFromMasterQueue", { consumerId, masterQueue }, async (span) => { //gets multiple runs from the queue const messages = await this.runQueue.dequeueMessageFromMasterQueue( consumerId, @@ -853,7 +853,7 @@ export class RunEngine { }) { const prisma = tx ?? this.prisma; - return this.#trace("createRunAttempt", { runId, snapshotId }, async (span) => { + return this.#trace("startRunAttempt", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5000, async (signal) => { const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); @@ -1693,7 +1693,29 @@ export class RunEngine { runId: string; error: TaskRunInternalError; tx?: PrismaClientOrTransaction; - }) {} + }) { + const prisma = tx ?? 
this.prisma; + return this.#trace("#systemFailure", { runId }, async (span) => { + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + + //already finished + if (latestSnapshot.executionStatus === "FINISHED") { + //todo check run is in the correct state + return; + } + + await this.#attemptFailed({ + runId, + snapshotId: latestSnapshot.id, + completion: { + ok: false, + id: runId, + error, + }, + tx: prisma, + }); + }); + } async #crash({ runId, diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index b7e29701a0..5fe1bf5294 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -10,7 +10,6 @@ import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; describe("RunEngine heartbeats", () => { - //todo heartbeat coming through, updating the snapshot and then succeeding containerTest( "Attempt timeout then successfully attempted", { timeout: 15_000 }, @@ -130,5 +129,130 @@ describe("RunEngine heartbeats", () => { } ); + containerTest( + "All start attempts timeout", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const pendingExecutingTimeout = 100; + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + PENDING_EXECUTING: pendingExecutingTimeout, + }, + queue: { + retryOptions: { 
+ //intentionally set the attempts to 2 and quick + maxAttempts: 2, + minTimeoutInMs: 50, + maxTimeoutInMs: 50, + }, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //expect it to be pending + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + + await setTimeout(pendingExecutingTimeout * 2); + + //expect it to be pending with 3 consecutiveFailures + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + + await setTimeout(500); + + //have to dequeue again + const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued2.length).toBe(1); + + //expect it to be pending + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + + await setTimeout(pendingExecutingTimeout * 3); + + //expect it to be pending with 3 consecutiveFailures + 
const executionData4 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData4); + expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData4.run.status).toBe("SYSTEM_FAILURE"); + } finally { + engine.quit(); + } + } + ); + //todo heartbeat failing and the run eventually failing with a system failure }); From 5bf7596951d4625f2aa99febe314f0cc82e9227f Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 8 Nov 2024 12:49:48 +0000 Subject: [PATCH 135/485] Updated the Run Engine readme to document some things and capture some of my current thoughts --- internal-packages/run-engine/README.md | 252 +++++++------------------ 1 file changed, 72 insertions(+), 180 deletions(-) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 8f68ef0cbe..08ccbfa906 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -1,19 +1,33 @@ # Trigger.dev Run Engine -The Run Engine process runs from triggering, to executing, and completing them. +The Run Engine process runs from triggering, to executing, retrying, and completing them. It is responsible for: -- Creating and updating runs as they progress. +- Creating, updating, and completing runs as they progress. - Operating the run queue, including handling concurrency. -- Mutating the state of runs. +- Heartbeats which detects stalled runs and attempts to automatically recover them. +- Registering checkpoints which enable pausing/resuming of runs. + +## Run locking + +Many operations on the run are "atomic" in the sense that only a single operation can mutate them at a time. We use RedLock to create a distributed lock to ensure this. Postgres locking is not enough on its own because we have multiple API instances and Redis is used for the queue. + +There are race conditions we need to deal with: +- When checkpointing the run continues to execute until the checkpoint has been stored. 
At the same time the run continues and the checkpoint can become irrelevant if the waitpoint is completed. Both can happen at the same time, so we must lock the run and protect against outdated checkpoints. ## Run execution -The execution of a run is stored in the `TaskRunExecutionSnapshot` table in Postgres. +The execution state of a run is stored in the `TaskRunExecutionSnapshot` table in Postgres. This is separate from the `TaskRun` status which is exposed to users via the dashboard and API. ![The execution states](./execution-states.png) +The `TaskRunExecutionSnapshot` `executionStatus` is used to determine the execution status and is internal to the run engine. It is a log of events that impact run execution – the data is used to execute the run. + +A common pattern we use is to read the current state and check that the passed in `snapshotId` matches the current `snapshotId`. If it doesn't, we know that the state has moved on. In the case of a checkpoint coming in, we know we can just ignore it. + +We can also store invalid states by setting an error. These invalid states are purely used for debugging and are ignored for execution purposes. + ## Workers A worker is a server that runs tasks. There are two types of workers: @@ -38,224 +52,102 @@ For dev environments, we will pass the `environment` id. If there's only a `workerGroup`, we can just `dequeueFromMasterQueue()` to get runs. If there's a `BackgroundWorker` id, we need to determine if that `BackgroundWorker` is the latest. If it's the latest we call `dequeueFromEnvironmentMasterQueue()` to get any runs that aren't locked to a version. If it's not the latest, we call `dequeueFromBackgroundWorkerMasterQueue()` to get runs that are locked to that version. -## Components - -### Run Engine - -This is used to actually process a run and store the state at each step. It coordinates with the other components. 
- -#### Atomicity - -Operations on the run are "atomic" in the sense that only a single operation can mutate them at a time. We use RedLock to ensure this. - -#### Valid state transitions - -The run engine ensures that the run can only transition to valid states. - -#### State history - -When a run is mutated in any way, we store the state. This data is used for the next step for the run, and also for debugging. - -`TaskRunState` is a decent table name. We should have a "description" column which describes the change, this would be purely for internal use but would be very useful for debugging. - ### Run Queue -This is used to queue, dequeue, and manage concurrency. It also provides visibility into the concurrency for the env, org, etc. +This is a fair multi-tenant queue. It is designed to fairly select runs, respect concurrency limits, and have high throughput. It provides visibility into the current concurrency for the env, org, etc. -Run IDs are enqueued. They're pulled from the queue in a fair way with advanced options for debouncing and visibility. +It has built-in reliability features: +- When nacking we increment the `attempt` and if it continually fails we will move it to a Dead Letter Queue (DLQ). +- If a run is in the DLQ you can redrive it. ### Heartbeats -Heartbeats are used to determine if a run has stopped responding. If a heartbeat isn't received within a defined period then the run is judged to have become stuck and the attempt is failed. +Heartbeats are used to determine if a run has become stalled. Depending on the current execution status, we do different things. For example, if the run has been dequeued but the attempt hasn't been started we requeue it. ### Checkpoints -Checkpoints allow pausing an executing run and then resuming it later. +Checkpoints allow pausing an executing run and then resuming it later. This is an optimization to avoid wasted compute and is especially useful with "Waitpoints". 
## Waitpoints -A "Waitpoint" is something that prevents a run from continuing: +A "Waitpoint" is something that can block a run from continuing: -- `wait.for()` a future time. -- `triggerAndWait()` until the run is finished. -- `batchTriggerAndWait()` until all runs are finished. -- `wait.forRequest()` wait until a request has been received (not implemented yet). +A single Waitpoint can block many runs, the same waitpoint can only block a run once (there's a unique constraint). They block run execution from continuing until all of them are completed. -They block run execution from continuing until all of them are completed/removed. +They can have output data associated with them, e.g. the finished run payload. That includes an error, e.g. a failed run. -Some of them have data associated with them, e.g. the finished run payload. +There are currently three types: + - `RUN` which gets completed when the associated run completes. Every run has an `associatedWaitpoint` that matches the lifetime of the run. + - `DATETIME` which gets completed when the datetime is reached. + - `EVENT` which gets completed when that event occurs. -Could a run have multiple at once? That might allow us to support Promise.all wrapped. It would also allow more advanced use cases. +Waitpoints can have an idempotencyKey which stops them from being created multiple times. This is especially useful for event waitpoints, where you don't want to create a new waitpoint for the same event twice. -Could this be how we implement other features like `delay`, `rate limit`, and retries waiting before the next try? +### Use cases -Could we even open up a direct API/SDK for creating one inside a run (would pause execution)? And then completing one (would continue execution)? It could also be "failed" which the run could act upon differently. +#### `wait.for()` or `wait.until()` +Wait for a future time, then continue.
We should add the option to pass an `idempotencyKey` so a second attempt doesn't wait again. By default it would wait again. -## Notes from call with Eric +#### `triggerAndWait()` or `batchTriggerAndWait()` +Trigger and then wait for run(s) to finish. If the run fails it will still continue but with the errors so the developer can decide what to do. -We could expose the API/SDK for creating/completing Waitpoints. +### The `trigger` `delay` option -> They need to be associated with attempts, because that's what gets continued. And if an attempts fails, we don't want to keep the waitpoints. +When triggering a run and passing the `delay` option, we use a `DATETIME` waitpoint to block the run from starting. -> We should have idempotency keys for `wait.for()` and `wait.until()`, so they wouldn't wait on a second attempt. "Waitpoints" have idempotency keys, and these are used for a `wait.forEvent()` (or whatever we call it). +#### `wait.forRequest()` +Wait until a request has been received at the URL that you are given. This is useful for pausing a run and then continuing it again when some external event occurs on another service. For example, Replicate have an API where they will call back when their work is complete. -> How would debounce use this? When the waitpoint is completed, we would "clear" the "idempotencyKey" which would be the user-provided "debounceKey". It wouldn't literally clear it necessarily. Maybe another column `idempotencyKeyActive` would be set to `false`. Or move the key to another column, which is just for reference. +#### `wait.forWaitpoint(waitpointId)` -> `triggerAndWait`, cancelling a child task run. It would clear the waitpoint `idempotencyKey`, same as above. +A more advanced SDK which would require users to explicitly create a waitpoint. We would also need `createWaitpoint()`, `completeWaitpoint()`, and `failWaitpoint()`. -> Copying the output from the run into the waitpoint actually does make sense.
It simplifies the API for continuing runs. +#### `wait.forRunToComplete(runId)` -> Inside a run you could wait for another run or runs using the run ID. `const output = await wait.forRunToComplete(runId)`. This would basically just get a run by ID, then wait for it's waitpoint to be completed. This means every run would have a waitpoint associated with it. +You could wait for another run (or runs) using their run ids. This would allow you to wait for runs that you haven't triggered inside that run. -```ts -//inside a run function -import { runs } from "@trigger.dev/sdk/v3"; +#### Debouncing -// Loop through all runs with the tag "user_123456" that have completed - -for await (const run of runs.list({ tag: "user_123456" })) { - await wait.forRunToComplete(run.id); -} - -//wait for many runs to complete -await wait.forRunToComplete(runId); -await wait.forRunsToComplete({ tag: "user_123456" }); -``` +Using a `DateTime` waitpoint and an `idempotencyKey` debounce can be implemented. -Rate limit inside a task. This is much trickier. 
+Suggested usage: ```ts -//simple time-based rate limit -await wait.forRateLimit(`timed-${payload.user.id}`, { per: { minute: 10 } }); - -const openAiResult = await wait.forRateLimit( - `openai-${payload.user.id}`, - { limit: 100, recharge: { seconds: 2 } }, - (rateLimit, refreshes) => { - const result = await openai.createCompletion({ - model: "gpt-3.5-turbo", - prompt: "What is the meaning of life?", - }); - const tokensUsed = result.tokensUsed; - - await rateLimit.used(tokensUsed); - - return result; - } +await myTask.trigger( + { some: "data" }, + { debounce: { key: user.id, wait: "30s", maxWait: "2m", leading: true } } ); - -//do stuff with openAiResult ``` -#### `triggerAndWait()` implementation - -Inside the SDK - -```ts -function triggerAndWait_internal(data) { - //if you don't pass in a string, it won't have a "key" - const waitpoint = await createWaitpoint(); - const response = await apiClient.triggerTask({ ...data, waitpointId: waitpoint.id }); - - //...do normal stuff +Implementation: - // wait for the waitpoint to be completed - // in reality this probably needs to happen inside the runtime - const result = await waitpointCompletion(waitpoint.id); -} -``` +The Waitpoint `idempotencyKey` should be prefixed like `debounce-${debounce.key}`. Also probably with the `taskIdentifier`? 
-Pseudo-code for completing a run and completing the waitpoint: - -```ts -function completeRun(tx, data) { - //complete the child run - const run = await tx.taskRun.update({ where: { id: runId }, data, include: { waitpoint } }); - if (run.waitpoint) { - await completeWaitpoint(tx, { id: run.waitpoint.id }); - - //todo in completeWaitpoint it would check if the blocked runs can now continue - //if they have no more blockers then they can continue - - //batchTriggerAndWait with two items - //blocked_by: ["w_1", "w_2"] - //blocked_by: ["w_2"] - //blocked_by: [] then you can continue - } - - const state = await tx.taskRunState.create({ - where: { runId: id }, - data: { runId, status: run.status }, - }); - - const previousState = await tx.taskRunState.findFirst({ where: { runId: runId, latest: true } }); - const waitingOn = previousState.waitingOn?.filter((w) => w !== waitpoint?.id) ?? []; - - if (waitingOn.length === 0) { - } -} -``` - -#### `batchTriggerAndWait()` implementation - -```ts -//todo -``` - -### Example: User-defined waitpoint - -A user's backend code: - -```ts -import { waitpoint } from "@trigger.dev/sdk/v3"; -import type { NextApiRequest, NextApiResponse } from "next"; - -export default async function handler(req: NextApiRequest, res: NextApiResponse<{ id: string }>) { - const userId = req.query.userId; - const isPaying = req.query.isPaying; - - //internal SDK calls, this would be nicer for users to use - const waitpoint = waitpoint(`${userId}/onboarding-completed`); - await waitpoint.complete({ data: { isPaying } }); - - //todo instead this would be a single call - - res.status(200).json(handle); -} -``` - -Inside a user's run - -```ts -export const myTask = task({ - id: "my-task", - run: async (payload) => { - //it doesn't matter if this was completed before the run started - const result = await wait.forPoint<{ isPaying: boolean }>( - `${payload.userId}/onboarding-completed` - ); - }, -}); -``` - -### How would we implement `batchTriggerAndWait`? 
- -```ts - -``` +1. When trigger is called with `debounce`, we check if there's an active waitpoint with the relevant `idempotencyKey`. +2. If `leading` is false (default): + - If there's a waiting run: update its payload and extend the waitpoint's completionTime + - If no waiting run: create a new run and DATETIME waitpoint +3. If `leading` is true: + - If there is no pending waitpoint: execute immediately but create a waitpoint with the idempotencyKey. + - If there is a pending waitpoint + - If there's a blocked run already, update the payload and extend the `completionTime`. + - If there's not a blocked run, create the run and block it with the waitpoint. +4. If `maxWait` is specified: + - The waitpoint's completionTime is capped at the waitpoint `createdAt` + maxWait. + - Ensures execution happens even during constant triggering +5. When the waitpoint is completed we need to clear the `idempotencyKey`. To clear an `idempotencyKey`, move the original value to the `inactiveIdempotencyKey` column and set the main one to a new randomly generated one. -## How does it work? +//todo implement auto-deactivating of the idempotencyKey when the waitpoint is completed. This would make it easier to implement features like this. -It's very important that a run can only be acted on by one process at a time. We lock runs using RedLock while they're being mutated. This prevents some network-related race conditions like the timing of checkpoints and heartbeats permanently hanging runs. +#### Rate limiting -# Sending messages to the worker +Both when triggering tasks and also any helpers we wanted inside the task. -Sending messages to the worker is challenging because we many servers and we're going to have many workers. We need to make sure that the message is sent to the correct worker. +We could either use the DATETIME waitpoints. 
Or it might be easier to use an existing rate limiting library with Redis and receive notifications when a limit is cleared and complete associated waitpoints. -We could add timeouts using the heartbeat system +## Emitting events -## #continueRun -When all waitpoints are finished, we need to continue a run. Sometimes they're still running in the cluster. +The Run Engine emits events using its `eventBus`. This is used for runs completing, failing, or things that any workers should be aware of. # Legacy system From 71d1cf88075def00fdc2e01729ddd463001f51b3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 8 Nov 2024 12:54:48 +0000 Subject: [PATCH 136/485] Tweaked some of the text --- internal-packages/run-engine/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 08ccbfa906..3fc107eb22 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -143,7 +143,7 @@ The Waitpoint `idempotencyKey` should be prefixed like `debounce-${debounce.key Both when triggering tasks and also any helpers we wanted inside the task. -We could either use the DATETIME waitpoints. Or it might be easier to use an existing rate limiting library with Redis and receive notifications when a limit is cleared and complete associated waitpoints. +For inside tasks, we could use the DATETIME waitpoints. Or it might be easier to use an existing rate limiting library with Redis and receive notifications when a limit is cleared and complete associated waitpoints. 
## Emitting events From b1cb62fd833240d5a53831901df60d7ddb1eb196 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 8 Nov 2024 12:56:14 +0000 Subject: [PATCH 137/485] Added a todo to the prisma schema for auto deactivating idempotency keys, this will be needed for debounce for example --- internal-packages/database/prisma/schema.prisma | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 98937c2792..06e8862cad 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2004,8 +2004,14 @@ model Waitpoint { /// If it's an Event type waitpoint, this is the event. It can also be provided for the DATETIME type idempotencyKey String + /// If this is true then we can show it in the dashboard/return it from the SDK userProvidedIdempotencyKey Boolean + //todo + /// Will automatically deactivate the idempotencyKey when the waitpoint is completed + /// "Deactivating" means moving it to the inactiveIdempotencyKey field and generating a random new one for the main column + /// deactivateIdempotencyKeyWhenCompleted Boolean @default(false) + /// If an idempotencyKey is no longer active, we store it here and generate a new one for the idempotencyKey field. /// Clearing an idempotencyKey is useful for debounce or cancelling child runs. /// This is a workaround because Prisma doesn't support partial indexes. 
From fa06e80ecbf4906489b432c52c006b0444570570 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Sun, 10 Nov 2024 16:23:09 +0000 Subject: [PATCH 138/485] Added heartbeat recoveries when workers are unresponsive (after an attempt is created) --- .../run-engine/src/engine/errors.ts | 2 + .../run-engine/src/engine/index.ts | 58 ++++++-- .../src/engine/tests/heartbeats.test.ts | 139 +++++++++++++++++- packages/core/src/v3/errors.ts | 6 +- packages/core/src/v3/schemas/common.ts | 2 + 5 files changed, 188 insertions(+), 19 deletions(-) diff --git a/internal-packages/run-engine/src/engine/errors.ts b/internal-packages/run-engine/src/engine/errors.ts index 94a9d8b94a..b7ef0b4d61 100644 --- a/internal-packages/run-engine/src/engine/errors.ts +++ b/internal-packages/run-engine/src/engine/errors.ts @@ -35,6 +35,8 @@ export function runStatusFromError(error: TaskRunError): TaskRunStatus { case "TASK_DEQUEUED_INVALID_STATE": case "TASK_DEQUEUED_QUEUE_NOT_FOUND": case "TASK_RUN_DEQUEUED_MAX_RETRIES": + case "TASK_RUN_STALLED_EXECUTING": + case "TASK_RUN_STALLED_EXECUTING_WITH_WAITPOINTS": case "TASK_HAS_N0_EXECUTION_SNAPSHOT": case "GRACEFUL_EXIT_TIMEOUT": case "TASK_INPUT_ERROR": diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 03239048fe..b08bcf400e 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1814,11 +1814,13 @@ export class RunEngine { runId, snapshotId, completion, + forceRequeue, tx, }: { runId: string; snapshotId: string; completion: TaskRunFailedExecutionResult; + forceRequeue?: boolean; tx: PrismaClientOrTransaction; }): Promise<"COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"> { const prisma = this.prisma; @@ -1899,8 +1901,9 @@ export class RunEngine { //if it's a long delay and we support checkpointing, put it back in the queue if ( - this.options.retryWarmStartThresholdMs !== undefined && - completion.retry.delay >= 
this.options.retryWarmStartThresholdMs + forceRequeue || + (this.options.retryWarmStartThresholdMs !== undefined && + completion.retry.delay >= this.options.retryWarmStartThresholdMs) ) { //we nack the message, this allows another work to pick up the run const gotRequeued = await this.#tryNackAndRequeue({ @@ -2474,12 +2477,10 @@ export class RunEngine { switch (latestSnapshot.executionStatus) { case "RUN_CREATED": { - //we need to check if the run is still created - throw new NotImplementedError("Not implemented RUN_CREATED"); + throw new NotImplementedError("There shouldn't be a heartbeat for RUN_CREATED"); } case "QUEUED": { - //we need to check if the run is still QUEUED - throw new NotImplementedError("Not implemented QUEUED"); + throw new NotImplementedError("There shouldn't be a heartbeat for QUEUED"); } case "PENDING_EXECUTING": { //the run didn't start executing, we need to requeue it @@ -2517,29 +2518,56 @@ export class RunEngine { }, tx: prisma, }); + break; } - case "EXECUTING": { - //we need to check if the run is still executing - throw new NotImplementedError("Not implemented EXECUTING"); - } + case "EXECUTING": case "EXECUTING_WITH_WAITPOINTS": { - //we need to check if the run is still executing - throw new NotImplementedError("Not implemented EXECUTING_WITH_WAITPOINTS"); + + const retryDelay = 250; + + //todo call attemptFailed and force requeuing + await this.#attemptFailed({ + runId, + snapshotId: latestSnapshot.id, + completion: { + ok: false, + id: runId, + error: { + type: "INTERNAL_ERROR", + code: + latestSnapshot.executionStatus === "EXECUTING" + ? 
"TASK_RUN_STALLED_EXECUTING" + : "TASK_RUN_STALLED_EXECUTING_WITH_WAITPOINTS", + message: `Trying to create an attempt failed multiple times, exceeding how many times we retry.`, + }, + retry: { + //250ms in the future + timestamp: Date.now() + retryDelay, + delay: retryDelay, + }, + }, + forceRequeue: true, + tx: prisma, + }); + break; } case "BLOCKED_BY_WAITPOINTS": { - //we need to check if the waitpoints are still blocking the run + //todo should we do a periodic check here for whether waitpoints are actually still blocking? + //we could at least log some things out if a run has been in this state for a long time throw new NotImplementedError("Not implemented BLOCKED_BY_WAITPOINTS"); } case "PENDING_CANCEL": { + //if the run is waiting to cancel but the worker hasn't confirmed that, + //we force the run to be cancelled await this.cancelRun({ runId: latestSnapshot.runId, finalizeRun: true, tx, }); + break; } case "FINISHED": { - //we need to check if the run is still finished - throw new NotImplementedError("Not implemented FINISHED"); + throw new NotImplementedError("There shouldn't be a heartbeat for FINISHED"); } default: { assertNever(latestSnapshot.executionStatus); diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index 5fe1bf5294..b199a5ad48 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -124,7 +124,7 @@ describe("RunEngine heartbeats", () => { expect(attemptResult.run.status).toBe("EXECUTING"); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); } finally { - engine.quit(); + await engine.quit(); } } ); @@ -249,10 +249,143 @@ describe("RunEngine heartbeats", () => { expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); expect(executionData4.run.status).toBe("SYSTEM_FAILURE"); } finally { - engine.quit(); + await engine.quit(); } } 
); - //todo heartbeat failing and the run eventually failing with a system failure + containerTest( + "Execution timeout (worker doesn't heartbeat)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const executingTimeout = 100; + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + EXECUTING: executingTimeout, + }, + queue: { + retryOptions: { + //intentionally set the attempts to 2 and quick + maxAttempts: 2, + minTimeoutInMs: 50, + maxTimeoutInMs: 50, + }, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + + //should be 
executing + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData.run.status).toBe("EXECUTING"); + + //wait long enough for the heartbeat to timeout + await setTimeout(1_000); + + //expect it to be pending with 3 consecutiveFailures + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + + // await setTimeout(500); + + // //have to dequeue again + // const dequeued2 = await engine.dequeueFromMasterQueue({ + // consumerId: "test_12345", + // masterQueue: run.masterQueue, + // maxRunCount: 10, + // }); + // expect(dequeued2.length).toBe(1); + + // //expect it to be pending + // const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + // assertNonNullable(executionData3); + // expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + + // await setTimeout(executingTimeout * 3); + + // //expect it to be pending with 3 consecutiveFailures + // const executionData4 = await engine.getRunExecutionData({ runId: run.id }); + // assertNonNullable(executionData4); + // expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); + // expect(executionData4.run.status).toBe("SYSTEM_FAILURE"); + } finally { + await engine.quit(); + } + } + ); + + //todo pending_cancel }); diff --git a/packages/core/src/v3/errors.ts b/packages/core/src/v3/errors.ts index b9e2cc3873..285a753724 100644 --- a/packages/core/src/v3/errors.ts +++ b/packages/core/src/v3/errors.ts @@ -166,8 +166,8 @@ export function shouldRetryError(error: TaskRunError): boolean { case "TASK_RUN_CANCELLED": case "MAX_DURATION_EXCEEDED": case "DISK_SPACE_EXCEEDED": - case "TASK_RUN_HEARTBEAT_TIMEOUT": case "OUTDATED_SDK_VERSION": + case "TASK_RUN_HEARTBEAT_TIMEOUT": // run engine errors case 
"TASK_DEQUEUED_INVALID_STATE": case "TASK_DEQUEUED_QUEUE_NOT_FOUND": @@ -175,6 +175,10 @@ export function shouldRetryError(error: TaskRunError): boolean { case "TASK_RUN_DEQUEUED_MAX_RETRIES": return false; + //new heartbeat error + //todo + case "TASK_RUN_STALLED_EXECUTING": + case "TASK_RUN_STALLED_EXECUTING_WITH_WAITPOINTS": case "GRACEFUL_EXIT_TIMEOUT": case "HANDLE_ERROR_ERROR": case "TASK_INPUT_ERROR": diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 59fcfb71a9..814b6e2dca 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -108,6 +108,8 @@ export const TaskRunInternalError = z.object({ "TASK_DEQUEUED_INVALID_STATE", "TASK_DEQUEUED_QUEUE_NOT_FOUND", "TASK_RUN_DEQUEUED_MAX_RETRIES", + "TASK_RUN_STALLED_EXECUTING", + "TASK_RUN_STALLED_EXECUTING_WITH_WAITPOINTS", "OUTDATED_SDK_VERSION", ]), message: z.string().optional(), From b7874081aa0ea3e10348ce09d9e067e149871f48 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Sun, 10 Nov 2024 16:23:38 +0000 Subject: [PATCH 139/485] Added notes about how to detect if we should use the run engine when triggering --- .../routes/api.v1.tasks.$taskId.trigger.ts | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts index aae68c91d6..2d428090cb 100644 --- a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts +++ b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts @@ -78,6 +78,27 @@ export async function action({ request, params }: ActionFunctionArgs) { ); } + //todo RunEngine support + /* + - If the `triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. + - Add an `engine` column to `Project` in the database. + + Add `engine` to the trigger.config file. It would default to "V1" for now, but you can set it to V2. 
+ + You run `npx trigger.dev@latest deploy` with config v2. + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. + + You run `npx trigger.dev@latest dev` with config v2 + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. + + When triggering + - triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. + - No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + + */ + const service = new TriggerTaskService(); try { From caa87af936080f3eaabaaeffa3045c13a0cafdee Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Sun, 10 Nov 2024 16:49:41 +0000 Subject: [PATCH 140/485] Improved the test so the run recovers, then completely fails after 2 attempts --- .../src/engine/tests/heartbeats.test.ts | 46 +++++++++++-------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index b199a5ad48..db38bb2531 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -340,7 +340,7 @@ describe("RunEngine heartbeats", () => { }); //create an attempt - const attemptResult = await engine.startRunAttempt({ + await engine.startRunAttempt({ runId: dequeued[0].run.id, snapshotId: dequeued[0].snapshot.id, }); @@ -354,33 +354,39 @@ describe("RunEngine heartbeats", () => { //wait long enough for the heartbeat to timeout await setTimeout(1_000); - //expect it to be pending with 3 consecutiveFailures + //expect it to be queued again const executionData2 = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData2); expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); - // await setTimeout(500); + //have to dequeue again + const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: 
"test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued2.length).toBe(1); - // //have to dequeue again - // const dequeued2 = await engine.dequeueFromMasterQueue({ - // consumerId: "test_12345", - // masterQueue: run.masterQueue, - // maxRunCount: 10, - // }); - // expect(dequeued2.length).toBe(1); + //create an attempt + await engine.startRunAttempt({ + runId: dequeued2[0].run.id, + snapshotId: dequeued2[0].snapshot.id, + }); - // //expect it to be pending - // const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - // assertNonNullable(executionData3); - // expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + //should be executing + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData3.run.status).toBe("EXECUTING"); - // await setTimeout(executingTimeout * 3); + //again wait long enough that the heartbeat fails + await setTimeout(1_000); - // //expect it to be pending with 3 consecutiveFailures - // const executionData4 = await engine.getRunExecutionData({ runId: run.id }); - // assertNonNullable(executionData4); - // expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); - // expect(executionData4.run.status).toBe("SYSTEM_FAILURE"); + //expect it to be queued again + const executionData4 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData4); + expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData4.run.status).toBe("SYSTEM_FAILURE"); } finally { await engine.quit(); } From 3049e47f88544ccf13c5b967104a24f9e31eb210 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 09:48:48 +0000 Subject: [PATCH 141/485] When an attempt fails we remove waitpoints blocking the run --- .../run-engine/src/engine/index.ts | 18 ++ 
.../src/engine/tests/waitForDuration.test.ts | 107 -------- .../src/engine/tests/waitpoints.test.ts | 241 ++++++++++++++++++ 3 files changed, 259 insertions(+), 107 deletions(-) delete mode 100644 internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts create mode 100644 internal-packages/run-engine/src/engine/tests/waitpoints.test.ts diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index b08bcf400e..d31e714d6c 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1835,6 +1835,12 @@ export class RunEngine { span.setAttribute("completionStatus", completion.ok); + //remove waitpoints blocking the run + const deletedCount = await this.#clearBlockingWaitpoints({ runId, tx }); + if (deletedCount > 0) { + this.logger.debug("Cleared blocking waitpoints", { runId, deletedCount }); + } + const failedAt = new Date(); if ( @@ -2262,6 +2268,17 @@ export class RunEngine { }); } + async #clearBlockingWaitpoints({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { + const prisma = tx ?? this.prisma; + const deleted = await prisma.taskRunWaitpoint.deleteMany({ + where: { + taskRunId: runId, + }, + }); + + return deleted.count; + } + //#region TaskRunExecutionSnapshots async #createExecutionSnapshot( prisma: PrismaClientOrTransaction, @@ -2522,6 +2539,7 @@ export class RunEngine { } case "EXECUTING": case "EXECUTING_WITH_WAITPOINTS": { + //todo when a run fails, do we need to fail any pending subtasks? 
const retryDelay = 250; diff --git a/internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts b/internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts deleted file mode 100644 index 2fe410f82b..0000000000 --- a/internal-packages/run-engine/src/engine/tests/waitForDuration.test.ts +++ /dev/null @@ -1,107 +0,0 @@ -import { - containerTest, - setupAuthenticatedEnvironment, - setupBackgroundWorker, -} from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; -import { expect } from "vitest"; -import { RunEngine } from "../index.js"; -import { setTimeout } from "timers/promises"; - -describe("RunEngine waitForDuration", () => { - containerTest("waitForDuration", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const 
dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - //waitForDuration - const date = new Date(Date.now() + 1000); - const result = await engine.waitForDuration({ - runId: run.id, - snapshotId: attemptResult.snapshot.id, - date, - releaseConcurrency: false, - }); - - expect(result.willWaitUntil.toISOString()).toBe(date.toISOString()); - - const executionData = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - - await setTimeout(1_500); - - const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); - expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING"); - } finally { - engine.quit(); - } - }); -}); diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts new file mode 100644 index 0000000000..4a4b33028b --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -0,0 +1,241 @@ +import { + assertNonNullable, + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; + +describe("RunEngine Waitpoints", () => { + containerTest("waitForDuration", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + 
port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //waitForDuration + const date = new Date(Date.now() + 1000); + const result = await engine.waitForDuration({ + runId: run.id, + snapshotId: attemptResult.snapshot.id, + date, + releaseConcurrency: false, + }); + + expect(result.willWaitUntil.toISOString()).toBe(date.toISOString()); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + await setTimeout(1_500); + + const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); + 
expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING"); + } finally { + engine.quit(); + } + }); + + containerTest( + "Waitpoints cleared if attempt fails", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //waitForDuration + const date = new Date(Date.now() + 60_000); + const result = await engine.waitForDuration({ + runId: run.id, + snapshotId: attemptResult.snapshot.id, + date, + 
releaseConcurrency: false, + }); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //fail the attempt (user error) + const error = { + type: "BUILT_IN_ERROR" as const, + name: "UserError", + message: "This is a user error", + stackTrace: "Error: This is a user error\n at :1:1", + }; + const failResult = await engine.completeRunAttempt({ + runId: executionData!.run.id, + snapshotId: executionData!.snapshot.id, + completion: { + ok: false, + id: executionData!.run.id, + error, + retry: { + timestamp: Date.now(), + delay: 0, + }, + }, + }); + expect(failResult).toBe("RETRY_IMMEDIATELY"); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + expect(executionData2.run.attemptNumber).toBe(1); + expect(executionData2.run.status).toBe("RETRYING_AFTER_FAILURE"); + expect(executionData2.completedWaitpoints.length).toBe(0); + + //check the waitpoint blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoint).toBeNull(); + } finally { + engine.quit(); + } + } + ); +}); From ae9cfbcc8afae90ac219e043f5ccfbd4d47eb441 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 13:27:07 +0000 Subject: [PATCH 142/485] Remove todo --- internal-packages/run-engine/src/engine/index.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index d31e714d6c..f812146590 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -2539,8 +2539,6 @@ export class RunEngine { } case "EXECUTING": case "EXECUTING_WITH_WAITPOINTS": { - //todo when a 
run fails, do we need to fail any pending subtasks? - const retryDelay = 250; //todo call attemptFailed and force requeuing From 05154aa5f7b53f5ba4a4f7bf136075ababf71ecc Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 13:28:53 +0000 Subject: [PATCH 143/485] Improved comment --- .../run-engine/src/engine/tests/waitpoints.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 4a4b33028b..22543fde9a 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -223,7 +223,7 @@ describe("RunEngine Waitpoints", () => { expect(executionData2.run.status).toBe("RETRYING_AFTER_FAILURE"); expect(executionData2.completedWaitpoints.length).toBe(0); - //check the waitpoint blocking the parent run + //check there are no waitpoints blocking the parent run const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ where: { taskRunId: run.id, From c7db64790a27cfb1da10ea7f200497f62542b4ee Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 13:38:21 +0000 Subject: [PATCH 144/485] Pending cancel heartbeat test --- .../src/engine/tests/heartbeats.test.ts | 101 +++++++++++++++++- 1 file changed, 100 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index db38bb2531..9d223dbf74 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -393,5 +393,104 @@ describe("RunEngine heartbeats", () => { } ); - //todo pending_cancel + containerTest("Pending cancel", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, 
"PRODUCTION"); + + const heartbeatTimeout = 100; + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + PENDING_CANCEL: heartbeatTimeout, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + + //cancel run + await engine.cancelRun({ runId: dequeued[0].run.id }); + + //expect it to be pending_cancel + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("PENDING_CANCEL"); + + //wait long enough for the heartbeat to timeout + await setTimeout(1_000); + + //expect it to be queued again + const executionData3 = await engine.getRunExecutionData({ runId: 
run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData3.run.status).toBe("CANCELED"); + } finally { + await engine.quit(); + } + }); }); From 42843cd2c2a51e731eed81e79978ff850b79addf Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 13:54:25 +0000 Subject: [PATCH 145/485] Emit a `runAttemptStarted` event that can be used for sending invocation usage --- .../run-engine/src/engine/eventBus.ts | 21 +++++++++++++++++ .../run-engine/src/engine/index.ts | 12 ++++++++++ .../src/engine/tests/trigger.test.ts | 23 ++++++++++++++----- 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 9f6e2a70cb..d9f4db0f3f 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -3,6 +3,21 @@ import { AuthenticatedEnvironment } from "../shared"; import { TaskRunError } from "@trigger.dev/core/v3"; export type EventBusEvents = { + //todo reportInvocationUsage() + runAttemptStarted: [ + { + time: Date; + run: { + id: string; + attemptNumber: number; + baseCostInCents: number; + }; + organization: { + id: string; + }; + }, + ]; + //todo eventRepository runExpired: [ { time: Date; @@ -13,6 +28,7 @@ export type EventBusEvents = { }; }, ]; + //todo eventRepository runSucceeded: [ { time: Date; @@ -24,6 +40,7 @@ export type EventBusEvents = { }; }, ]; + //todo eventRepository runFailed: [ { time: Date; @@ -35,6 +52,7 @@ export type EventBusEvents = { }; }, ]; + //todo eventRepository runRetryScheduled: [ { time: Date; @@ -49,6 +67,7 @@ export type EventBusEvents = { retryAt: Date; }, ]; + //todo eventRepository runCancelled: [ { time: Date; @@ -59,6 +78,7 @@ export type EventBusEvents = { }; }, ]; + //todo send socket message to the worker workerNotification: [ { time: Date; @@ -67,6 +87,7 @@ export type 
EventBusEvents = { }; }, ]; + //todo advanced logging executionSnapshotCreated: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index f812146590..df018198dc 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -945,6 +945,18 @@ export class RunEngine { throw new ServiceValidationError("Max attempts reached", 400); } + this.eventBus.emit("runAttemptStarted", { + time: new Date(), + run: { + id: taskRun.id, + attemptNumber: nextAttemptNumber, + baseCostInCents: taskRun.baseCostInCents, + }, + organization: { + id: environment.organization.id, + }, + }); + const result = await $transaction( prisma, async (tx) => { diff --git a/internal-packages/run-engine/src/engine/tests/trigger.test.ts b/internal-packages/run-engine/src/engine/tests/trigger.test.ts index 95d90a4a76..0882dec08c 100644 --- a/internal-packages/run-engine/src/engine/tests/trigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/trigger.test.ts @@ -37,7 +37,7 @@ describe("RunEngine trigger()", () => { centsPerMs: 0.0001, }, }, - baseCostInCents: 0.0001, + baseCostInCents: 0.0005, }, tracer: trace.getTracer("test", "0.0.0"), }); @@ -122,6 +122,11 @@ describe("RunEngine trigger()", () => { ); expect(envConcurrencyAfter).toBe(1); + let attemptEvent: EventBusEventArgs<"runAttemptStarted">[0] | undefined = undefined; + engine.eventBus.on("runAttemptStarted", (result) => { + attemptEvent = result; + }); + //create an attempt const attemptResult = await engine.startRunAttempt({ runId: dequeued[0].run.id, @@ -131,15 +136,21 @@ describe("RunEngine trigger()", () => { expect(attemptResult.run.status).toBe("EXECUTING"); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + //attempt event + assertNonNullable(attemptEvent); + const attemptedEvent = attemptEvent as EventBusEventArgs<"runAttemptStarted">[0]; + 
expect(attemptedEvent.run.id).toBe(run.id); + expect(attemptedEvent.run.baseCostInCents).toBe(0.0005); + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData2); expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); expect(executionData2.run.attemptNumber).toBe(1); expect(executionData2.run.status).toBe("EXECUTING"); - let event: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; + let successEvent: EventBusEventArgs<"runSucceeded">[0] | undefined = undefined; engine.eventBus.on("runSucceeded", (result) => { - event = result; + successEvent = result; }); //complete the run @@ -162,9 +173,9 @@ describe("RunEngine trigger()", () => { expect(executionData3.run.attemptNumber).toBe(1); expect(executionData3.run.status).toBe("COMPLETED_SUCCESSFULLY"); - //event - assertNonNullable(event); - const completedEvent = event as EventBusEventArgs<"runSucceeded">[0]; + //success event + assertNonNullable(successEvent); + const completedEvent = successEvent as EventBusEventArgs<"runSucceeded">[0]; expect(completedEvent.run.spanId).toBe(run.spanId); expect(completedEvent.run.output).toBe('{"foo":"bar"}'); expect(completedEvent.run.outputType).toBe("application/json"); From c3929c299e6ca02958cde0afbe27dbfab7e46d3a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 15:13:58 +0000 Subject: [PATCH 146/485] Added an env queue so we can track the number of queued items --- .../run-engine/src/run-queue/index.test.ts | 96 ++++++++++++++++++- .../run-engine/src/run-queue/index.ts | 55 +++++++++-- .../run-engine/src/run-queue/keyProducer.ts | 9 ++ .../run-engine/src/run-queue/types.ts | 2 + 4 files changed, 150 insertions(+), 12 deletions(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 8ad1a95438..75af69b456 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ 
b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -151,6 +151,8 @@ describe("RunQueue", () => { //initial queue length const result = await queue.lengthOfQueue(authenticatedEnvDev, messageDev.queue); expect(result).toBe(0); + const envQueueLength = await queue.lengthOfEnvQueue(authenticatedEnvDev); + expect(envQueueLength).toBe(0); //initial oldest message const oldestScore = await queue.oldestMessageInQueue(authenticatedEnvDev, messageDev.queue); @@ -168,6 +170,8 @@ describe("RunQueue", () => { //queue length const result2 = await queue.lengthOfQueue(authenticatedEnvDev, messageDev.queue); expect(result2).toBe(1); + const envQueueLength2 = await queue.lengthOfEnvQueue(authenticatedEnvDev); + expect(envQueueLength2).toBe(1); //oldest message const oldestScore2 = await queue.oldestMessageInQueue( @@ -219,6 +223,12 @@ describe("RunQueue", () => { ); expect(taskConcurrency2).toBe(1); + //queue lengths + const result3 = await queue.lengthOfQueue(authenticatedEnvDev, messageDev.queue); + expect(result3).toBe(0); + const envQueueLength3 = await queue.lengthOfEnvQueue(authenticatedEnvDev); + expect(envQueueLength3).toBe(0); + const dequeued2 = await queue.dequeueMessageFromMasterQueue( "test_12345", envMasterQueue, @@ -247,6 +257,8 @@ describe("RunQueue", () => { //initial queue length const result = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); expect(result).toBe(0); + const envQueueLength = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength).toBe(0); //initial oldest message const oldestScore = await queue.oldestMessageInQueue( @@ -265,8 +277,10 @@ describe("RunQueue", () => { }); //queue length - const result2 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); - expect(result2).toBe(1); + const queueLength = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(queueLength).toBe(1); + const envLength = await queue.lengthOfEnvQueue(authenticatedEnvProd); + 
expect(envLength).toBe(1); //oldest message const oldestScore2 = await queue.oldestMessageInQueue( @@ -318,6 +332,8 @@ describe("RunQueue", () => { //queue length const length2 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); expect(length2).toBe(0); + const envLength2 = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envLength2).toBe(0); const dequeued2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); expect(dequeued2.length).toBe(0); @@ -358,6 +374,8 @@ describe("RunQueue", () => { const initialLength2 = await queue.lengthOfQueue(authenticatedEnvProd, "task/other-task"); expect(initialLength1).toBe(15); expect(initialLength2).toBe(5); + const envQueueLength = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength).toBe(20); // Dequeue first batch of 10 messages const dequeued1 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); @@ -383,6 +401,8 @@ describe("RunQueue", () => { const finalLength2 = await queue.lengthOfQueue(authenticatedEnvProd, "task/other-task"); expect(finalLength1).toBe(0); expect(finalLength2).toBe(0); + const finalEnvQueueLength = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(finalEnvQueueLength).toBe(0); } finally { await queue.quit(); } @@ -435,9 +455,19 @@ describe("RunQueue", () => { masterQueues: "main", }); + const queueLength = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(queueLength).toBe(1); + const envQueueLength = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength).toBe(1); + const messages = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); expect(messages.length).toBe(1); + const queueLength2 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(queueLength2).toBe(0); + const envQueueLength2 = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength2).toBe(0); + //check the message is gone const key = 
queue.keys.messageKey(messages[0].message.orgId, messages[0].messageId); const exists = await redis.exists(key); @@ -461,6 +491,12 @@ describe("RunQueue", () => { ); expect(taskConcurrency).toBe(0); + //queue lengths + const queueLength3 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(queueLength3).toBe(0); + const envQueueLength3 = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength3).toBe(0); + //check the message is gone const exists2 = await redis.exists(key); expect(exists2).toBe(0); @@ -473,6 +509,56 @@ describe("RunQueue", () => { } }); + redisTest("Ack (before dequeue)", { timeout: 5_000 }, async ({ redisContainer, redis }) => { + const queue = new RunQueue({ + ...testOptions, + redis: { host: redisContainer.getHost(), port: redisContainer.getPort() }, + }); + + try { + await queue.enqueueMessage({ + env: authenticatedEnvProd, + message: messageProd, + masterQueues: "main", + }); + + const queueLength = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(queueLength).toBe(1); + const envQueueLength = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength).toBe(1); + + await queue.acknowledgeMessage(messageProd.orgId, messageProd.runId); + + //concurrencies + const queueConcurrency = await queue.currentConcurrencyOfQueue( + authenticatedEnvProd, + messageProd.queue + ); + expect(queueConcurrency).toBe(0); + const envConcurrency = await queue.currentConcurrencyOfEnvironment(authenticatedEnvProd); + expect(envConcurrency).toBe(0); + const projectConcurrency = await queue.currentConcurrencyOfProject(authenticatedEnvProd); + expect(projectConcurrency).toBe(0); + const taskConcurrency = await queue.currentConcurrencyOfTask( + authenticatedEnvProd, + messageProd.taskIdentifier + ); + expect(taskConcurrency).toBe(0); + + //queue lengths + const queueLength3 = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(queueLength3).toBe(0); + const 
envQueueLength3 = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength3).toBe(0); + + //dequeue + const messages2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); + expect(messages2.length).toBe(0); + } finally { + await queue.quit(); + } + }); + redisTest("Nacking", { timeout: 15_000 }, async ({ redisContainer, redis }) => { const queue = new RunQueue({ ...testOptions, @@ -531,6 +617,12 @@ describe("RunQueue", () => { ); expect(taskConcurrency2).toBe(0); + //queue lengths + const queueLength = await queue.lengthOfQueue(authenticatedEnvProd, messageProd.queue); + expect(queueLength).toBe(1); + const envQueueLength = await queue.lengthOfEnvQueue(authenticatedEnvProd); + expect(envQueueLength).toBe(1); + //check the message is there const exists2 = await redis.exists(key); expect(exists2).toBe(1); diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 3290d3fac3..dbbdd8d82e 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -137,6 +137,10 @@ export class RunQueue { return this.redis.zcard(this.keys.queueKey(env, queue, concurrencyKey)); } + public async lengthOfEnvQueue(env: MinimalAuthenticatedEnvironment) { + return this.redis.zcard(this.keys.envQueueKey(env)); + } + public async oldestMessageInQueue( env: MinimalAuthenticatedEnvironment, queue: string, @@ -308,6 +312,7 @@ export class RunQueue { envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), projectCurrentConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(queue), messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), + envQueueKey: this.keys.envQueueKeyFromQueue(queue), taskCurrentConcurrentKeyPrefix: this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), }); @@ -383,6 +388,7 @@ export class RunQueue { message.queue, message.taskIdentifier ), + envQueueKey: 
this.keys.envQueueKeyFromQueue(message.queue), projectConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(message.queue), }); }, @@ -437,6 +443,7 @@ export class RunQueue { const projectConcurrencyKey = this.keys.projectCurrentConcurrencyKeyFromQueue( message.queue ); + const envQueueKey = this.keys.envQueueKeyFromQueue(message.queue); message.attempt = message.attempt + 1; if (message.attempt >= maxAttempts) { @@ -446,6 +453,7 @@ export class RunQueue { concurrencyKey, envConcurrencyKey, projectConcurrencyKey, + envQueueKey, taskConcurrencyKey, "dlq", messageId, @@ -464,6 +472,7 @@ export class RunQueue { concurrencyKey, envConcurrencyKey, projectConcurrencyKey, + envQueueKey, taskConcurrencyKey, messageId, messageScore, @@ -478,6 +487,7 @@ export class RunQueue { concurrencyKey, envConcurrencyKey, projectConcurrencyKey, + envQueueKey, taskConcurrencyKey, //args messageId, @@ -957,6 +967,7 @@ export class RunQueue { envConcurrencyKey, taskConcurrencyKey, projectConcurrencyKey, + this.keys.envQueueKeyFromQueue(message.queue), message.queue, message.runId, JSON.stringify(message), @@ -973,6 +984,7 @@ export class RunQueue { envCurrentConcurrencyKey, projectCurrentConcurrencyKey, messageKeyPrefix, + envQueueKey, taskCurrentConcurrentKeyPrefix, }: { messageQueue: string; @@ -982,6 +994,7 @@ export class RunQueue { envCurrentConcurrencyKey: string; projectCurrentConcurrencyKey: string; messageKeyPrefix: string; + envQueueKey: string; taskCurrentConcurrentKeyPrefix: string; }): Promise { const result = await this.redis.dequeueMessage( @@ -993,6 +1006,7 @@ export class RunQueue { envCurrentConcurrencyKey, projectCurrentConcurrencyKey, messageKeyPrefix, + envQueueKey, taskCurrentConcurrentKeyPrefix, //args messageQueue, @@ -1048,6 +1062,7 @@ export class RunQueue { concurrencyKey, envConcurrencyKey, taskConcurrencyKey, + envQueueKey, projectConcurrencyKey, }: { masterQueues: string[]; @@ -1056,6 +1071,7 @@ export class RunQueue { concurrencyKey: string; 
envConcurrencyKey: string; taskConcurrencyKey: string; + envQueueKey: string; projectConcurrencyKey: string; messageId: string; }) { @@ -1065,6 +1081,7 @@ export class RunQueue { concurrencyKey, envConcurrencyKey, projectConcurrencyKey, + envQueueKey, taskConcurrencyKey, messageId, masterQueues, @@ -1077,6 +1094,7 @@ export class RunQueue { concurrencyKey, envConcurrencyKey, projectConcurrencyKey, + envQueueKey, taskConcurrencyKey, messageId, JSON.stringify(masterQueues) @@ -1142,7 +1160,7 @@ export class RunQueue { #registerCommands() { this.redis.defineCommand("enqueueMessage", { - numberOfKeys: 6, + numberOfKeys: 7, lua: ` local queue = KEYS[1] local messageKey = KEYS[2] @@ -1150,6 +1168,7 @@ local concurrencyKey = KEYS[3] local envConcurrencyKey = KEYS[4] local taskConcurrencyKey = KEYS[5] local projectConcurrencyKey = KEYS[6] +local envQueueKey = KEYS[7] local queueName = ARGV[1] local messageId = ARGV[2] @@ -1163,6 +1182,9 @@ redis.call('SET', messageKey, messageData) -- Add the message to the queue redis.call('ZADD', queue, messageScore, messageId) +-- Add the message to the env queue +redis.call('ZADD', envQueueKey, messageScore, messageId) + -- Rebalance the parent queues for _, parentQueue in ipairs(parentQueues) do local earliestMessage = redis.call('ZRANGE', queue, 0, 0, 'WITHSCORES') @@ -1182,7 +1204,7 @@ redis.call('SREM', projectConcurrencyKey, messageId) }); this.redis.defineCommand("dequeueMessage", { - numberOfKeys: 8, + numberOfKeys: 9, lua: ` local childQueue = KEYS[1] local concurrencyLimitKey = KEYS[2] @@ -1191,7 +1213,8 @@ local currentConcurrencyKey = KEYS[4] local envCurrentConcurrencyKey = KEYS[5] local projectConcurrencyKey = KEYS[6] local messageKeyPrefix = KEYS[7] -local taskCurrentConcurrentKeyPrefix = KEYS[8] +local envQueueKey = KEYS[8] +local taskCurrentConcurrentKeyPrefix = KEYS[9] local childQueueName = ARGV[1] local currentTime = tonumber(ARGV[2]) @@ -1237,6 +1260,7 @@ local taskConcurrencyKey = taskCurrentConcurrentKeyPrefix .. 
taskIdentifier -- Update concurrency redis.call('ZREM', childQueue, messageId) +redis.call('ZREM', envQueueKey, messageId) redis.call('SADD', currentConcurrencyKey, messageId) redis.call('SADD', envCurrentConcurrencyKey, messageId) redis.call('SADD', projectConcurrencyKey, messageId) @@ -1257,7 +1281,7 @@ return {messageId, messageScore, messagePayload} -- Return message details }); this.redis.defineCommand("acknowledgeMessage", { - numberOfKeys: 6, + numberOfKeys: 7, lua: ` -- Keys: local messageKey = KEYS[1] @@ -1265,7 +1289,8 @@ local messageQueue = KEYS[2] local concurrencyKey = KEYS[3] local envCurrentConcurrencyKey = KEYS[4] local projectCurrentConcurrencyKey = KEYS[5] -local taskCurrentConcurrencyKey = KEYS[6] +local envQueueKey = KEYS[6] +local taskCurrentConcurrencyKey = KEYS[7] -- Args: local messageId = ARGV[1] @@ -1276,6 +1301,7 @@ redis.call('DEL', messageKey) -- Remove the message from the queue redis.call('ZREM', messageQueue, messageId) +redis.call('ZREM', envQueueKey, messageId) -- Rebalance the parent queues for _, parentQueue in ipairs(parentQueues) do @@ -1296,7 +1322,7 @@ redis.call('SREM', taskCurrentConcurrencyKey, messageId) }); this.redis.defineCommand("nackMessage", { - numberOfKeys: 6, + numberOfKeys: 7, lua: ` -- Keys: local messageKey = KEYS[1] @@ -1304,7 +1330,8 @@ local messageQueueKey = KEYS[2] local concurrencyKey = KEYS[3] local envConcurrencyKey = KEYS[4] local projectConcurrencyKey = KEYS[5] -local taskConcurrencyKey = KEYS[6] +local envQueueKey = KEYS[6] +local taskConcurrencyKey = KEYS[7] -- Args: local messageId = ARGV[1] @@ -1323,6 +1350,7 @@ redis.call('SREM', taskConcurrencyKey, messageId) -- Enqueue the message into the queue redis.call('ZADD', messageQueueKey, messageScore, messageId) +redis.call('ZADD', envQueueKey, messageScore, messageId) -- Rebalance the parent queues for _, parentQueue in ipairs(parentQueues) do @@ -1337,7 +1365,7 @@ end }); this.redis.defineCommand("moveToDeadLetterQueue", { - numberOfKeys: 7, + 
numberOfKeys: 8, lua: ` -- Keys: local messageKey = KEYS[1] @@ -1345,8 +1373,9 @@ local messageQueue = KEYS[2] local concurrencyKey = KEYS[3] local envCurrentConcurrencyKey = KEYS[4] local projectCurrentConcurrencyKey = KEYS[5] -local taskCurrentConcurrencyKey = KEYS[6] -local deadLetterQueueKey = KEYS[7] +local envQueueKey = KEYS[6] +local taskCurrentConcurrencyKey = KEYS[7] +local deadLetterQueueKey = KEYS[8] -- Args: local messageId = ARGV[1] @@ -1354,6 +1383,7 @@ local parentQueues = cjson.decode(ARGV[2]) -- Remove the message from the queue redis.call('ZREM', messageQueue, messageId) +redis.call('ZREM', envQueueKey, messageId) -- Rebalance the parent queues for _, parentQueue in ipairs(parentQueues) do @@ -1502,6 +1532,7 @@ declare module "ioredis" { envConcurrencyKey: string, taskConcurrencyKey: string, projectConcurrencyKey: string, + envQueueKey: string, //args queueName: string, messageId: string, @@ -1520,6 +1551,7 @@ declare module "ioredis" { envConcurrencyKey: string, projectConcurrencyKey: string, messageKeyPrefix: string, + envQueueKey: string, taskCurrentConcurrentKeyPrefix: string, //args childQueueName: string, @@ -1534,6 +1566,7 @@ declare module "ioredis" { concurrencyKey: string, envConcurrencyKey: string, projectConcurrencyKey: string, + envQueueKey: string, taskConcurrencyKey: string, messageId: string, masterQueues: string, @@ -1546,6 +1579,7 @@ declare module "ioredis" { concurrencyKey: string, envConcurrencyKey: string, projectConcurrencyKey: string, + envQueueKey: string, taskConcurrencyKey: string, messageId: string, messageData: string, @@ -1560,6 +1594,7 @@ declare module "ioredis" { concurrencyKey: string, envConcurrencyKey: string, projectConcurrencyKey: string, + envQueueKey: string, taskConcurrencyKey: string, deadLetterQueueKey: string, messageId: string, diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.ts b/internal-packages/run-engine/src/run-queue/keyProducer.ts index 8c145ce16e..1ba42f7f0f 100644 --- 
a/internal-packages/run-engine/src/run-queue/keyProducer.ts +++ b/internal-packages/run-engine/src/run-queue/keyProducer.ts @@ -57,6 +57,15 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { .join(":"); } + envQueueKey(env: MinimalAuthenticatedEnvironment) { + return [this.orgKeySection(env.organization.id), this.envKeySection(env.id)].join(":"); + } + + envQueueKeyFromQueue(queue: string) { + const { orgId, envId } = this.extractComponentsFromQueue(queue); + return [this.orgKeySection(orgId), this.envKeySection(envId)].join(":"); + } + concurrencyLimitKeyFromQueue(queue: string) { const concurrencyQueueName = queue.replace(/:ck:.+$/, ""); return `${concurrencyQueueName}:${constants.CONCURRENCY_LIMIT_PART}`; diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index 8b83426493..2d936264c1 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -46,6 +46,8 @@ export interface RunQueueKeyProducer { queueCurrentConcurrencyScanPattern(): string; //queue queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string): string; + envQueueKey(env: MinimalAuthenticatedEnvironment): string; + envQueueKeyFromQueue(queue: string): string; queueConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment, queue: string): string; concurrencyLimitKeyFromQueue(queue: string): string; currentConcurrencyKeyFromQueue(queue: string): string; From d40249f639f9d75e078b9657c5deaabb0d6b8128 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 15:23:29 +0000 Subject: [PATCH 147/485] Increase the delay to make the test more robust --- .../run-engine/src/engine/tests/heartbeats.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index 
9d223dbf74..cebf1d9a33 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -105,7 +105,7 @@ describe("RunEngine heartbeats", () => { assertNonNullable(executionData2); expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); - await setTimeout(500); + await setTimeout(1_000); //have to dequeue again const dequeued2 = await engine.dequeueFromMasterQueue({ From 9885749a4ea919885a93b27163efd790ad719e4a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 15:23:50 +0000 Subject: [PATCH 148/485] Added the env queue length guard to triggerTaskV2 --- .../app/v3/services/triggerTaskV2.server.ts | 44 +++++++++++++++++++ .../run-engine/src/engine/index.ts | 4 ++ 2 files changed, 48 insertions(+) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 2ec75c8c9c..7de918d5b7 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -21,6 +21,7 @@ import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; import { handleMetadataPacket } from "~/utils/packets"; import { WorkerGroupService } from "./worker/workerGroupService.server"; +import { engine } from "../runEngine.server"; export type TriggerTaskServiceOptions = { idempotencyKey?: string; @@ -86,6 +87,25 @@ export class TriggerTaskServiceV2 extends WithRunEngine { } } + //check the env queue isn't beyond the limit + const queueSizeGuard = await guardQueueSizeLimitsForEnv(environment); + + logger.debug("Queue size guard result", { + queueSizeGuard, + environment: { + id: environment.id, + type: environment.type, + organization: environment.organization, + project: environment.project, + }, + }); + + if (!queueSizeGuard.isWithinLimits) { + throw new ServiceValidationError( 
+ `Cannot trigger ${taskId} as the queue size limit for this environment has been reached. The maximum size is ${queueSizeGuard.maximumSize}` + ); + } + if ( body.options?.tags && typeof body.options.tags !== "string" && @@ -584,3 +604,27 @@ function stringifyDuration(seconds: number): string | undefined { return result; } + +async function guardQueueSizeLimitsForEnv(environment: AuthenticatedEnvironment) { + const maximumSize = getMaximumSizeForEnvironment(environment); + + if (typeof maximumSize === "undefined") { + return { isWithinLimits: true }; + } + + const queueSize = await engine.lengthOfEnvQueue(environment); + + return { + isWithinLimits: queueSize < maximumSize, + maximumSize, + queueSize, + }; +} + +function getMaximumSizeForEnvironment(environment: AuthenticatedEnvironment): number | undefined { + if (environment.type === "DEVELOPMENT") { + return environment.organization.maximumDevQueueSize ?? env.MAXIMUM_DEV_QUEUE_SIZE; + } else { + return environment.organization.maximumDeployedQueueSize ?? env.MAXIMUM_DEPLOYED_QUEUE_SIZE; + } +} diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index df018198dc..84402da2f1 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -468,6 +468,10 @@ export class RunEngine { */ async batchTrigger() {} + async lengthOfEnvQueue(environment: MinimalAuthenticatedEnvironment) { + return this.runQueue.lengthOfEnvQueue(environment); + } + /** * Gets a fairly selected run from the specified master queue, returning the information required to run it. 
* @param consumerId: The consumer that is pulling, allows multiple consumers to pull from the same queue From 8670edb7743d24dc3661a8bbe9f85978899dd165 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 17:22:03 +0000 Subject: [PATCH 149/485] Move the queue length function down --- internal-packages/run-engine/src/engine/index.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 84402da2f1..ef8371d374 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -468,10 +468,6 @@ export class RunEngine { */ async batchTrigger() {} - async lengthOfEnvQueue(environment: MinimalAuthenticatedEnvironment) { - return this.runQueue.lengthOfEnvQueue(environment); - } - /** * Gets a fairly selected run from the specified master queue, returning the information required to run it. * @param consumerId: The consumer that is pulling, allows multiple consumers to pull from the same queue @@ -1469,6 +1465,10 @@ export class RunEngine { }); } + async lengthOfEnvQueue(environment: MinimalAuthenticatedEnvironment) { + return this.runQueue.lengthOfEnvQueue(environment); + } + /** This completes a waitpoint and updates all entries so the run isn't blocked, * if they're no longer blocked. This doesn't suffer from race conditions. 
*/ async completeWaitpoint({ From cafb313aafb351b24ad6bde7bd25da7853ffb7f7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 17:22:22 +0000 Subject: [PATCH 150/485] Same logic for waitingForDeploy for dev --- .../run-engine/src/engine/index.ts | 35 +++++-------------- 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index ef8371d374..8f5514a67b 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -569,30 +569,13 @@ export class RunEngine { result, }); - if (result.run.runtimeEnvironment.type === "DEVELOPMENT") { - //it will automatically be requeued X times depending on the queue retry settings - const gotRequeued = await this.runQueue.nackMessage(orgId, runId); - - if (!gotRequeued) { - await this.#systemFailure({ - runId: result.run.id, - error: { - type: "INTERNAL_ERROR", - code: "COULD_NOT_FIND_TASK", - message: `We tried to dequeue this DEV run multiple times but could not find the task to run: ${result.run.taskIdentifier}`, - }, - tx: prisma, - }); - } - } else { - //not deployed yet, so we'll wait for the deploy - await this.#waitingForDeploy({ - runId, - tx: prisma, - }); - //we ack because when it's deployed it will be requeued - await this.runQueue.acknowledgeMessage(orgId, runId); - } + //not deployed yet, so we'll wait for the deploy + await this.#waitingForDeploy({ + runId, + tx: prisma, + }); + //we ack because when it's deployed it will be requeued + await this.runQueue.acknowledgeMessage(orgId, runId); return null; } @@ -1593,10 +1576,10 @@ export class RunEngine { }; } - //we know it's the latest snapshot, so we can checkpoint - //todo check the status is checkpointable + //create a new execution snapshot, with the checkpoint + //todo return a Result, which will determine if the server is allowed to shutdown }); } From 
485b1b9a9676320049a22504a82945089055dda0 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 17:23:02 +0000 Subject: [PATCH 151/485] Updated the engine readme after speaking to Eric --- internal-packages/run-engine/README.md | 38 ++++++++++++++++++++------ 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 3fc107eb22..11e3559648 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -79,7 +79,7 @@ They can have output data associated with them, e.g. the finished run payload. T There are currently three types: - `RUN` which gets completed when the associated run completes. Every run has an `associatedWaitpoint` that matches the lifetime of the run. - `DATETIME` which gets completed when the datetime is reached. - - `EVENT` which gets completed when that event occurs. + - `MANUAL` which gets completed when that event occurs. Waitpoints can have an idempotencyKey which allows stops them from being created multiple times. This is especially useful for event waitpoints, where you don't want to create a new waitpoint for the same event twice. @@ -88,6 +88,11 @@ Waitpoints can have an idempotencyKey which allows stops them from being created #### `wait.for()` or `wait.until()` Wait for a future time, then continue. We should add the option to pass an `idempotencyKey` so a second attempt doesn't wait again. By default it would wait again. +```ts +await wait.until(new Date('2022-01-01T00:00:00Z'), { idempotencyKey: "first-wait" }); +await wait.until(new Date('2022-01-01T00:00:00Z'), { idempotencyKey: "second-wait" }); +``` + #### `triggerAndWait()` or `batchTriggerAndWait()` Trigger and then wait for run(s) to finish. If the run fails it will still continue but with the errors so the developer can decide what to do. @@ -102,6 +107,24 @@ Wait until a request has been received at the URL that you are given. 
This is us A more advanced SDK which would require uses to explicitly create a waitpoint. We would also need `createWaitpoint()`, `completeWaitpoint()`, and `failWaitpoint()`. +```ts +const waitpoint = await waitpoints.create({ idempotencyKey: `purchase-${payload.cart.id}` }); +const waitpoint = await waitpoints.retrieve(waitpoint.id); +const waitpoint = await waitpoints.complete(waitpoint.id, result); +const waitpoint = await waitpoints.fail(waitpoint.id, error); + +export const approvalFlow = task({ + id: "approvalFlow", + run: async (payload) => { + //...do stuff + + await wait.forWaitpoint(waitpoint.id, { timeout: "1h" }); + + //...do more stuff + }, +}); +``` + #### `wait.forRunToComplete(runId)` You could wait for another run (or runs) using their run ids. This would allow you to wait for runs that you haven't triggered inside that run. @@ -115,10 +138,12 @@ Suggested usage: ```ts await myTask.trigger( { some: "data" }, - { debounce: { key: user.id, wait: "30s", maxWait: "2m", leading: true } } + { debounce: { key: user.id, wait: "30s", maxWait: "2m", } } ); ``` +//todo do you get the first or last payload when it triggers? Bit confusing with leading. + Implementation: The Waitpoint `idempotencyKey` should be prefixed like `debounce-${debounce.key}`. Also probably with the `taskIdentifier`? @@ -127,15 +152,10 @@ The Waitpoint `idempotencyKey` should be prefixed like `debounce-${debounce.key 2. If `leading` is false (default): - If there's a waiting run: update its payload and extend the waitpoint's completionTime - If no waiting run: create a new run and DATETIME waitpoint -3. If `leading` is true: - - If there is no pending waitpoint: execute immediately but create a waitpoint with the idempotencyKey. - - If there is a pending waitpoint - - If there's a blocked run already, update the payload and extend the `completionTime`. - - If there's not a blocked run, create the run and block it with the waitpoint. -4. If `maxWait` is specified: +3. 
If `maxWait` is specified: - The waitpoint's completionTime is capped at the waitpoint `createdAt` + maxWait. - Ensures execution happens even during constant triggering -5. When the waitpoint is completed we need to clear the `idempotencyKey`. To clear an `idempotencyKey`, move the original value to the `inactiveIdempotencyKey` column and set the main one to a new randomly generated one. +4. When the waitpoint is completed we need to clear the `idempotencyKey`. To clear an `idempotencyKey`, move the original value to the `inactiveIdempotencyKey` column and set the main one to a new randomly generated one. //todo implement auto-deactivating of the idempotencyKey when the waitpoint is completed. This would make it easier to implement features like this. From 4dfcae64db3183884fa720d67db0592824974f5f Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 17:24:56 +0000 Subject: [PATCH 152/485] WorkerGroup migrations --- .../migration.sql | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql diff --git a/internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql b/internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql new file mode 100644 index 0000000000..239d8933e2 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql @@ -0,0 +1,126 @@ +/* + Warnings: + + - You are about to drop the `Worker` table. If the table is not empty, all the data it contains will be lost. + - A unique constraint covering the columns `[defaultWorkerGroupId]` on the table `Project` will be added. 
If there are existing duplicate values, this will fail. + - A unique constraint covering the columns `[masterQueue]` on the table `WorkerGroup` will be added. If there are existing duplicate values, this will fail. + - A unique constraint covering the columns `[tokenId]` on the table `WorkerGroup` will be added. If there are existing duplicate values, this will fail. + - Added the required column `name` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. + - Added the required column `tokenId` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. + - Added the required column `type` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. + - Added the required column `updatedAt` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. + +*/ +-- CreateEnum +CREATE TYPE "WorkerInstanceGroupType" AS ENUM ('SHARED', 'UNMANAGED'); + +-- AlterTable +ALTER TABLE "BackgroundWorker" ADD COLUMN "workerGroupId" TEXT; + +-- AlterTable +ALTER TABLE "Project" ADD COLUMN "defaultWorkerGroupId" TEXT; + +-- AlterTable +ALTER TABLE "TaskRunExecutionSnapshot" ADD COLUMN "lastHeartbeatAt" TIMESTAMP(3), +ADD COLUMN "workerId" TEXT; + +-- AlterTable +ALTER TABLE "WorkerGroup" ADD COLUMN "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, +ADD COLUMN "description" TEXT, +ADD COLUMN "hidden" BOOLEAN NOT NULL DEFAULT false, +ADD COLUMN "name" TEXT NOT NULL, +ADD COLUMN "organizationId" TEXT, +ADD COLUMN "projectId" TEXT, +ADD COLUMN "tokenId" TEXT NOT NULL, +ADD COLUMN "type" "WorkerInstanceGroupType" NOT NULL, +ADD COLUMN "updatedAt" TIMESTAMP(3) NOT NULL; + +-- DropTable +DROP TABLE "Worker"; + +-- CreateTable +CREATE TABLE "FeatureFlag" ( + "id" TEXT NOT NULL, + "key" TEXT NOT NULL, + "value" JSONB, + + CONSTRAINT "FeatureFlag_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE 
"WorkerInstance" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "workerGroupId" TEXT NOT NULL, + "organizationId" TEXT, + "projectId" TEXT, + "environmentId" TEXT, + "deploymentId" TEXT, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + "lastDequeueAt" TIMESTAMP(3), + "lastHeartbeatAt" TIMESTAMP(3), + + CONSTRAINT "WorkerInstance_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "WorkerGroupToken" ( + "id" TEXT NOT NULL, + "tokenHash" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "WorkerGroupToken_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "FeatureFlag_key_key" ON "FeatureFlag"("key"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerInstance_workerGroupId_name_key" ON "WorkerInstance"("workerGroupId", "name"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerGroupToken_tokenHash_key" ON "WorkerGroupToken"("tokenHash"); + +-- CreateIndex +CREATE UNIQUE INDEX "Project_defaultWorkerGroupId_key" ON "Project"("defaultWorkerGroupId"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerGroup_masterQueue_key" ON "WorkerGroup"("masterQueue"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerGroup_tokenId_key" ON "WorkerGroup"("tokenId"); + +-- AddForeignKey +ALTER TABLE "Project" ADD CONSTRAINT "Project_defaultWorkerGroupId_fkey" FOREIGN KEY ("defaultWorkerGroupId") REFERENCES "WorkerGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "BackgroundWorker" ADD CONSTRAINT "BackgroundWorker_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "WorkerInstance"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT 
"WorkerInstance_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerGroup"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_environmentId_fkey" FOREIGN KEY ("environmentId") REFERENCES "RuntimeEnvironment"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_deploymentId_fkey" FOREIGN KEY ("deploymentId") REFERENCES "WorkerDeployment"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerGroup" ADD CONSTRAINT "WorkerGroup_tokenId_fkey" FOREIGN KEY ("tokenId") REFERENCES "WorkerGroupToken"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerGroup" ADD CONSTRAINT "WorkerGroup_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerGroup" ADD CONSTRAINT "WorkerGroup_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; From 0af6eaddf99191d12552989c12d0487480903b34 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 17:25:36 +0000 Subject: [PATCH 153/485] Waitpoint type event -> manual --- .../migration.sql | 14 ++++++++++++++ internal-packages/database/prisma/schema.prisma | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql diff --git 
a/internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql b/internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql new file mode 100644 index 0000000000..9e160392a2 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql @@ -0,0 +1,14 @@ +/* + Warnings: + + - The values [EVENT] on the enum `WaitpointType` will be removed. If these variants are still used in the database, this will fail. + +*/ +-- AlterEnum +BEGIN; +CREATE TYPE "WaitpointType_new" AS ENUM ('RUN', 'DATETIME', 'MANUAL'); +ALTER TABLE "Waitpoint" ALTER COLUMN "type" TYPE "WaitpointType_new" USING ("type"::text::"WaitpointType_new"); +ALTER TYPE "WaitpointType" RENAME TO "WaitpointType_old"; +ALTER TYPE "WaitpointType_new" RENAME TO "WaitpointType"; +DROP TYPE "WaitpointType_old"; +COMMIT; diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 06e8862cad..7843ed4bb1 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2050,7 +2050,7 @@ model Waitpoint { enum WaitpointType { RUN DATETIME - EVENT + MANUAL } enum WaitpointStatus { From b03982b7794b06f73a9dab73a024558b687f19aa Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 19:25:43 +0000 Subject: [PATCH 154/485] Increase some of the timeouts in the heartbeat tests more to avoid flakiness --- .../run-engine/src/engine/tests/heartbeats.test.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index cebf1d9a33..176da6025f 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -219,15 
+219,13 @@ describe("RunEngine heartbeats", () => { assertNonNullable(executionData); expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); - await setTimeout(pendingExecutingTimeout * 2); + await setTimeout(500); //expect it to be pending with 3 consecutiveFailures const executionData2 = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData2); expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); - await setTimeout(500); - //have to dequeue again const dequeued2 = await engine.dequeueFromMasterQueue({ consumerId: "test_12345", From 96b3e8edfa368980315319e5f0f0b3f9366b52af Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 19:26:18 +0000 Subject: [PATCH 155/485] Manually creating, blocking, and completing waitpoints added to the run engine. With a test --- .../run-engine/src/engine/index.ts | 178 +++++++++++++----- .../src/engine/tests/waitpoints.test.ts | 128 +++++++++++++ 2 files changed, 260 insertions(+), 46 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 8f5514a67b..2223ee9278 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -346,10 +346,12 @@ export class RunEngine { //triggerAndWait or batchTriggerAndWait if (resumeParentOnCompletion && parentTaskRunId) { //this will block the parent run from continuing until this waitpoint is completed (and removed) - await this.#blockRunWithWaitpoint(prisma, { - orgId: environment.organization.id, + await this.blockRunWithWaitpoint({ runId: parentTaskRunId, - waitpoint: associatedWaitpoint, + waitpointId: associatedWaitpoint.id, + environmentId: associatedWaitpoint.environmentId, + projectId: associatedWaitpoint.projectId, + tx: prisma, }); //release the concurrency @@ -1175,10 +1177,12 @@ export class RunEngine { } //block the run - await this.#blockRunWithWaitpoint(prisma, { - orgId: 
run.runtimeEnvironment.organizationId, + await this.blockRunWithWaitpoint({ runId, - waitpoint, + waitpointId: waitpoint.id, + environmentId: waitpoint.environmentId, + projectId: waitpoint.projectId, + tx: prisma, }); //release concurrency @@ -1452,6 +1456,128 @@ export class RunEngine { return this.runQueue.lengthOfEnvQueue(environment); } + /** This creates a MANUAL waitpoint, that can be explicitly completed (or failed). + * If you pass an `idempotencyKey` and it already exists, it will return the existing waitpoint. + */ + async createManualWaitpoint({ + environmentId, + projectId, + idempotencyKey, + }: { + environmentId: string; + projectId: string; + idempotencyKey?: string; + }) { + const existingWaitpoint = idempotencyKey + ? await this.prisma.waitpoint.findUnique({ + where: { + environmentId_idempotencyKey: { + environmentId, + idempotencyKey, + }, + }, + }) + : undefined; + + if (existingWaitpoint) { + return existingWaitpoint; + } + + return this.prisma.waitpoint.create({ + data: { + type: "MANUAL", + idempotencyKey: idempotencyKey ?? nanoid(24), + userProvidedIdempotencyKey: !!idempotencyKey, + environmentId, + projectId, + }, + }); + } + + async getWaitpoint({ + waitpointId, + environmentId, + projectId, + }: { + environmentId: string; + projectId: string; + waitpointId: string; + }) { + const waitpoint = await this.prisma.waitpoint.findFirst({ + where: { id: waitpointId }, + include: { + blockingTaskRuns: { + select: { + taskRun: { + select: { + id: true, + friendlyId: true, + }, + }, + }, + }, + }, + }); + + if (!waitpoint) return null; + if (waitpoint.environmentId !== environmentId) return null; + + return waitpoint; + } + + /** + * Prevents a run from continuing until the waitpoint is completed. + */ + async blockRunWithWaitpoint({ + runId, + waitpointId, + projectId, + tx, + }: { + runId: string; + waitpointId: string; + environmentId: string; + projectId: string; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? 
this.prisma; + + await this.runLock.lock([runId], 5000, async (signal) => { + const taskWaitpoint = await prisma.taskRunWaitpoint.create({ + data: { + taskRunId: runId, + waitpointId: waitpointId, + projectId: projectId, + }, + }); + + const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + + let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; + if ( + latestSnapshot.executionStatus === "EXECUTING" || + latestSnapshot.executionStatus === "EXECUTING_WITH_WAITPOINTS" + ) { + newStatus = "EXECUTING_WITH_WAITPOINTS"; + } + + //if the state has changed, create a new snapshot + if (newStatus !== latestSnapshot.executionStatus) { + await this.#createExecutionSnapshot(prisma, { + run: { + id: latestSnapshot.runId, + status: latestSnapshot.runStatus, + attemptNumber: latestSnapshot.attemptNumber, + }, + snapshot: { + executionStatus: newStatus, + description: "Run was blocked by a waitpoint.", + }, + }); + } + }); + } + /** This completes a waitpoint and updates all entries so the run isn't blocked, * if they're no longer blocked. This doesn't suffer from race conditions. 
*/ async completeWaitpoint({ @@ -2227,46 +2353,6 @@ export class RunEngine { }; } - async #blockRunWithWaitpoint( - tx: PrismaClientOrTransaction, - { orgId, runId, waitpoint }: { orgId: string; runId: string; waitpoint: Waitpoint } - ) { - await this.runLock.lock([runId], 5000, async (signal) => { - const taskWaitpoint = await tx.taskRunWaitpoint.create({ - data: { - taskRunId: runId, - waitpointId: waitpoint.id, - projectId: waitpoint.projectId, - }, - }); - - const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, runId); - - let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; - if ( - latestSnapshot.executionStatus === "EXECUTING" || - latestSnapshot.executionStatus === "EXECUTING_WITH_WAITPOINTS" - ) { - newStatus = "EXECUTING_WITH_WAITPOINTS"; - } - - //if the state has changed, create a new snapshot - if (newStatus !== latestSnapshot.executionStatus) { - await this.#createExecutionSnapshot(tx, { - run: { - id: latestSnapshot.runId, - status: latestSnapshot.runStatus, - attemptNumber: latestSnapshot.attemptNumber, - }, - snapshot: { - executionStatus: newStatus, - description: "Run was blocked by a waitpoint.", - }, - }); - } - }); - } - async #clearBlockingWaitpoints({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { const prisma = tx ?? 
this.prisma; const deleted = await prisma.taskRunWaitpoint.deleteMany({ diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 22543fde9a..539e4e5033 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -238,4 +238,132 @@ describe("RunEngine Waitpoints", () => { } } ); + + containerTest( + "Create, block, and complete a Manual waitpoint", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult 
= await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //create a manual waitpoint + const waitpoint = await engine.createManualWaitpoint({ + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + }); + + //block the run + await engine.blockRunWithWaitpoint({ + runId: run.id, + waitpointId: waitpoint.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + }); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //check there is a waitpoint blocking the parent run + const runWaitpointBefore = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointBefore?.waitpointId).toBe(waitpoint.id); + + //complete the waitpoint + await engine.completeWaitpoint({ + id: waitpoint.id, + }); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); + + //check there are no waitpoints blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoint).toBeNull(); + } finally { + engine.quit(); + } + } + ); }); From 64770ce190f5180145aa6d2e226a85bc64fab742 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 11 Nov 2024 19:38:49 +0000 Subject: [PATCH 156/485] When blocking a run with a manual waitpoint, you can set a failAfter datetime --- internal-packages/run-engine/README.md | 5 +- .../run-engine/src/engine/index.ts | 34 +++-- .../src/engine/tests/waitpoints.test.ts | 118 ++++++++++++++++++ 3 files changed, 149 insertions(+), 8 deletions(-) diff --git 
a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 11e3559648..c53883fe81 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -118,7 +118,10 @@ export const approvalFlow = task({ run: async (payload) => { //...do stuff - await wait.forWaitpoint(waitpoint.id, { timeout: "1h" }); + const result = await wait.forWaitpoint(waitpoint.id, { timeout: "1h" }); + if (!result.ok) { + //...timeout + } //...do more stuff }, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 2223ee9278..1e4b8086ed 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -120,9 +120,10 @@ type TriggerParams = { type CompleteAttemptResult = "COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"; const workerCatalog = { - waitpointCompleteDateTime: { + finishWaitpoint: { schema: z.object({ waitpointId: z.string(), + error: z.string().optional(), }), visibilityTimeoutMs: 5000, }, @@ -188,8 +189,16 @@ export class RunEngine { pollIntervalMs: options.worker.pollIntervalMs, logger: new Logger("RunEngineWorker", "debug"), jobs: { - waitpointCompleteDateTime: async ({ payload }) => { - await this.completeWaitpoint({ id: payload.waitpointId }); + finishWaitpoint: async ({ payload }) => { + await this.completeWaitpoint({ + id: payload.waitpointId, + output: payload.error + ? { + value: payload.error, + isError: true, + } + : undefined, + }); }, heartbeatSnapshot: async ({ payload }) => { await this.#handleStalledSnapshot(payload); @@ -1532,12 +1541,14 @@ export class RunEngine { runId, waitpointId, projectId, + failAfter, tx, }: { runId: string; waitpointId: string; environmentId: string; projectId: string; + failAfter?: Date; tx?: PrismaClientOrTransaction; }) { const prisma = tx ?? 
this.prisma; @@ -1575,6 +1586,15 @@ export class RunEngine { }, }); } + + if (failAfter) { + await this.worker.enqueue({ + id: `finishWaitpoint.${waitpointId}`, + job: "finishWaitpoint", + payload: { waitpointId, error: "Waitpoint timed out" }, + availableAt: failAfter, + }); + } }); } @@ -2303,8 +2323,8 @@ export class RunEngine { }); await this.worker.enqueue({ - id: `waitpointCompleteDateTime.${waitpoint.id}`, - job: "waitpointCompleteDateTime", + id: `finishWaitpoint.${waitpoint.id}`, + job: "finishWaitpoint", payload: { waitpointId: waitpoint.id }, availableAt: completedAfter, }); @@ -2342,8 +2362,8 @@ export class RunEngine { //reschedule completion await this.worker.enqueue({ - id: `waitpointCompleteDateTime.${waitpointId}`, - job: "waitpointCompleteDateTime", + id: `finishWaitpoint.${waitpointId}`, + job: "finishWaitpoint", payload: { waitpointId: waitpointId }, availableAt: completedAfter, }); diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 539e4e5033..0277943497 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -366,4 +366,122 @@ describe("RunEngine Waitpoints", () => { } } ); + + containerTest( + "Manual waitpoint timeout", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 
0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //create a manual waitpoint + const waitpoint = await engine.createManualWaitpoint({ + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + }); + + //block the run + await engine.blockRunWithWaitpoint({ + runId: run.id, + waitpointId: waitpoint.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + //fail after 200ms + failAfter: new Date(Date.now() + 200), + }); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + await setTimeout(750); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData2?.completedWaitpoints.length).toBe(1); + expect(executionData2?.completedWaitpoints[0].outputIsError).toBe(true); + + //check there are no waitpoints blocking the parent run + 
const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoint).toBeNull(); + } finally { + engine.quit(); + } + } + ); }); From 5232dfd640a2c31f02db64c3e951615a7dfbfc91 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 12 Nov 2024 14:32:24 +0000 Subject: [PATCH 157/485] Added some notes about run flow control. Rate limit, throttle, batching and debouncing --- internal-packages/run-engine/README.md | 74 ++++++++++++++------------ 1 file changed, 40 insertions(+), 34 deletions(-) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index c53883fe81..ee381573a5 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -52,7 +52,7 @@ For dev environments, we will pass the `environment` id. If there's only a `workerGroup`, we can just `dequeueFromMasterQueue()` to get runs. If there's a `BackgroundWorker` id, we need to determine if that `BackgroundWorker` is the latest. If it's the latest we call `dequeueFromEnvironmentMasterQueue()` to get any runs that aren't locked to a version. If it's not the latest, we call `dequeueFromBackgroundWorkerMasterQueue()` to get runs that are locked to that version. -### Run Queue +## Run Queue This is a fair multi-tenant queue. It is designed to fairly select runs, respect concurrency limits, and have high throughput. It provides visibility into the current concurrency for the env, org, etc. @@ -60,11 +60,11 @@ It has built-in reliability features: - When nacking we increment the `attempt` and if it continually fails we will move it to a Dead Letter Queue (DLQ). - If a run is in the DLQ you can redrive it. -### Heartbeats +## Heartbeats Heartbeats are used to determine if a run has become stalled. Depending on the current execution status, we do different things. 
For example, if the run has been dequeued but the attempt hasn't been started we requeue it. -### Checkpoints +## Checkpoints Checkpoints allow pausing an executing run and then resuming it later. This is an optimization to avoid wasted compute and is especially useful with "Waitpoints". @@ -83,27 +83,27 @@ There are currently three types: Waitpoints can have an idempotencyKey which allows stops them from being created multiple times. This is especially useful for event waitpoints, where you don't want to create a new waitpoint for the same event twice. -### Use cases - -#### `wait.for()` or `wait.until()` +### `wait.for()` or `wait.until()` Wait for a future time, then continue. We should add the option to pass an `idempotencyKey` so a second attempt doesn't wait again. By default it would wait again. ```ts +//Note if the idempotency key is a string, it will get prefixed with the run id. +//you can explicitly pass in an idempotency key created with the the global scope. await wait.until(new Date('2022-01-01T00:00:00Z'), { idempotencyKey: "first-wait" }); await wait.until(new Date('2022-01-01T00:00:00Z'), { idempotencyKey: "second-wait" }); ``` -#### `triggerAndWait()` or `batchTriggerAndWait()` +### `triggerAndWait()` or `batchTriggerAndWait()` Trigger and then wait for run(s) to finish. If the run fails it will still continue but with the errors so the developer can decide what to do. ### The `trigger` `delay` option When triggering a run and passing the `delay` option, we use a `DATETIME` waitpoint to block the run from starting. -#### `wait.forRequest()` +### `wait.forRequest()` Wait until a request has been received at the URL that you are given. This is useful for pausing a run and then continuing it again when some external event occurs on another service. For example, Replicate have an API where they will callback when their work is complete. 
-#### `wait.forWaitpoint(waitpointId)` +### `wait.forWaitpoint(waitpointId)` A more advanced SDK which would require uses to explicitly create a waitpoint. We would also need `createWaitpoint()`, `completeWaitpoint()`, and `failWaitpoint()`. @@ -128,45 +128,51 @@ export const approvalFlow = task({ }); ``` -#### `wait.forRunToComplete(runId)` +### `wait.forRunToComplete(runId)` You could wait for another run (or runs) using their run ids. This would allow you to wait for runs that you haven't triggered inside that run. -#### Debouncing +## Run flow control -Using a `DateTime` waitpoint and an `idempotencyKey` debounce can be implemented. +There are several ways to control when a run will execute (or not). Each of these should be configurable on a task, a named queue that is shared between tasks, and at trigger time including the ability to pass a `key` so you can have per-tenant controls. -Suggested usage: +### Concurrency limits -```ts -await myTask.trigger( - { some: "data" }, - { debounce: { key: user.id, wait: "30s", maxWait: "2m", } } -); -``` +When `trigger` is called the run is added to the queue. We only dequeue when the concurrency limit hasn't been exceeded for that task/queue. + +### Rate limiting + +When `trigger` is called, we check if the rate limit has been exceeded. If it has then we ignore the trigger. The run is thrown away and an appropriate error is returned. + +This is useful: +- To prevent abuse. +- To control how many executions a user can do (using a `key` with rate limiting). + +### Debouncing + +When `trigger` is called, we prevent too many runs happening in a period by collapsing into a single run. This is done by discarding some runs in a period. -//todo do you get the first or last payload when it triggers? Bit confusing with leading. +This is useful: +- To prevent too many runs happening in a short period. -Implementation: +We should mark the run as `"DELAYED"` with the correct `delayUntil` time. 
This will allow the user to see that the run is delayed and why. -The Waitpoint `idempotencyKey` should be prefixed like `debounce-${debounce.key}`. Also probably with the `taskIdentifier`? +### Throttling -1. When trigger is called with `debounce`, we check if there's an active waitpoint with the relevant `idempotencyKey`. -2. If `leading` is false (default): - - If there's a waiting run: update its payload and extend the waitpoint's completionTime - - If no waiting run: create a new run and DATETIME waitpoint -3. If `maxWait` is specified: - - The waitpoint's completionTime is capped at the waitpoint `createdAt` + maxWait. - - Ensures execution happens even during constant triggering -4. When the waitpoint is completed we need to clear the `idempotencyKey`. To clear an `idempotencyKey`, move the original value to the `inactiveIdempotencyKey` column and set the main one to a new randomly generated one. +When `trigger` is called the run is added to the queue. We only run them when they don't exceed the limit in that time period, by controlling the timing of when they are dequeued. -//todo implement auto-deactivating of the idempotencyKey when the waitpoint is completed. This would make it easier to implement features like this. +This is useful: +- To prevent too many runs happening in a short period. +- To control how many executions a user can do (using a `key` with throttling). +- When you need to execute every run but not too many in a short period, e.g. avoiding rate limits. -#### Rate limiting +### Batching -Both when triggering tasks and also any helpers we wanted inside the task. +When `trigger` is called, we batch the runs together. This means the payload of the run is an array of items, each being a single payload. -For inside tasks, we could use the DATETIME waitpoints. Or it might be easier to use an existing rate limiting library with Redis and receive notifications when a limit is cleared and complete associated waitpoints. 
+This is useful: +- For performance, as it reduces the number of runs in the system. +- It can be useful when using 3rd party APIs that support batching. ## Emitting events From 954c2bd6c9d9df7059ae91428eba70098ef15efc Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:26:55 +0000 Subject: [PATCH 158/485] worker package updates --- .../authenticatedSocketConnection.server.ts | 8 +- .../run-engine/src/engine/index.ts | 6 +- packages/core/src/v3/index.ts | 1 + packages/core/src/v3/schemas/api.ts | 1 + packages/core/src/v3/schemas/index.ts | 1 + .../core/src/v3/schemas/runEngine.ts | 52 +++++- .../core/src/v3/utils/heartbeat.ts | 36 +++- packages/worker/src/apiClient.ts | 111 ------------ packages/worker/src/client/http.ts | 160 ++++++++++++++++++ packages/worker/src/client/types.ts | 7 + packages/worker/src/client/util.ts | 25 +++ packages/worker/src/client/websocket.ts | 52 ++++++ packages/worker/src/consts.ts | 1 + packages/worker/src/index.ts | 2 + packages/worker/src/messages.ts | 80 +++++++++ packages/worker/src/queueConsumer.ts | 71 ++++++++ packages/worker/src/schemas.ts | 38 +++++ packages/worker/src/workerSession.ts | 121 +++++++++++++ 18 files changed, 648 insertions(+), 125 deletions(-) rename internal-packages/run-engine/src/engine/messages.ts => packages/core/src/v3/schemas/runEngine.ts (58%) rename apps/webapp/app/v3/services/heartbeatService.server.ts => packages/core/src/v3/utils/heartbeat.ts (51%) delete mode 100644 packages/worker/src/apiClient.ts create mode 100644 packages/worker/src/client/http.ts create mode 100644 packages/worker/src/client/types.ts create mode 100644 packages/worker/src/client/util.ts create mode 100644 packages/worker/src/client/websocket.ts create mode 100644 packages/worker/src/messages.ts create mode 100644 packages/worker/src/queueConsumer.ts create mode 100644 packages/worker/src/workerSession.ts diff --git 
a/apps/webapp/app/v3/authenticatedSocketConnection.server.ts b/apps/webapp/app/v3/authenticatedSocketConnection.server.ts index ce98438784..208deaa2f5 100644 --- a/apps/webapp/app/v3/authenticatedSocketConnection.server.ts +++ b/apps/webapp/app/v3/authenticatedSocketConnection.server.ts @@ -1,4 +1,8 @@ -import { clientWebsocketMessages, serverWebsocketMessages } from "@trigger.dev/core/v3"; +import { + clientWebsocketMessages, + HeartbeatService, + serverWebsocketMessages, +} from "@trigger.dev/core/v3"; import { ZodMessageHandler, ZodMessageSender } from "@trigger.dev/core/v3/zodMessageHandler"; import { Evt } from "evt"; import { randomUUID } from "node:crypto"; @@ -7,7 +11,6 @@ import { WebSocket } from "ws"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; import { DevQueueConsumer } from "./marqs/devQueueConsumer.server"; -import { HeartbeatService } from "./services/heartbeatService.server"; export class AuthenticatedSocketConnection { public id: string; @@ -83,6 +86,7 @@ export class AuthenticatedSocketConnection { ws.ping(); }, + intervalMs: 45_000, }); this._pingService.start(); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 1e4b8086ed..78a8de0d47 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -2,11 +2,14 @@ import { Worker, type WorkerConcurrencyOptions } from "@internal/redis-worker"; import { Attributes, Span, SpanKind, trace, Tracer } from "@opentelemetry/api"; import { Logger } from "@trigger.dev/core/logger"; import { + CompleteAttemptResult, + DequeuedMessage, MachinePreset, MachinePresetName, parsePacket, QueueOptions, RetryOptions, + RunExecutionData, sanitizeError, shouldRetryError, TaskRunError, @@ -47,7 +50,6 @@ import { runStatusFromError } from "./errors"; import { EventBusEvents } from "./eventBus"; import { RunLocker } from 
"./locking"; import { machinePresetFromConfig } from "./machinePresets"; -import { DequeuedMessage, RunExecutionData } from "./messages"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; type Options = { @@ -117,8 +119,6 @@ type TriggerParams = { seedMetadataType?: string; }; -type CompleteAttemptResult = "COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"; - const workerCatalog = { finishWaitpoint: { schema: z.object({ diff --git a/packages/core/src/v3/index.ts b/packages/core/src/v3/index.ts index 8899dfc22b..06d57fa320 100644 --- a/packages/core/src/v3/index.ts +++ b/packages/core/src/v3/index.ts @@ -59,6 +59,7 @@ export { } from "./utils/ioSerialization.js"; export * from "./utils/imageRef.js"; +export * from "./utils/heartbeat.js"; export * from "./config.js"; export { getSchemaParseFn, type AnySchemaParseFn, type SchemaParseFn } from "./types/schemas.js"; diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 2e325c9138..f61e28be9e 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -193,6 +193,7 @@ export const InitializeDeploymentRequestBody = z.object({ registryHost: z.string().optional(), selfHosted: z.boolean().optional(), namespace: z.string().optional(), + type: z.enum(["SHARED", "UNMANAGED"]).optional(), }); export type InitializeDeploymentRequestBody = z.infer; diff --git a/packages/core/src/v3/schemas/index.ts b/packages/core/src/v3/schemas/index.ts index 6f8b74e64e..7a99fd1575 100644 --- a/packages/core/src/v3/schemas/index.ts +++ b/packages/core/src/v3/schemas/index.ts @@ -10,3 +10,4 @@ export * from "./eventFilter.js"; export * from "./openTelemetry.js"; export * from "./config.js"; export * from "./build.js"; +export * from "./runEngine.js"; diff --git a/internal-packages/run-engine/src/engine/messages.ts b/packages/core/src/v3/schemas/runEngine.ts similarity index 58% rename from internal-packages/run-engine/src/engine/messages.ts rename to 
packages/core/src/v3/schemas/runEngine.ts index fc1720384e..2ddbd9ee74 100644 --- a/internal-packages/run-engine/src/engine/messages.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -1,8 +1,49 @@ -import { EnvironmentType, MachinePreset } from "@trigger.dev/core/v3"; -import { TaskRunExecutionStatus, TaskRunStatus, WaitpointType } from "@trigger.dev/database"; import { z } from "zod"; +import { MachinePreset } from "./common.js"; +import { EnvironmentType } from "./schemas.js"; + +export const TaskRunExecutionStatus = { + RUN_CREATED: "RUN_CREATED", + QUEUED: "QUEUED", + PENDING_EXECUTING: "PENDING_EXECUTING", + EXECUTING: "EXECUTING", + EXECUTING_WITH_WAITPOINTS: "EXECUTING_WITH_WAITPOINTS", + BLOCKED_BY_WAITPOINTS: "BLOCKED_BY_WAITPOINTS", + PENDING_CANCEL: "PENDING_CANCEL", + FINISHED: "FINISHED", +} as const; + +export type TaskRunExecutionStatus = + (typeof TaskRunExecutionStatus)[keyof typeof TaskRunExecutionStatus]; + +export const TaskRunStatus = { + DELAYED: "DELAYED", + PENDING: "PENDING", + WAITING_FOR_DEPLOY: "WAITING_FOR_DEPLOY", + EXECUTING: "EXECUTING", + WAITING_TO_RESUME: "WAITING_TO_RESUME", + RETRYING_AFTER_FAILURE: "RETRYING_AFTER_FAILURE", + PAUSED: "PAUSED", + CANCELED: "CANCELED", + INTERRUPTED: "INTERRUPTED", + COMPLETED_SUCCESSFULLY: "COMPLETED_SUCCESSFULLY", + COMPLETED_WITH_ERRORS: "COMPLETED_WITH_ERRORS", + SYSTEM_FAILURE: "SYSTEM_FAILURE", + CRASHED: "CRASHED", + EXPIRED: "EXPIRED", + TIMED_OUT: "TIMED_OUT", +} as const; + +export type TaskRunStatus = (typeof TaskRunStatus)[keyof typeof TaskRunStatus]; + +export const WaitpointType = { + RUN: "RUN", + DATETIME: "DATETIME", + MANUAL: "MANUAL", +} as const; + +export type WaitpointType = (typeof WaitpointType)[keyof typeof WaitpointType]; -//todo it will need to move into core because the Worker will need to use these const CompletedWaitpoint = z.object({ id: z.string(), type: z.enum(Object.values(WaitpointType) as [WaitpointType]), @@ -18,7 +59,7 @@ const CompletedWaitpoint = 
z.object({ }); /** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ -const DequeuedMessage = z.object({ +export const DequeuedMessage = z.object({ version: z.literal("1"), snapshot: z.object({ id: z.string(), @@ -84,3 +125,6 @@ export const RunExecutionData = z.object({ }); export type RunExecutionData = z.infer; + +export const CompleteAttemptResult = z.enum(["COMPLETED", "RETRY_QUEUED", "RETRY_IMMEDIATELY"]); +export type CompleteAttemptResult = z.infer; diff --git a/apps/webapp/app/v3/services/heartbeatService.server.ts b/packages/core/src/v3/utils/heartbeat.ts similarity index 51% rename from apps/webapp/app/v3/services/heartbeatService.server.ts rename to packages/core/src/v3/utils/heartbeat.ts index 8db36b2beb..295d8ebc04 100644 --- a/apps/webapp/app/v3/services/heartbeatService.server.ts +++ b/packages/core/src/v3/utils/heartbeat.ts @@ -1,23 +1,34 @@ type HeartbeatServiceOptions = { heartbeat: () => Promise; - pingIntervalInMs?: number; + intervalMs?: number; leadingEdge?: boolean; + onError?: (error: unknown) => Promise; }; export class HeartbeatService { private _heartbeat: () => Promise; - private _heartbeatIntervalInMs: number; + private _intervalMs: number; private _nextHeartbeat: NodeJS.Timeout | undefined; private _leadingEdge: boolean; + private _isHeartbeating: boolean; + private _onError?: (error: unknown) => Promise; constructor(opts: HeartbeatServiceOptions) { this._heartbeat = opts.heartbeat; - this._heartbeatIntervalInMs = opts.pingIntervalInMs ?? 45_000; + this._intervalMs = opts.intervalMs ?? 45_000; this._nextHeartbeat = undefined; this._leadingEdge = opts.leadingEdge ?? 
false; + this._isHeartbeating = false; + this._onError = opts.onError; } start() { + if (this._isHeartbeating) { + return; + } + + this._isHeartbeating = true; + if (this._leadingEdge) { this.#doHeartbeat(); } else { @@ -26,13 +37,28 @@ export class HeartbeatService { } stop() { + if (!this._isHeartbeating) { + return; + } + + this._isHeartbeating = false; this.#clearNextHeartbeat(); } #doHeartbeat = async () => { this.#clearNextHeartbeat(); - await this._heartbeat(); + try { + await this._heartbeat(); + } catch (error) { + if (this._onError) { + try { + await this._onError(error); + } catch (error) { + console.error("Error handling heartbeat error", error); + } + } + } this.#scheduleNextHeartbeat(); }; @@ -44,6 +70,6 @@ export class HeartbeatService { } #scheduleNextHeartbeat() { - this._nextHeartbeat = setTimeout(this.#doHeartbeat, this._heartbeatIntervalInMs); + this._nextHeartbeat = setTimeout(this.#doHeartbeat, this._intervalMs); } } diff --git a/packages/worker/src/apiClient.ts b/packages/worker/src/apiClient.ts deleted file mode 100644 index c0b03d25d2..0000000000 --- a/packages/worker/src/apiClient.ts +++ /dev/null @@ -1,111 +0,0 @@ -import { z } from "zod"; -import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; -import { WorkerApiHeartbeatRequestBody, WorkerApiHeartbeatResponseBody } from "./schemas.js"; -import { HEADER_NAME } from "./consts.js"; - -type WorkerApiClientOptions = { - apiURL: string; - workerToken: string; - instanceName: string; - deploymentId?: string; -}; - -export class WorkerApiClient { - private readonly apiURL: string; - private readonly workerToken: string; - private readonly instanceName: string; - private readonly deploymentId?: string; - - constructor(opts: WorkerApiClientOptions) { - this.apiURL = opts.apiURL.replace(/\/$/, ""); - this.workerToken = opts.workerToken; - this.instanceName = opts.instanceName; - this.deploymentId = opts.deploymentId; - - if (!this.apiURL) { - throw new Error("apiURL is required and 
needs to be a non-empty string"); - } - - if (!this.workerToken) { - throw new Error("workerToken is required and needs to be a non-empty string"); - } - - if (!this.instanceName) { - throw new Error("instanceName is required and needs to be a non-empty string"); - } - } - - async heartbeat(body: WorkerApiHeartbeatRequestBody) { - return wrapZodFetch(WorkerApiHeartbeatResponseBody, `${this.apiURL}/api/v1/worker/heartbeat`, { - method: "POST", - headers: { - ...this.defaultHeaders, - "Content-Type": "application/json", - }, - body: JSON.stringify(body), - }); - } - - async dequeue() { - return wrapZodFetch(WorkerApiHeartbeatResponseBody, `${this.apiURL}/api/v1/worker/heartbeat`, { - headers: { - ...this.defaultHeaders, - }, - }); - } - - private get defaultHeaders(): HeadersInit { - return { - Authorization: `Bearer ${this.workerToken}`, - [HEADER_NAME.WORKER_INSTANCE_NAME]: this.instanceName, - ...(this.deploymentId && { [HEADER_NAME.WORKER_DEPLOYMENT_ID]: this.deploymentId }), - }; - } -} - -type ApiResult = - | { success: true; data: TSuccessResult } - | { - success: false; - error: string; - }; - -async function wrapZodFetch( - schema: T, - url: string, - requestInit?: RequestInit -): Promise>> { - try { - const response = await zodfetch(schema, url, requestInit, { - retry: { - minTimeoutInMs: 500, - maxTimeoutInMs: 5000, - maxAttempts: 5, - factor: 2, - randomize: false, - }, - }); - - return { - success: true, - data: response, - }; - } catch (error) { - if (error instanceof ApiError) { - return { - success: false, - error: error.message, - }; - } else if (error instanceof Error) { - return { - success: false, - error: error.message, - }; - } else { - return { - success: false, - error: String(error), - }; - } - } -} diff --git a/packages/worker/src/client/http.ts b/packages/worker/src/client/http.ts new file mode 100644 index 0000000000..b87a35610c --- /dev/null +++ b/packages/worker/src/client/http.ts @@ -0,0 +1,160 @@ +import { z } from "zod"; +import { 
zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; +import { + WorkerApiConnectResponseBody, + WorkerApiDequeueResponseBody, + WorkerApiHeartbeatRequestBody, + WorkerApiHeartbeatResponseBody, + WorkerApiRunAttemptCompleteRequestBody, + WorkerApiRunAttemptCompleteResponseBody, + WorkerApiRunAttemptStartResponseBody, +} from "../schemas.js"; +import { WorkerClientCommonOptions } from "./types.js"; +import { getDefaultHeaders } from "./util.js"; + +type WorkerHttpClientOptions = WorkerClientCommonOptions; + +export class WorkerHttpClient { + private readonly apiURL: string; + private readonly workerToken: string; + private readonly instanceName: string; + private readonly defaultHeaders: Record; + + constructor(opts: WorkerHttpClientOptions) { + this.apiURL = opts.apiUrl.replace(/\/$/, ""); + this.workerToken = opts.workerToken; + this.instanceName = opts.instanceName; + this.deploymentId = opts.deploymentId; + this.managedWorkerSecret = opts.managedWorkerSecret; + this.defaultHeaders = getDefaultHeaders(opts); + + if (!this.apiURL) { + throw new Error("apiURL is required and needs to be a non-empty string"); + } + + if (!this.workerToken) { + throw new Error("workerToken is required and needs to be a non-empty string"); + } + + if (!this.instanceName) { + throw new Error("instanceName is required and needs to be a non-empty string"); + } + } + + async connect() { + return wrapZodFetch( + WorkerApiConnectResponseBody, + `${this.apiURL}/api/v1/worker-actions/connect`, + { + headers: { + ...this.defaultHeaders, + }, + } + ); + } + + async heartbeat(body: WorkerApiHeartbeatRequestBody) { + return wrapZodFetch( + WorkerApiHeartbeatResponseBody, + `${this.apiURL}/api/v1/worker-actions/heartbeat`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + "Content-Type": "application/json", + }, + body: JSON.stringify(body), + } + ); + } + + async dequeue() { + return wrapZodFetch( + WorkerApiDequeueResponseBody, + `${this.apiURL}/api/v1/worker-actions/dequeue`, 
+ { + headers: { + ...this.defaultHeaders, + }, + } + ); + } + + async startRun(runId: string, snapshotId: string) { + return wrapZodFetch( + WorkerApiRunAttemptStartResponseBody, + `${this.apiURL}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + }, + } + ); + } + + async completeRun( + runId: string, + snapshotId: string, + body: WorkerApiRunAttemptCompleteRequestBody + ) { + return wrapZodFetch( + WorkerApiRunAttemptCompleteResponseBody, + `${this.apiURL}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + }, + body: JSON.stringify(body), + } + ); + } +} + +type ApiResult = + | { success: true; data: TSuccessResult } + | { + success: false; + error: string; + }; + +async function wrapZodFetch( + schema: T, + url: string, + requestInit?: RequestInit +): Promise>> { + try { + const response = await zodfetch(schema, url, requestInit, { + retry: { + minTimeoutInMs: 500, + maxTimeoutInMs: 5000, + maxAttempts: 5, + factor: 2, + randomize: false, + }, + }); + + return { + success: true, + data: response, + }; + } catch (error) { + if (error instanceof ApiError) { + return { + success: false, + error: error.message, + }; + } else if (error instanceof Error) { + return { + success: false, + error: error.message, + }; + } else { + return { + success: false, + error: String(error), + }; + } + } +} diff --git a/packages/worker/src/client/types.ts b/packages/worker/src/client/types.ts new file mode 100644 index 0000000000..2da3415bb8 --- /dev/null +++ b/packages/worker/src/client/types.ts @@ -0,0 +1,7 @@ +export type WorkerClientCommonOptions = { + apiUrl: string; + workerToken: string; + instanceName: string; + deploymentId?: string; + managedWorkerSecret?: string; +}; diff --git a/packages/worker/src/client/util.ts b/packages/worker/src/client/util.ts new file mode 100644 index 
0000000000..ac82d1dc56 --- /dev/null +++ b/packages/worker/src/client/util.ts @@ -0,0 +1,25 @@ +import { HEADER_NAME } from "../consts.js"; +import { WorkerClientCommonOptions } from "./types.js"; + +/** Will ignore headers with falsey values */ +function createHeaders(headersInit: Record) { + const headers = new Headers(); + + for (const [key, value] of Object.entries(headersInit)) { + if (!value) { + continue; + } + headers.set(key, value); + } + + return Object.fromEntries(headers.entries()); +} + +export function getDefaultHeaders(options: WorkerClientCommonOptions): Record { + return createHeaders({ + Authorization: `Bearer ${options.workerToken}`, + [HEADER_NAME.WORKER_INSTANCE_NAME]: options.instanceName, + [HEADER_NAME.WORKER_DEPLOYMENT_ID]: options.deploymentId, + [HEADER_NAME.WORKER_MANAGED_SECRET]: options.managedWorkerSecret, + }); +} diff --git a/packages/worker/src/client/websocket.ts b/packages/worker/src/client/websocket.ts new file mode 100644 index 0000000000..f87eda3f81 --- /dev/null +++ b/packages/worker/src/client/websocket.ts @@ -0,0 +1,52 @@ +import { ZodSocketConnection } from "@trigger.dev/core/v3/zodSocket"; +import { PlatformToWorkerMessages, WorkerToPlatformMessages } from "../messages.js"; +import { WorkerClientCommonOptions } from "./types.js"; +import { getDefaultHeaders } from "./util.js"; + +type WorkerWebsocketClientOptions = WorkerClientCommonOptions; + +export class WorkerWebsocketClient { + private readonly defaultHeaders: Record; + private platformSocket?: ZodSocketConnection< + typeof WorkerToPlatformMessages, + typeof PlatformToWorkerMessages + >; + + constructor(private opts: WorkerWebsocketClientOptions) { + this.defaultHeaders = getDefaultHeaders(opts); + } + + start() { + const websocketPort = this.getPort(this.opts.apiUrl); + this.platformSocket = new ZodSocketConnection({ + namespace: "worker", + host: this.getHost(this.opts.apiUrl), + port: websocketPort, + secure: websocketPort === 443, + extraHeaders: 
this.defaultHeaders, + clientMessages: WorkerToPlatformMessages, + serverMessages: PlatformToWorkerMessages, + handlers: {}, + }); + } + + stop() { + this.platformSocket?.close(); + } + + private getHost(apiUrl: string): string { + const url = new URL(apiUrl); + return url.hostname; + } + + private getPort(apiUrl: string): number { + const url = new URL(apiUrl); + const port = Number(url.port); + + if (!isNaN(port) && port !== 0) { + return port; + } + + return url.protocol === "https" ? 443 : 80; + } +} diff --git a/packages/worker/src/consts.ts b/packages/worker/src/consts.ts index b935b1a710..8aac362e44 100644 --- a/packages/worker/src/consts.ts +++ b/packages/worker/src/consts.ts @@ -1,4 +1,5 @@ export const HEADER_NAME = { WORKER_INSTANCE_NAME: "x-trigger-worker-instance-name", WORKER_DEPLOYMENT_ID: "x-trigger-worker-deployment-id", + WORKER_MANAGED_SECRET: "x-trigger-worker-managed-secret", }; diff --git a/packages/worker/src/index.ts b/packages/worker/src/index.ts index 22a2121276..bde2dd2de4 100644 --- a/packages/worker/src/index.ts +++ b/packages/worker/src/index.ts @@ -1,2 +1,4 @@ export { VERSION as WORKER_VERSION } from "./version.js"; export * from "./consts.js"; +export * from "./client/http.js"; +export * from "./workerSession.js"; diff --git a/packages/worker/src/messages.ts b/packages/worker/src/messages.ts new file mode 100644 index 0000000000..4b0e5e0673 --- /dev/null +++ b/packages/worker/src/messages.ts @@ -0,0 +1,80 @@ +import { EnvironmentType, MachinePreset, TaskRunInternalError } from "@trigger.dev/core/v3"; +import { z } from "zod"; + +export const WorkerToPlatformMessages = { + LOG: { + message: z.object({ + version: z.literal("v1").default("v1"), + data: z.string(), + }), + }, + LOG_WITH_ACK: { + message: z.object({ + version: z.literal("v1").default("v1"), + data: z.string(), + }), + callback: z.object({ + status: z.literal("ok"), + }), + }, + WORKER_CRASHED: { + message: z.object({ + version: z.literal("v1").default("v1"), + runId: 
z.string(), + reason: z.string().optional(), + exitCode: z.number().optional(), + message: z.string().optional(), + logs: z.string().optional(), + /** This means we should only update the error if one exists */ + overrideCompletion: z.boolean().optional(), + errorCode: TaskRunInternalError.shape.code.optional(), + }), + }, + INDEXING_FAILED: { + message: z.object({ + version: z.literal("v1").default("v1"), + deploymentId: z.string(), + error: z.object({ + name: z.string(), + message: z.string(), + stack: z.string().optional(), + stderr: z.string().optional(), + }), + overrideCompletion: z.boolean().optional(), + }), + }, +}; + +export const PlatformToWorkerMessages = { + RESTORE: { + message: z.object({ + version: z.literal("v1").default("v1"), + type: z.enum(["DOCKER", "KUBERNETES"]), + location: z.string(), + reason: z.string().optional(), + imageRef: z.string(), + attemptNumber: z.number().optional(), + machine: MachinePreset, + // identifiers + checkpointId: z.string(), + envId: z.string(), + envType: EnvironmentType, + orgId: z.string(), + projectId: z.string(), + runId: z.string(), + }), + }, + PRE_PULL_DEPLOYMENT: { + message: z.object({ + version: z.literal("v1").default("v1"), + imageRef: z.string(), + shortCode: z.string(), + // identifiers + envId: z.string(), + envType: EnvironmentType, + orgId: z.string(), + projectId: z.string(), + deploymentId: z.string(), + }), + }, +}; diff --git a/packages/worker/src/queueConsumer.ts b/packages/worker/src/queueConsumer.ts new file mode 100644 index 0000000000..342aef0b48 --- /dev/null +++ b/packages/worker/src/queueConsumer.ts @@ -0,0 +1,71 @@ +import { WorkerHttpClient } from "./client/http.js"; +import { WorkerApiDequeueResponseBody } from "./schemas.js"; + +type RunQueueConsumerOptions = { + client: WorkerHttpClient; + intervalMs?: number; + onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; +}; + +export class RunQueueConsumer { + private readonly client: WorkerHttpClient; + private readonly 
onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; + + private intervalMs: number; + private isEnabled: boolean; + + constructor(opts: RunQueueConsumerOptions) { + this.isEnabled = false; + this.intervalMs = opts.intervalMs ?? 5_000; + this.onDequeue = opts.onDequeue; + this.client = opts.client; + } + + start() { + if (this.isEnabled) { + return; + } + + this.isEnabled = true; + this.dequeue(); + } + + stop() { + if (!this.isEnabled) { + return; + } + + this.isEnabled = false; + } + + private async dequeue() { + console.debug("[RunQueueConsumer] dequeue()", { enabled: this.isEnabled }); + + if (!this.isEnabled) { + return; + } + + try { + const response = await this.client.dequeue(); + + if (!response.success) { + console.error("[RunQueueConsumer] Failed to dequeue", { error: response.error }); + } else { + try { + await this.onDequeue(response.data); + } catch (handlerError) { + console.error("[RunQueueConsumer] onDequeue error", { error: handlerError }); + } + } + } catch (clientError) { + console.error("[RunQueueConsumer] client.dequeue error", { error: clientError }); + } + + this.scheduleNextDequeue(); + } + + scheduleNextDequeue(delay: number = this.intervalMs) { + console.debug("[RunQueueConsumer] Scheduling next dequeue", { delay }); + setTimeout(this.dequeue.bind(this), delay); + } +} diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/schemas.ts index 3a519ac8c5..32955deeab 100644 --- a/packages/worker/src/schemas.ts +++ b/packages/worker/src/schemas.ts @@ -1,4 +1,9 @@ import { z } from "zod"; +import { + CompleteAttemptResult, + DequeuedMessage, + TaskRunExecutionResult, +} from "@trigger.dev/core/v3"; export const WorkerApiHeartbeatRequestBody = z.object({ cpu: z.object({ @@ -17,3 +22,36 @@ export const WorkerApiHeartbeatResponseBody = z.object({ ok: z.literal(true), }); export type WorkerApiHeartbeatResponseBody = z.infer; + +export const WorkerApiConnectResponseBody = z.object({ + ok: z.literal(true), +}); +export type 
WorkerApiConnectResponseBody = z.infer; + +export const WorkerApiDequeueResponseBody = DequeuedMessage.array(); +export type WorkerApiDequeueResponseBody = z.infer; + +// Attempt start +export const WorkerApiRunAttemptStartResponseBody = z.object({ + run: z.any(), + snapshot: z.any(), + execution: z.any(), +}); +export type WorkerApiRunAttemptStartResponseBody = z.infer< + typeof WorkerApiRunAttemptStartResponseBody +>; + +// Attempt completion +export const WorkerApiRunAttemptCompleteRequestBody = z.object({ + completion: TaskRunExecutionResult, +}); +export type WorkerApiRunAttemptCompleteRequestBody = z.infer< + typeof WorkerApiRunAttemptCompleteRequestBody +>; + +export const WorkerApiRunAttemptCompleteResponseBody = z.object({ + result: CompleteAttemptResult, +}); +export type WorkerApiRunAttemptCompleteResponseBody = z.infer< + typeof WorkerApiRunAttemptCompleteResponseBody +>; diff --git a/packages/worker/src/workerSession.ts b/packages/worker/src/workerSession.ts new file mode 100644 index 0000000000..853928e90e --- /dev/null +++ b/packages/worker/src/workerSession.ts @@ -0,0 +1,121 @@ +import { HeartbeatService } from "@trigger.dev/core/v3"; +import { WorkerHttpClient } from "./client/http.js"; +import { WorkerClientCommonOptions } from "./client/types.js"; +import { WorkerWebsocketClient } from "./client/websocket.js"; +import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./schemas.js"; +import { RunQueueConsumer } from "./queueConsumer.js"; + +type WorkerSessionOptions = WorkerClientCommonOptions & { + heartbeatIntervalSeconds?: number; +}; + +export class WorkerSession { + private readonly httpClient: WorkerHttpClient; + private readonly websocketClient: WorkerWebsocketClient; + private readonly queueConsumer: RunQueueConsumer; + private readonly heartbeatService: HeartbeatService; + private readonly heartbeatIntervalSeconds: number; + + constructor(private opts: WorkerSessionOptions) { + this.httpClient = new 
WorkerHttpClient(opts); + this.websocketClient = new WorkerWebsocketClient(opts); + this.queueConsumer = new RunQueueConsumer({ + client: this.httpClient, + onDequeue: this.onDequeue.bind(this), + }); + + // TODO: This should be dynamic and set by (or at least overridden by) the platform + this.heartbeatIntervalSeconds = opts.heartbeatIntervalSeconds || 30; + this.heartbeatService = new HeartbeatService({ + heartbeat: async () => { + console.debug("[WorkerSession] Sending heartbeat"); + + const body = this.getHeartbeatBody(); + const response = await this.httpClient.heartbeat(body); + + if (!response.success) { + console.error("[WorkerSession] Heartbeat failed", { error: response.error }); + } + }, + intervalMs: this.heartbeatIntervalSeconds * 1000, + leadingEdge: false, + onError: async (error) => { + console.error("[WorkerSession] Failed to send heartbeat", { error }); + }, + }); + } + + private async onDequeue(messages: WorkerApiDequeueResponseBody): Promise { + console.log("[WorkerSession] Dequeued messages", { count: messages.length }); + console.debug("[WorkerSession] Dequeued messages with contents", messages); + + for (const message of messages) { + console.log("[WorkerSession] Processing message", { message }); + + const start = await this.httpClient.startRun(message.run.id, message.snapshot.id); + + if (!start.success) { + console.error("[WorkerSession] Failed to start run", { error: start.error }); + continue; + } + + console.log("[WorkerSession] Started run", { + runId: start.data.run.id, + snapshot: start.data.snapshot.id, + }); + + const complete = await this.httpClient.completeRun( + start.data.run.id, + start.data.snapshot.id, + { + completion: { + id: start.data.run.friendlyId, + ok: true, + outputType: "application/json", + }, + } + ); + + if (!complete.success) { + console.error("[WorkerSession] Failed to complete run", { error: complete.error }); + continue; + } + + console.log("[WorkerSession] Completed run", { + runId: start.data.run.id, + 
result: complete.data.result, + }); + } + } + + async start() { + const connect = await this.httpClient.connect(); + if (!connect.success) { + console.error("[WorkerSession] Failed to connect via HTTP client", { error: connect.error }); + throw new Error("[WorkerSession] Failed to connect via HTTP client"); + } + + this.queueConsumer.start(); + this.heartbeatService.start(); + this.websocketClient.start(); + } + + async stop() { + this.heartbeatService.stop(); + this.websocketClient.stop(); + } + + private getHeartbeatBody(): WorkerApiHeartbeatRequestBody { + return { + cpu: { + used: 0.5, + available: 0.5, + }, + memory: { + used: 0.5, + available: 0.5, + }, + tasks: [], + }; + } +} From 8484c55820b6aac0608d255018609a14111c4c56 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:27:41 +0000 Subject: [PATCH 159/485] update worker api routes --- .../routes/api.v1.worker-actions.connect.ts | 10 ++++++ .../routes/api.v1.worker-actions.dequeue.ts | 10 ++++++ .../routes/api.v1.worker-actions.heartbeat.ts | 16 +++++++++ ...snapshots.$snapshotId.attempts.complete.ts | 33 +++++++++++++++++++ ...Id.snapshots.$snapshotId.attempts.start.ts | 26 +++++++++++++++ .../routes/api.v1.worker.attempt.complete.ts | 23 ------------- .../app/routes/api.v1.worker.attempt.start.ts | 17 ---------- .../app/routes/api.v1.worker.dequeue.ts | 6 ---- .../app/routes/api.v1.worker.heartbeat.ts | 7 ---- 9 files changed, 95 insertions(+), 53 deletions(-) create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.connect.ts create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts delete mode 100644 
apps/webapp/app/routes/api.v1.worker.attempt.complete.ts delete mode 100644 apps/webapp/app/routes/api.v1.worker.attempt.start.ts delete mode 100644 apps/webapp/app/routes/api.v1.worker.dequeue.ts delete mode 100644 apps/webapp/app/routes/api.v1.worker.heartbeat.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts new file mode 100644 index 0000000000..0500fc032c --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts @@ -0,0 +1,10 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { WorkerApiConnectResponseBody } from "@trigger.dev/worker/schemas"; +import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const loader = createLoaderWorkerApiRoute( + {}, + async ({ authenticatedWorker }): Promise> => { + return json({ ok: true }); + } +); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts new file mode 100644 index 0000000000..0c125aa74d --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts @@ -0,0 +1,10 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { WorkerApiDequeueResponseBody } from "@trigger.dev/worker/schemas"; +import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const loader = createLoaderWorkerApiRoute( + {}, + async ({ authenticatedWorker }): Promise> => { + return json(await authenticatedWorker.dequeue()); + } +); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts new file mode 100644 index 0000000000..d8ea047250 --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts @@ -0,0 +1,16 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { + WorkerApiConnectResponseBody, + 
WorkerApiHeartbeatRequestBody, +} from "@trigger.dev/worker/schemas"; +import { createActionWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const action = createActionWorkerApiRoute( + { + body: WorkerApiHeartbeatRequestBody, + }, + async ({ authenticatedWorker }): Promise> => { + await authenticatedWorker.heartbeatWorkerInstance(); + return json({ ok: true }); + } +); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts new file mode 100644 index 0000000000..01f8966233 --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts @@ -0,0 +1,33 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { + WorkerApiRunAttemptCompleteRequestBody, + WorkerApiRunAttemptCompleteResponseBody, +} from "@trigger.dev/worker/schemas"; +import { z } from "zod"; +import { createActionWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const action = createActionWorkerApiRoute( + { + body: WorkerApiRunAttemptCompleteRequestBody, + params: z.object({ + runId: z.string(), + snapshotId: z.string(), + }), + }, + async ({ + authenticatedWorker, + body, + params, + }): Promise> => { + const { completion } = body; + const { runId, snapshotId } = params; + + const completeResult = await authenticatedWorker.completeRunAttempt({ + runId, + snapshotId, + completion, + }); + + return json({ result: completeResult }); + } +); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts new file mode 100644 index 0000000000..abc4992055 --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts @@ 
-0,0 +1,26 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { WorkerApiRunAttemptStartResponseBody } from "@trigger.dev/worker/schemas"; +import { z } from "zod"; +import { createActionWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; + +export const action = createActionWorkerApiRoute( + { + params: z.object({ + runId: z.string(), + snapshotId: z.string(), + }), + }, + async ({ + authenticatedWorker, + params, + }): Promise> => { + const { runId, snapshotId } = params; + + const runExecutionData = await authenticatedWorker.startRunAttempt({ + runId, + snapshotId, + }); + + return json(runExecutionData); + } +); diff --git a/apps/webapp/app/routes/api.v1.worker.attempt.complete.ts b/apps/webapp/app/routes/api.v1.worker.attempt.complete.ts deleted file mode 100644 index 22b89338ad..0000000000 --- a/apps/webapp/app/routes/api.v1.worker.attempt.complete.ts +++ /dev/null @@ -1,23 +0,0 @@ -import { json } from "@remix-run/server-runtime"; -import { TaskRunExecutionResult } from "@trigger.dev/core/v3"; -import { z } from "zod"; -import { createActionWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; - -export const loader = createActionWorkerApiRoute( - { - body: z.object({ - runId: z.string(), - snapshotId: z.string(), - completion: TaskRunExecutionResult, - }), - }, - async ({ authenticatedWorker, body }) => { - const { runId, snapshotId, completion } = body; - const completeResult = await authenticatedWorker.completeRunAttempt({ - runId, - snapshotId, - completion, - }); - return json({ completeResult }); - } -); diff --git a/apps/webapp/app/routes/api.v1.worker.attempt.start.ts b/apps/webapp/app/routes/api.v1.worker.attempt.start.ts deleted file mode 100644 index abd18b864f..0000000000 --- a/apps/webapp/app/routes/api.v1.worker.attempt.start.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { json } from "@remix-run/server-runtime"; -import { z } from "zod"; -import { createActionWorkerApiRoute } from 
"~/services/routeBuiilders/apiBuilder.server"; - -export const loader = createActionWorkerApiRoute( - { - body: z.object({ - runId: z.string(), - snapshotId: z.string(), - }), - }, - async ({ authenticatedWorker, body }) => { - const { runId, snapshotId } = body; - const runExecutionData = await authenticatedWorker.startRunAttempt({ runId, snapshotId }); - return json(runExecutionData); - } -); diff --git a/apps/webapp/app/routes/api.v1.worker.dequeue.ts b/apps/webapp/app/routes/api.v1.worker.dequeue.ts deleted file mode 100644 index 8213ac6cec..0000000000 --- a/apps/webapp/app/routes/api.v1.worker.dequeue.ts +++ /dev/null @@ -1,6 +0,0 @@ -import { json } from "@remix-run/server-runtime"; -import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; - -export const loader = createLoaderWorkerApiRoute({}, async ({ authenticatedWorker }) => { - return json(await authenticatedWorker.dequeue()); -}); diff --git a/apps/webapp/app/routes/api.v1.worker.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker.heartbeat.ts deleted file mode 100644 index cf7358e19f..0000000000 --- a/apps/webapp/app/routes/api.v1.worker.heartbeat.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { json } from "@remix-run/server-runtime"; -import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; - -export const loader = createLoaderWorkerApiRoute({}, async ({ authenticatedWorker }) => { - await authenticatedWorker.heartbeatWorkerInstance(); - return json({ ok: true }); -}); From dba02b54f699230eadc1ec3568b5e30f4187c5df Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:31:32 +0000 Subject: [PATCH 160/485] one trigger service to rule them all --- .../routes/api.v1.tasks.$taskId.trigger.ts | 21 - apps/webapp/app/utils/delays.ts | 29 + .../app/v3/services/triggerTask.server.ts | 674 ++---------------- .../app/v3/services/triggerTaskV1.server.ts | 609 ++++++++++++++++ 
.../app/v3/services/triggerTaskV2.server.ts | 144 +--- packages/core/src/v3/apps/duration.ts | 22 + 6 files changed, 725 insertions(+), 774 deletions(-) create mode 100644 apps/webapp/app/v3/services/triggerTaskV1.server.ts diff --git a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts index 2d428090cb..aae68c91d6 100644 --- a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts +++ b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts @@ -78,27 +78,6 @@ export async function action({ request, params }: ActionFunctionArgs) { ); } - //todo RunEngine support - /* - - If the `triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. - - Add an `engine` column to `Project` in the database. - - Add `engine` to the trigger.config file. It would default to "V1" for now, but you can set it to V2. - - You run `npx trigger.dev@latest deploy` with config v2. - - Create BackgroundWorker with `engine`: `v2`. - - Set the `project` `engine` column to `v2`. - - You run `npx trigger.dev@latest dev` with config v2 - - Create BackgroundWorker with `engine`: `v2`. - - Set the `project` `engine` column to `v2`. - - When triggering - - triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. - - No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW - - */ - const service = new TriggerTaskService(); try { diff --git a/apps/webapp/app/utils/delays.ts b/apps/webapp/app/utils/delays.ts index 6faa67c677..eaa296e11b 100644 --- a/apps/webapp/app/utils/delays.ts +++ b/apps/webapp/app/utils/delays.ts @@ -1,3 +1,5 @@ +import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; + export const calculateDurationInMs = (options: { seconds?: number; minutes?: number; @@ -11,3 +13,30 @@ export const calculateDurationInMs = (options: { (options?.days ?? 
0) * 24 * 60 * 60 * 1000 ); }; + +export async function parseDelay(value?: string | Date): Promise { + if (!value) { + return; + } + + if (value instanceof Date) { + return value; + } + + try { + const date = new Date(value); + + // Check if the date is valid + if (isNaN(date.getTime())) { + return parseNaturalLanguageDuration(value); + } + + if (date.getTime() <= Date.now()) { + return; + } + + return date; + } catch (error) { + return parseNaturalLanguageDuration(value); + } +} diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index 9ba29a8b12..76b45d597b 100644 --- a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -1,30 +1,9 @@ -import { - IOPacket, - QueueOptions, - SemanticInternalAttributes, - TriggerTaskRequestBody, - packetRequiresOffloading, -} from "@trigger.dev/core/v3"; -import { env } from "~/env.server"; +import { TriggerTaskRequestBody } from "@trigger.dev/core/v3"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; -import { autoIncrementCounter } from "~/services/autoIncrementCounter.server"; -import { workerQueue } from "~/services/worker.server"; -import { marqs, sanitizeQueueName } from "~/v3/marqs/index.server"; -import { eventRepository } from "../eventRepository.server"; -import { generateFriendlyId } from "../friendlyIdentifiers"; -import { uploadToObjectStore } from "../r2.server"; -import { startActiveSpan } from "../tracer.server"; -import { getEntitlement } from "~/services/platform.v3.server"; -import { BaseService, ServiceValidationError } from "./baseService.server"; -import { logger } from "~/services/logger.server"; -import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; -import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; -import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; -import { handleMetadataPacket } from 
"~/utils/packets"; -import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; -import { ExpireEnqueuedRunService } from "./expireEnqueuedRun.server"; -import { guardQueueSizeLimitsForEnv } from "../queueSizeLimits.server"; -import { clampMaxDuration } from "../utils/maxDuration"; +import { WithRunEngine } from "./baseService.server"; +import { RunEngineVersion, RuntimeEnvironmentType } from "@trigger.dev/database"; +import { TriggerTaskServiceV1 } from "./triggerTaskV1.server"; +import { TriggerTaskServiceV2 } from "./triggerTaskV2.server"; export type TriggerTaskServiceOptions = { idempotencyKey?: string; @@ -42,7 +21,7 @@ export class OutOfEntitlementError extends Error { } } -export class TriggerTaskService extends BaseService { +export class TriggerTaskService extends WithRunEngine { public async call( taskId: string, environment: AuthenticatedEnvironment, @@ -52,620 +31,67 @@ export class TriggerTaskService extends BaseService { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); - const idempotencyKey = options.idempotencyKey ?? body.options?.idempotencyKey; - const delayUntil = await parseDelay(body.options?.delay); - - const ttl = - typeof body.options?.ttl === "number" - ? stringifyDuration(body.options?.ttl) - : body.options?.ttl ?? (environment.type === "DEVELOPMENT" ? "10m" : undefined); - - const existingRun = idempotencyKey - ? 
await this._prisma.taskRun.findUnique({ - where: { - runtimeEnvironmentId_taskIdentifier_idempotencyKey: { - runtimeEnvironmentId: environment.id, - idempotencyKey, - taskIdentifier: taskId, - }, - }, - }) - : undefined; - - if (existingRun) { - span.setAttribute("runId", existingRun.friendlyId); - - return existingRun; - } - - if (environment.type !== "DEVELOPMENT") { - const result = await getEntitlement(environment.organizationId); - if (result && result.hasAccess === false) { - throw new OutOfEntitlementError(); - } - } - - const queueSizeGuard = await guardQueueSizeLimitsForEnv(environment, marqs); - - logger.debug("Queue size guard result", { - queueSizeGuard, - environment: { - id: environment.id, - type: environment.type, - organization: environment.organization, - project: environment.project, - }, - }); - - if (!queueSizeGuard.isWithinLimits) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the queue size limit for this environment has been reached. The maximum size is ${queueSizeGuard.maximumSize}` - ); - } - - if ( - body.options?.tags && - typeof body.options.tags !== "string" && - body.options.tags.length > MAX_TAGS_PER_RUN - ) { - throw new ServiceValidationError( - `Runs can only have ${MAX_TAGS_PER_RUN} tags, you're trying to set ${body.options.tags.length}.` - ); + if (environment.project.engine === RunEngineVersion.V1) { + return await this.callV1(taskId, environment, body, options); } - const runFriendlyId = generateFriendlyId("run"); - - const payloadPacket = await this.#handlePayloadPacket( - body.payload, - body.options?.payloadType ?? "application/json", - runFriendlyId, - environment - ); - - const metadataPacket = body.options?.metadata - ? handleMetadataPacket( - body.options?.metadata, - body.options?.metadataType ?? "application/json" - ) - : undefined; + // The project is using the new Run Engine - const dependentAttempt = body.options?.dependentAttempt - ? 
await this._prisma.taskRunAttempt.findUnique({ - where: { friendlyId: body.options.dependentAttempt }, - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }) - : undefined; - - if ( - dependentAttempt && - (isFinalAttemptStatus(dependentAttempt.status) || - isFinalRunStatus(dependentAttempt.taskRun.status)) - ) { - logger.debug("Dependent attempt or run is in a terminal state", { - dependentAttempt: dependentAttempt, - }); - - if (isFinalAttemptStatus(dependentAttempt.status)) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentAttempt.status}` - ); - } else { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent run has a status of ${dependentAttempt.taskRun.status}` - ); - } + if (environment.type === RuntimeEnvironmentType.DEVELOPMENT) { + return await this.callV1(taskId, environment, body, options); } - const parentAttempt = body.options?.parentAttempt - ? await this._prisma.taskRunAttempt.findUnique({ - where: { friendlyId: body.options.parentAttempt }, - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }) - : undefined; - - const dependentBatchRun = body.options?.dependentBatch - ? 
await this._prisma.batchTaskRun.findUnique({ - where: { friendlyId: body.options.dependentBatch }, - include: { - dependentTaskAttempt: { - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }, - }, - }) - : undefined; - - if ( - dependentBatchRun && - dependentBatchRun.dependentTaskAttempt && - (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status) || - isFinalRunStatus(dependentBatchRun.dependentTaskAttempt.taskRun.status)) - ) { - logger.debug("Dependent batch run task attempt or run has been canceled", { - dependentBatchRunId: dependentBatchRun.id, - status: dependentBatchRun.status, - attempt: dependentBatchRun.dependentTaskAttempt, - }); - - if (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status)) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentBatchRun.dependentTaskAttempt.status}` - ); - } else { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent run has a status of ${dependentBatchRun.dependentTaskAttempt.taskRun.status}` - ); - } - } - - const parentBatchRun = body.options?.parentBatch - ? await this._prisma.batchTaskRun.findUnique({ - where: { friendlyId: body.options.parentBatch }, - include: { - dependentTaskAttempt: { - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - }, - }, - }, - }, - }, - }) - : undefined; - - return await eventRepository.traceEvent( - taskId, - { - context: options.traceContext, - spanParentAsLink: options.spanParentAsLink, - parentAsLinkType: options.parentAsLinkType, - kind: "SERVER", - environment, - taskSlug: taskId, - attributes: { - properties: { - [SemanticInternalAttributes.SHOW_ACTIONS]: true, - }, - style: { - icon: options.customIcon ?? "task", - }, - runIsTest: body.options?.test ?? 
false, - batchId: options.batchId, - idempotencyKey, - }, - incomplete: true, - immediate: true, - }, - async (event, traceContext, traceparent) => { - const run = await autoIncrementCounter.incrementInTransaction( - `v3-run:${environment.id}:${taskId}`, - async (num, tx) => { - const lockedToBackgroundWorker = body.options?.lockToVersion - ? await tx.backgroundWorker.findUnique({ - where: { - projectId_runtimeEnvironmentId_version: { - projectId: environment.projectId, - runtimeEnvironmentId: environment.id, - version: body.options?.lockToVersion, - }, - }, - }) - : undefined; + // The environment is not development, so we need to use the new Run Engine - let queueName = sanitizeQueueName( - await this.#getQueueName(taskId, environment, body.options?.queue?.name) - ); + //todo Additional checks + /* + - If the `triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. + - Add an `engine` column to `Project` in the database. - // Check that the queuename is not an empty string - if (!queueName) { - queueName = sanitizeQueueName(`task/${taskId}`); - } + Add `engine` to the trigger.config file. It would default to "V1" for now, but you can set it to V2. - event.setAttribute("queueName", queueName); - span.setAttribute("queueName", queueName); + You run `npx trigger.dev@latest deploy` with config v2. + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. - //upsert tags - let tagIds: string[] = []; - const bodyTags = - typeof body.options?.tags === "string" ? [body.options.tags] : body.options?.tags; - if (bodyTags && bodyTags.length > 0) { - for (const tag of bodyTags) { - const tagRecord = await createTag({ - tag, - projectId: environment.projectId, - }); - if (tagRecord) { - tagIds.push(tagRecord.id); - } - } - } + You run `npx trigger.dev@latest dev` with config v2 + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. 
- const depth = dependentAttempt - ? dependentAttempt.taskRun.depth + 1 - : parentAttempt - ? parentAttempt.taskRun.depth + 1 - : dependentBatchRun?.dependentTaskAttempt - ? dependentBatchRun.dependentTaskAttempt.taskRun.depth + 1 - : 0; + When triggering + - triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. + - No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + */ - const taskRun = await tx.taskRun.create({ - data: { - status: delayUntil ? "DELAYED" : "PENDING", - number: num, - friendlyId: runFriendlyId, - runtimeEnvironmentId: environment.id, - projectId: environment.projectId, - idempotencyKey, - taskIdentifier: taskId, - payload: payloadPacket.data ?? "", - payloadType: payloadPacket.dataType, - context: body.context, - traceContext: traceContext, - traceId: event.traceId, - spanId: event.spanId, - parentSpanId: - options.parentAsLinkType === "replay" ? undefined : traceparent?.spanId, - lockedToVersionId: lockedToBackgroundWorker?.id, - concurrencyKey: body.options?.concurrencyKey, - queue: queueName, - isTest: body.options?.test ?? false, - delayUntil, - queuedAt: delayUntil ? undefined : new Date(), - maxAttempts: body.options?.maxAttempts, - ttl, - tags: - tagIds.length === 0 - ? undefined - : { - connect: tagIds.map((id) => ({ id })), - }, - parentTaskRunId: - dependentAttempt?.taskRun.id ?? - parentAttempt?.taskRun.id ?? - dependentBatchRun?.dependentTaskAttempt?.taskRun.id, - parentTaskRunAttemptId: - dependentAttempt?.id ?? - parentAttempt?.id ?? - dependentBatchRun?.dependentTaskAttempt?.id, - rootTaskRunId: - dependentAttempt?.taskRun.rootTaskRunId ?? - dependentAttempt?.taskRun.id ?? - parentAttempt?.taskRun.rootTaskRunId ?? - parentAttempt?.taskRun.id ?? - dependentBatchRun?.dependentTaskAttempt?.taskRun.rootTaskRunId ?? - dependentBatchRun?.dependentTaskAttempt?.taskRun.id, - batchId: dependentBatchRun?.id ?? parentBatchRun?.id, - resumeParentOnCompletion: !!(dependentAttempt ?? 
dependentBatchRun), - depth, - metadata: metadataPacket?.data, - metadataType: metadataPacket?.dataType, - seedMetadata: metadataPacket?.data, - seedMetadataType: metadataPacket?.dataType, - maxDurationInSeconds: body.options?.maxDuration - ? clampMaxDuration(body.options.maxDuration) - : undefined, - runTags: bodyTags, - }, - }); - - event.setAttribute("runId", taskRun.friendlyId); - span.setAttribute("runId", taskRun.friendlyId); - - if (dependentAttempt) { - await tx.taskRunDependency.create({ - data: { - taskRunId: taskRun.id, - dependentAttemptId: dependentAttempt.id, - }, - }); - } else if (dependentBatchRun) { - await tx.taskRunDependency.create({ - data: { - taskRunId: taskRun.id, - dependentBatchRunId: dependentBatchRun.id, - }, - }); - } - - if (body.options?.queue) { - const concurrencyLimit = - typeof body.options.queue.concurrencyLimit === "number" - ? Math.max(0, body.options.queue.concurrencyLimit) - : undefined; - - let taskQueue = await tx.taskQueue.findFirst({ - where: { - runtimeEnvironmentId: environment.id, - name: queueName, - }, - }); - - if (taskQueue) { - taskQueue = await tx.taskQueue.update({ - where: { - id: taskQueue.id, - }, - data: { - concurrencyLimit, - rateLimit: body.options.queue.rateLimit, - }, - }); - } else { - taskQueue = await tx.taskQueue.create({ - data: { - friendlyId: generateFriendlyId("queue"), - name: queueName, - concurrencyLimit, - runtimeEnvironmentId: environment.id, - projectId: environment.projectId, - rateLimit: body.options.queue.rateLimit, - type: "NAMED", - }, - }); - } - - if (typeof taskQueue.concurrencyLimit === "number") { - await marqs?.updateQueueConcurrencyLimits( - environment, - taskQueue.name, - taskQueue.concurrencyLimit - ); - } else { - await marqs?.removeQueueConcurrencyLimits(environment, taskQueue.name); - } - } - - if (taskRun.delayUntil) { - await workerQueue.enqueue( - "v3.enqueueDelayedRun", - { runId: taskRun.id }, - { tx, runAt: delayUntil, jobKey: `v3.enqueueDelayedRun.${taskRun.id}` } 
- ); - } - - if (!taskRun.delayUntil && taskRun.ttl) { - const expireAt = parseNaturalLanguageDuration(taskRun.ttl); - - if (expireAt) { - await ExpireEnqueuedRunService.enqueue(taskRun.id, expireAt, tx); - } - } - - return taskRun; - }, - async (_, tx) => { - const counter = await tx.taskRunNumberCounter.findUnique({ - where: { - taskIdentifier_environmentId: { - taskIdentifier: taskId, - environmentId: environment.id, - }, - }, - select: { lastNumber: true }, - }); - - return counter?.lastNumber; - }, - this._prisma - ); - - //release the concurrency for the env and org, if part of a (batch)triggerAndWait - if (dependentAttempt) { - const isSameTask = dependentAttempt.taskRun.taskIdentifier === taskId; - await marqs?.releaseConcurrency(dependentAttempt.taskRun.id, isSameTask); - } - if (dependentBatchRun?.dependentTaskAttempt) { - const isSameTask = - dependentBatchRun.dependentTaskAttempt.taskRun.taskIdentifier === taskId; - await marqs?.releaseConcurrency( - dependentBatchRun.dependentTaskAttempt.taskRun.id, - isSameTask - ); - } - - if (!run) { - return; - } - - // We need to enqueue the task run into the appropriate queue. This is done after the tx completes to prevent a race condition where the task run hasn't been created yet by the time we dequeue. 
- if (run.status === "PENDING") { - await marqs?.enqueueMessage( - environment, - run.queue, - run.id, - { - type: "EXECUTE", - taskIdentifier: taskId, - projectId: environment.projectId, - environmentId: environment.id, - environmentType: environment.type, - }, - body.options?.concurrencyKey - ); - } - - return run; - } - ); + return await this.callV2(taskId, environment, body, options); }); } - async #getQueueName(taskId: string, environment: AuthenticatedEnvironment, queueName?: string) { - if (queueName) { - return queueName; - } - - const defaultQueueName = `task/${taskId}`; - - const worker = await findCurrentWorkerFromEnvironment(environment); - - if (!worker) { - logger.debug("Failed to get queue name: No worker found", { - taskId, - environmentId: environment.id, - }); - - return defaultQueueName; - } - - const task = await this._prisma.backgroundWorkerTask.findUnique({ - where: { - workerId_slug: { - workerId: worker.id, - slug: taskId, - }, - }, - }); - - if (!task) { - console.log("Failed to get queue name: No task found", { - taskId, - environmentId: environment.id, - }); - - return defaultQueueName; - } - - const queueConfig = QueueOptions.optional().nullable().safeParse(task.queueConfig); - - if (!queueConfig.success) { - console.log("Failed to get queue name: Invalid queue config", { - taskId, - environmentId: environment.id, - queueConfig: task.queueConfig, - }); - - return defaultQueueName; - } - - return queueConfig.data?.name ?? 
defaultQueueName; + private async callV1( + taskId: string, + environment: AuthenticatedEnvironment, + body: TriggerTaskRequestBody, + options: TriggerTaskServiceOptions = {} + ) { + const service = new TriggerTaskServiceV1(this._prisma); + return await service.call(taskId, environment, body, options); } - async #handlePayloadPacket( - payload: any, - payloadType: string, - pathPrefix: string, - environment: AuthenticatedEnvironment + private async callV2( + taskId: string, + environment: AuthenticatedEnvironment, + body: TriggerTaskRequestBody, + options: TriggerTaskServiceOptions = {} ) { - return await startActiveSpan("handlePayloadPacket()", async (span) => { - const packet = this.#createPayloadPacket(payload, payloadType); - - if (!packet.data) { - return packet; - } - - const { needsOffloading, size } = packetRequiresOffloading( - packet, - env.TASK_PAYLOAD_OFFLOAD_THRESHOLD - ); - - if (!needsOffloading) { - return packet; - } - - const filename = `${pathPrefix}/payload.json`; - - await uploadToObjectStore(filename, packet.data, packet.dataType, environment); - - return { - data: filename, - dataType: "application/store", - }; + const service = new TriggerTaskServiceV2({ + prisma: this._prisma, + engine: this._engine, + }); + return await service.call({ + taskId, + environment, + body, + options, }); } - - #createPayloadPacket(payload: any, payloadType: string): IOPacket { - if (payloadType === "application/json") { - return { data: JSON.stringify(payload), dataType: "application/json" }; - } - - if (typeof payload === "string") { - return { data: payload, dataType: payloadType }; - } - - return { dataType: payloadType }; - } -} - -export async function parseDelay(value?: string | Date): Promise { - if (!value) { - return; - } - - if (value instanceof Date) { - return value; - } - - try { - const date = new Date(value); - - // Check if the date is valid - if (isNaN(date.getTime())) { - return parseNaturalLanguageDuration(value); - } - - if (date.getTime() <= 
Date.now()) { - return; - } - - return date; - } catch (error) { - return parseNaturalLanguageDuration(value); - } -} - -function stringifyDuration(seconds: number): string | undefined { - if (seconds <= 0) { - return; - } - - const units = { - w: Math.floor(seconds / 604800), - d: Math.floor((seconds % 604800) / 86400), - h: Math.floor((seconds % 86400) / 3600), - m: Math.floor((seconds % 3600) / 60), - s: Math.floor(seconds % 60), - }; - - // Filter the units having non-zero values and join them - const result: string = Object.entries(units) - .filter(([unit, val]) => val != 0) - .map(([unit, val]) => `${val}${unit}`) - .join(""); - - return result; } diff --git a/apps/webapp/app/v3/services/triggerTaskV1.server.ts b/apps/webapp/app/v3/services/triggerTaskV1.server.ts new file mode 100644 index 0000000000..3114d2d749 --- /dev/null +++ b/apps/webapp/app/v3/services/triggerTaskV1.server.ts @@ -0,0 +1,609 @@ +import { + IOPacket, + QueueOptions, + SemanticInternalAttributes, + TriggerTaskRequestBody, + packetRequiresOffloading, +} from "@trigger.dev/core/v3"; +import { env } from "~/env.server"; +import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; +import { autoIncrementCounter } from "~/services/autoIncrementCounter.server"; +import { workerQueue } from "~/services/worker.server"; +import { marqs, sanitizeQueueName } from "~/v3/marqs/index.server"; +import { eventRepository } from "../eventRepository.server"; +import { generateFriendlyId } from "../friendlyIdentifiers"; +import { uploadToObjectStore } from "../r2.server"; +import { startActiveSpan } from "../tracer.server"; +import { getEntitlement } from "~/services/platform.v3.server"; +import { BaseService, ServiceValidationError } from "./baseService.server"; +import { logger } from "~/services/logger.server"; +import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; +import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; +import { 
findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; +import { handleMetadataPacket } from "~/utils/packets"; +import { parseNaturalLanguageDuration, stringifyDuration } from "@trigger.dev/core/v3/apps"; +import { ExpireEnqueuedRunService } from "./expireEnqueuedRun.server"; +import { guardQueueSizeLimitsForEnv } from "../queueSizeLimits.server"; +import { clampMaxDuration } from "../utils/maxDuration"; +import { parseDelay } from "~/utils/delays"; +import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask.server"; + +/** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. */ +export class TriggerTaskServiceV1 extends BaseService { + public async call( + taskId: string, + environment: AuthenticatedEnvironment, + body: TriggerTaskRequestBody, + options: TriggerTaskServiceOptions = {} + ) { + return await this.traceWithEnv("call()", environment, async (span) => { + span.setAttribute("taskId", taskId); + + const idempotencyKey = options.idempotencyKey ?? body.options?.idempotencyKey; + const delayUntil = await parseDelay(body.options?.delay); + + const ttl = + typeof body.options?.ttl === "number" + ? stringifyDuration(body.options?.ttl) + : body.options?.ttl ?? (environment.type === "DEVELOPMENT" ? "10m" : undefined); + + const existingRun = idempotencyKey + ? 
await this._prisma.taskRun.findUnique({ + where: { + runtimeEnvironmentId_taskIdentifier_idempotencyKey: { + runtimeEnvironmentId: environment.id, + idempotencyKey, + taskIdentifier: taskId, + }, + }, + }) + : undefined; + + if (existingRun) { + span.setAttribute("runId", existingRun.friendlyId); + + return existingRun; + } + + if (environment.type !== "DEVELOPMENT") { + const result = await getEntitlement(environment.organizationId); + if (result && result.hasAccess === false) { + throw new OutOfEntitlementError(); + } + } + + const queueSizeGuard = await guardQueueSizeLimitsForEnv(environment, marqs); + + logger.debug("Queue size guard result", { + queueSizeGuard, + environment: { + id: environment.id, + type: environment.type, + organization: environment.organization, + project: environment.project, + }, + }); + + if (!queueSizeGuard.isWithinLimits) { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the queue size limit for this environment has been reached. The maximum size is ${queueSizeGuard.maximumSize}` + ); + } + + if ( + body.options?.tags && + typeof body.options.tags !== "string" && + body.options.tags.length > MAX_TAGS_PER_RUN + ) { + throw new ServiceValidationError( + `Runs can only have ${MAX_TAGS_PER_RUN} tags, you're trying to set ${body.options.tags.length}.` + ); + } + + const runFriendlyId = generateFriendlyId("run"); + + const payloadPacket = await this.#handlePayloadPacket( + body.payload, + body.options?.payloadType ?? "application/json", + runFriendlyId, + environment + ); + + const metadataPacket = body.options?.metadata + ? handleMetadataPacket( + body.options?.metadata, + body.options?.metadataType ?? "application/json" + ) + : undefined; + + const dependentAttempt = body.options?.dependentAttempt + ? 
await this._prisma.taskRunAttempt.findUnique({ + where: { friendlyId: body.options.dependentAttempt }, + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + depth: true, + }, + }, + }, + }) + : undefined; + + if ( + dependentAttempt && + (isFinalAttemptStatus(dependentAttempt.status) || + isFinalRunStatus(dependentAttempt.taskRun.status)) + ) { + logger.debug("Dependent attempt or run is in a terminal state", { + dependentAttempt: dependentAttempt, + }); + + if (isFinalAttemptStatus(dependentAttempt.status)) { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentAttempt.status}` + ); + } else { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent run has a status of ${dependentAttempt.taskRun.status}` + ); + } + } + + const parentAttempt = body.options?.parentAttempt + ? await this._prisma.taskRunAttempt.findUnique({ + where: { friendlyId: body.options.parentAttempt }, + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + depth: true, + }, + }, + }, + }) + : undefined; + + const dependentBatchRun = body.options?.dependentBatch + ? 
await this._prisma.batchTaskRun.findUnique({ + where: { friendlyId: body.options.dependentBatch }, + include: { + dependentTaskAttempt: { + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + depth: true, + }, + }, + }, + }, + }, + }) + : undefined; + + if ( + dependentBatchRun && + dependentBatchRun.dependentTaskAttempt && + (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status) || + isFinalRunStatus(dependentBatchRun.dependentTaskAttempt.taskRun.status)) + ) { + logger.debug("Dependent batch run task attempt or run has been canceled", { + dependentBatchRunId: dependentBatchRun.id, + status: dependentBatchRun.status, + attempt: dependentBatchRun.dependentTaskAttempt, + }); + + if (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status)) { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentBatchRun.dependentTaskAttempt.status}` + ); + } else { + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent run has a status of ${dependentBatchRun.dependentTaskAttempt.taskRun.status}` + ); + } + } + + const parentBatchRun = body.options?.parentBatch + ? await this._prisma.batchTaskRun.findUnique({ + where: { friendlyId: body.options.parentBatch }, + include: { + dependentTaskAttempt: { + include: { + taskRun: { + select: { + id: true, + status: true, + taskIdentifier: true, + rootTaskRunId: true, + }, + }, + }, + }, + }, + }) + : undefined; + + return await eventRepository.traceEvent( + taskId, + { + context: options.traceContext, + spanParentAsLink: options.spanParentAsLink, + parentAsLinkType: options.parentAsLinkType, + kind: "SERVER", + environment, + taskSlug: taskId, + attributes: { + properties: { + [SemanticInternalAttributes.SHOW_ACTIONS]: true, + }, + style: { + icon: options.customIcon ?? "task", + }, + runIsTest: body.options?.test ?? 
false, + batchId: options.batchId, + idempotencyKey, + }, + incomplete: true, + immediate: true, + }, + async (event, traceContext, traceparent) => { + const run = await autoIncrementCounter.incrementInTransaction( + `v3-run:${environment.id}:${taskId}`, + async (num, tx) => { + const lockedToBackgroundWorker = body.options?.lockToVersion + ? await tx.backgroundWorker.findUnique({ + where: { + projectId_runtimeEnvironmentId_version: { + projectId: environment.projectId, + runtimeEnvironmentId: environment.id, + version: body.options?.lockToVersion, + }, + }, + }) + : undefined; + + let queueName = sanitizeQueueName( + await this.#getQueueName(taskId, environment, body.options?.queue?.name) + ); + + // Check that the queuename is not an empty string + if (!queueName) { + queueName = sanitizeQueueName(`task/${taskId}`); + } + + event.setAttribute("queueName", queueName); + span.setAttribute("queueName", queueName); + + //upsert tags + let tagIds: string[] = []; + const bodyTags = + typeof body.options?.tags === "string" ? [body.options.tags] : body.options?.tags; + if (bodyTags && bodyTags.length > 0) { + for (const tag of bodyTags) { + const tagRecord = await createTag({ + tag, + projectId: environment.projectId, + }); + if (tagRecord) { + tagIds.push(tagRecord.id); + } + } + } + + const depth = dependentAttempt + ? dependentAttempt.taskRun.depth + 1 + : parentAttempt + ? parentAttempt.taskRun.depth + 1 + : dependentBatchRun?.dependentTaskAttempt + ? dependentBatchRun.dependentTaskAttempt.taskRun.depth + 1 + : 0; + + const taskRun = await tx.taskRun.create({ + data: { + status: delayUntil ? "DELAYED" : "PENDING", + number: num, + friendlyId: runFriendlyId, + runtimeEnvironmentId: environment.id, + projectId: environment.projectId, + idempotencyKey, + taskIdentifier: taskId, + payload: payloadPacket.data ?? 
"", + payloadType: payloadPacket.dataType, + context: body.context, + traceContext: traceContext, + traceId: event.traceId, + spanId: event.spanId, + parentSpanId: + options.parentAsLinkType === "replay" ? undefined : traceparent?.spanId, + lockedToVersionId: lockedToBackgroundWorker?.id, + concurrencyKey: body.options?.concurrencyKey, + queue: queueName, + isTest: body.options?.test ?? false, + delayUntil, + queuedAt: delayUntil ? undefined : new Date(), + maxAttempts: body.options?.maxAttempts, + ttl, + tags: + tagIds.length === 0 + ? undefined + : { + connect: tagIds.map((id) => ({ id })), + }, + parentTaskRunId: + dependentAttempt?.taskRun.id ?? + parentAttempt?.taskRun.id ?? + dependentBatchRun?.dependentTaskAttempt?.taskRun.id, + parentTaskRunAttemptId: + dependentAttempt?.id ?? + parentAttempt?.id ?? + dependentBatchRun?.dependentTaskAttempt?.id, + rootTaskRunId: + dependentAttempt?.taskRun.rootTaskRunId ?? + dependentAttempt?.taskRun.id ?? + parentAttempt?.taskRun.rootTaskRunId ?? + parentAttempt?.taskRun.id ?? + dependentBatchRun?.dependentTaskAttempt?.taskRun.rootTaskRunId ?? + dependentBatchRun?.dependentTaskAttempt?.taskRun.id, + batchId: dependentBatchRun?.id ?? parentBatchRun?.id, + resumeParentOnCompletion: !!(dependentAttempt ?? dependentBatchRun), + depth, + metadata: metadataPacket?.data, + metadataType: metadataPacket?.dataType, + seedMetadata: metadataPacket?.data, + seedMetadataType: metadataPacket?.dataType, + maxDurationInSeconds: body.options?.maxDuration + ? 
clampMaxDuration(body.options.maxDuration) + : undefined, + runTags: bodyTags, + }, + }); + + event.setAttribute("runId", taskRun.friendlyId); + span.setAttribute("runId", taskRun.friendlyId); + + if (dependentAttempt) { + await tx.taskRunDependency.create({ + data: { + taskRunId: taskRun.id, + dependentAttemptId: dependentAttempt.id, + }, + }); + } else if (dependentBatchRun) { + await tx.taskRunDependency.create({ + data: { + taskRunId: taskRun.id, + dependentBatchRunId: dependentBatchRun.id, + }, + }); + } + + if (body.options?.queue) { + const concurrencyLimit = + typeof body.options.queue.concurrencyLimit === "number" + ? Math.max(0, body.options.queue.concurrencyLimit) + : undefined; + + let taskQueue = await tx.taskQueue.findFirst({ + where: { + runtimeEnvironmentId: environment.id, + name: queueName, + }, + }); + + if (taskQueue) { + taskQueue = await tx.taskQueue.update({ + where: { + id: taskQueue.id, + }, + data: { + concurrencyLimit, + rateLimit: body.options.queue.rateLimit, + }, + }); + } else { + taskQueue = await tx.taskQueue.create({ + data: { + friendlyId: generateFriendlyId("queue"), + name: queueName, + concurrencyLimit, + runtimeEnvironmentId: environment.id, + projectId: environment.projectId, + rateLimit: body.options.queue.rateLimit, + type: "NAMED", + }, + }); + } + + if (typeof taskQueue.concurrencyLimit === "number") { + await marqs?.updateQueueConcurrencyLimits( + environment, + taskQueue.name, + taskQueue.concurrencyLimit + ); + } else { + await marqs?.removeQueueConcurrencyLimits(environment, taskQueue.name); + } + } + + if (taskRun.delayUntil) { + await workerQueue.enqueue( + "v3.enqueueDelayedRun", + { runId: taskRun.id }, + { tx, runAt: delayUntil, jobKey: `v3.enqueueDelayedRun.${taskRun.id}` } + ); + } + + if (!taskRun.delayUntil && taskRun.ttl) { + const expireAt = parseNaturalLanguageDuration(taskRun.ttl); + + if (expireAt) { + await ExpireEnqueuedRunService.enqueue(taskRun.id, expireAt, tx); + } + } + + return taskRun; + }, + 
async (_, tx) => { + const counter = await tx.taskRunNumberCounter.findUnique({ + where: { + taskIdentifier_environmentId: { + taskIdentifier: taskId, + environmentId: environment.id, + }, + }, + select: { lastNumber: true }, + }); + + return counter?.lastNumber; + }, + this._prisma + ); + + //release the concurrency for the env and org, if part of a (batch)triggerAndWait + if (dependentAttempt) { + const isSameTask = dependentAttempt.taskRun.taskIdentifier === taskId; + await marqs?.releaseConcurrency(dependentAttempt.taskRun.id, isSameTask); + } + if (dependentBatchRun?.dependentTaskAttempt) { + const isSameTask = + dependentBatchRun.dependentTaskAttempt.taskRun.taskIdentifier === taskId; + await marqs?.releaseConcurrency( + dependentBatchRun.dependentTaskAttempt.taskRun.id, + isSameTask + ); + } + + if (!run) { + return; + } + + // We need to enqueue the task run into the appropriate queue. This is done after the tx completes to prevent a race condition where the task run hasn't been created yet by the time we dequeue. 
+ if (run.status === "PENDING") { + await marqs?.enqueueMessage( + environment, + run.queue, + run.id, + { + type: "EXECUTE", + taskIdentifier: taskId, + projectId: environment.projectId, + environmentId: environment.id, + environmentType: environment.type, + }, + body.options?.concurrencyKey + ); + } + + return run; + } + ); + }); + } + + async #getQueueName(taskId: string, environment: AuthenticatedEnvironment, queueName?: string) { + if (queueName) { + return queueName; + } + + const defaultQueueName = `task/${taskId}`; + + const worker = await findCurrentWorkerFromEnvironment(environment); + + if (!worker) { + logger.debug("Failed to get queue name: No worker found", { + taskId, + environmentId: environment.id, + }); + + return defaultQueueName; + } + + const task = await this._prisma.backgroundWorkerTask.findUnique({ + where: { + workerId_slug: { + workerId: worker.id, + slug: taskId, + }, + }, + }); + + if (!task) { + console.log("Failed to get queue name: No task found", { + taskId, + environmentId: environment.id, + }); + + return defaultQueueName; + } + + const queueConfig = QueueOptions.optional().nullable().safeParse(task.queueConfig); + + if (!queueConfig.success) { + console.log("Failed to get queue name: Invalid queue config", { + taskId, + environmentId: environment.id, + queueConfig: task.queueConfig, + }); + + return defaultQueueName; + } + + return queueConfig.data?.name ?? 
defaultQueueName; + } + + async #handlePayloadPacket( + payload: any, + payloadType: string, + pathPrefix: string, + environment: AuthenticatedEnvironment + ) { + return await startActiveSpan("handlePayloadPacket()", async (span) => { + const packet = this.#createPayloadPacket(payload, payloadType); + + if (!packet.data) { + return packet; + } + + const { needsOffloading, size } = packetRequiresOffloading( + packet, + env.TASK_PAYLOAD_OFFLOAD_THRESHOLD + ); + + if (!needsOffloading) { + return packet; + } + + const filename = `${pathPrefix}/payload.json`; + + await uploadToObjectStore(filename, packet.data, packet.dataType, environment); + + return { + data: filename, + dataType: "application/store", + }; + }); + } + + #createPayloadPacket(payload: any, payloadType: string): IOPacket { + if (payloadType === "application/json") { + return { data: JSON.stringify(payload), dataType: "application/json" }; + } + + if (typeof payload === "string") { + return { data: payload, dataType: payloadType }; + } + + return { dataType: payloadType }; + } +} diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 7de918d5b7..a93c4a669f 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -21,24 +21,11 @@ import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; import { handleMetadataPacket } from "~/utils/packets"; import { WorkerGroupService } from "./worker/workerGroupService.server"; -import { engine } from "../runEngine.server"; - -export type TriggerTaskServiceOptions = { - idempotencyKey?: string; - triggerVersion?: string; - traceContext?: Record; - spanParentAsLink?: boolean; - parentAsLinkType?: "replay" | "trigger"; - batchId?: string; - customIcon?: string; -}; - -export class OutOfEntitlementError extends Error { - constructor() 
{ - super("You can't trigger a task because you have run out of credits."); - } -} +import { parseDelay } from "~/utils/delays"; +import { stringifyDuration } from "@trigger.dev/core/v3/apps"; +import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask.server"; +/** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. */ export class TriggerTaskServiceV2 extends WithRunEngine { public async call({ taskId, @@ -88,7 +75,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { } //check the env queue isn't beyond the limit - const queueSizeGuard = await guardQueueSizeLimitsForEnv(environment); + const queueSizeGuard = await this.#guardQueueSizeLimitsForEnv(environment); logger.debug("Queue size guard result", { queueSizeGuard, @@ -502,123 +489,22 @@ export class TriggerTaskServiceV2 extends WithRunEngine { return { dataType: payloadType }; } -} - -export async function parseDelay(value?: string | Date): Promise { - if (!value) { - return; - } - - if (value instanceof Date) { - return value; - } - - try { - const date = new Date(value); - - // Check if the date is valid - if (isNaN(date.getTime())) { - return parseNaturalLanguageDuration(value); - } - - if (date.getTime() <= Date.now()) { - return; - } - - return date; - } catch (error) { - return parseNaturalLanguageDuration(value); - } -} - -export function parseNaturalLanguageDuration(duration: string): Date | undefined { - const regexPattern = /^(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?$/; - const result: Date = new Date(); - let hasMatch = false; + async #guardQueueSizeLimitsForEnv(environment: AuthenticatedEnvironment) { + const maximumSize = getMaximumSizeForEnvironment(environment); - const elements = duration.match(regexPattern); - if (elements) { - if (elements[1]) { - const weeks = Number(elements[1].slice(0, -1)); - if (weeks >= 0) { - result.setDate(result.getDate() + 7 * weeks); - hasMatch = true; - } - } - if (elements[2]) { - const days = 
Number(elements[2].slice(0, -1)); - if (days >= 0) { - result.setDate(result.getDate() + days); - hasMatch = true; - } - } - if (elements[3]) { - const hours = Number(elements[3].slice(0, -1)); - if (hours >= 0) { - result.setHours(result.getHours() + hours); - hasMatch = true; - } - } - if (elements[4]) { - const minutes = Number(elements[4].slice(0, -1)); - if (minutes >= 0) { - result.setMinutes(result.getMinutes() + minutes); - hasMatch = true; - } + if (typeof maximumSize === "undefined") { + return { isWithinLimits: true }; } - if (elements[5]) { - const seconds = Number(elements[5].slice(0, -1)); - if (seconds >= 0) { - result.setSeconds(result.getSeconds() + seconds); - hasMatch = true; - } - } - } - - if (hasMatch) { - return result; - } - return undefined; -} + const queueSize = await this._engine.lengthOfEnvQueue(environment); -function stringifyDuration(seconds: number): string | undefined { - if (seconds <= 0) { - return; + return { + isWithinLimits: queueSize < maximumSize, + maximumSize, + queueSize, + }; } - - const units = { - w: Math.floor(seconds / 604800), - d: Math.floor((seconds % 604800) / 86400), - h: Math.floor((seconds % 86400) / 3600), - m: Math.floor((seconds % 3600) / 60), - s: Math.floor(seconds % 60), - }; - - // Filter the units having non-zero values and join them - const result: string = Object.entries(units) - .filter(([unit, val]) => val != 0) - .map(([unit, val]) => `${val}${unit}`) - .join(""); - - return result; -} - -async function guardQueueSizeLimitsForEnv(environment: AuthenticatedEnvironment) { - const maximumSize = getMaximumSizeForEnvironment(environment); - - if (typeof maximumSize === "undefined") { - return { isWithinLimits: true }; - } - - const queueSize = await engine.lengthOfEnvQueue(environment); - - return { - isWithinLimits: queueSize < maximumSize, - maximumSize, - queueSize, - }; } function getMaximumSizeForEnvironment(environment: AuthenticatedEnvironment): number | undefined { diff --git 
a/packages/core/src/v3/apps/duration.ts b/packages/core/src/v3/apps/duration.ts index 85c0dbc88c..d14271c8c9 100644 --- a/packages/core/src/v3/apps/duration.ts +++ b/packages/core/src/v3/apps/duration.ts @@ -49,3 +49,25 @@ export function parseNaturalLanguageDuration(duration: string): Date | undefined return undefined; } + +export function stringifyDuration(seconds: number): string | undefined { + if (seconds <= 0) { + return; + } + + const units = { + w: Math.floor(seconds / 604800), + d: Math.floor((seconds % 604800) / 86400), + h: Math.floor((seconds % 86400) / 3600), + m: Math.floor((seconds % 3600) / 60), + s: Math.floor(seconds % 60), + }; + + // Filter the units having non-zero values and join them + const result: string = Object.entries(units) + .filter(([unit, val]) => val != 0) + .map(([unit, val]) => `${val}${unit}`) + .join(""); + + return result; +} From 320142465d924b825778d47966db99dc6620dbdd Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:39:04 +0000 Subject: [PATCH 161/485] minor changes to deployments and run pages --- .../v3/DeploymentListPresenter.server.ts | 41 ++++++++++--------- .../v3/DeploymentPresenter.server.ts | 2 + .../app/presenters/v3/SpanPresenter.server.ts | 2 + .../route.tsx | 5 +++ .../route.tsx | 3 +- .../route.tsx | 4 ++ apps/webapp/app/utils/string.ts | 3 ++ .../v3/services/rollbackDeployment.server.ts | 10 ++++- 8 files changed, 49 insertions(+), 21 deletions(-) create mode 100644 apps/webapp/app/utils/string.ts diff --git a/apps/webapp/app/presenters/v3/DeploymentListPresenter.server.ts b/apps/webapp/app/presenters/v3/DeploymentListPresenter.server.ts index c08454e4a6..d7d0add22a 100644 --- a/apps/webapp/app/presenters/v3/DeploymentListPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/DeploymentListPresenter.server.ts @@ -1,4 +1,4 @@ -import { WorkerDeploymentStatus } from "@trigger.dev/database"; +import { WorkerDeploymentStatus, WorkerInstanceGroupType } 
from "@trigger.dev/database"; import { sqlDatabaseSchema, PrismaClient, prisma } from "~/db.server"; import { Organization } from "~/models/organization.server"; import { Project } from "~/models/project.server"; @@ -95,29 +95,31 @@ export class DeploymentListPresenter { userName: string | null; userDisplayName: string | null; userAvatarUrl: string | null; + type: WorkerInstanceGroupType; }[] >` - SELECT - wd."id", - wd."shortCode", - wd."version", - (SELECT COUNT(*) FROM ${sqlDatabaseSchema}."BackgroundWorkerTask" WHERE "BackgroundWorkerTask"."workerId" = wd."workerId") AS "tasksCount", - wd."environmentId", - wd."status", - u."id" AS "userId", - u."name" AS "userName", - u."displayName" AS "userDisplayName", - u."avatarUrl" AS "userAvatarUrl", + SELECT + wd."id", + wd."shortCode", + wd."version", + (SELECT COUNT(*) FROM ${sqlDatabaseSchema}."BackgroundWorkerTask" WHERE "BackgroundWorkerTask"."workerId" = wd."workerId") AS "tasksCount", + wd."environmentId", + wd."status", + u."id" AS "userId", + u."name" AS "userName", + u."displayName" AS "userDisplayName", + u."avatarUrl" AS "userAvatarUrl", wd."builtAt", - wd."deployedAt" -FROM + wd."deployedAt", + wd."type" +FROM ${sqlDatabaseSchema}."WorkerDeployment" as wd -INNER JOIN - ${sqlDatabaseSchema}."User" as u ON wd."triggeredById" = u."id" -WHERE +INNER JOIN + ${sqlDatabaseSchema}."User" as u ON wd."triggeredById" = u."id" +WHERE wd."projectId" = ${project.id} -ORDER BY - string_to_array(wd."version", '.')::int[] DESC +ORDER BY + string_to_array(wd."version", '.')::int[] DESC LIMIT ${pageSize} OFFSET ${pageSize * (page - 1)};`; return { @@ -146,6 +148,7 @@ LIMIT ${pageSize} OFFSET ${pageSize * (page - 1)};`; isCurrent: label?.label === "current", isDeployed: deployment.status === "DEPLOYED", isLatest: page === 1 && index === 0, + type: deployment.type, environment: { id: environment.id, type: environment.type, diff --git a/apps/webapp/app/presenters/v3/DeploymentPresenter.server.ts 
b/apps/webapp/app/presenters/v3/DeploymentPresenter.server.ts index 7891c1191c..5b3ec0e5ce 100644 --- a/apps/webapp/app/presenters/v3/DeploymentPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/DeploymentPresenter.server.ts @@ -68,6 +68,7 @@ export class DeploymentPresenter { imageReference: true, externalBuildData: true, projectId: true, + type: true, environment: { select: { id: true, @@ -152,6 +153,7 @@ export class DeploymentPresenter { organizationId: project.organizationId, errorData: DeploymentPresenter.prepareErrorData(deployment.errorData), isBuilt: !!deployment.builtAt, + type: deployment.type, }, }; } diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index ac1869b90f..f921d89e5b 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -83,6 +83,7 @@ export class SpanPresenter extends BasePresenter { sdkVersion: true, }, }, + engine: true, //status + duration status: true, startedAt: true, @@ -312,6 +313,7 @@ export class SpanPresenter extends BasePresenter { context: JSON.stringify(context, null, 2), metadata, maxDurationInSeconds: getMaxDuration(run.maxDurationInSeconds), + engine: run.engine, }; } diff --git a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments.$deploymentParam/route.tsx b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments.$deploymentParam/route.tsx index 58efbcb5b4..2c20d19c24 100644 --- a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments.$deploymentParam/route.tsx +++ b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments.$deploymentParam/route.tsx @@ -30,6 +30,7 @@ import { DeploymentPresenter } from "~/presenters/v3/DeploymentPresenter.server" import { requireUserId } from "~/services/session.server"; import { cn } from "~/utils/cn"; 
import { v3DeploymentParams, v3DeploymentsPath } from "~/utils/pathBuilder"; +import { capitalizeWord } from "~/utils/string"; export const loader = async ({ request, params }: LoaderFunctionArgs) => { const userId = await requireUserId(request); @@ -151,6 +152,10 @@ export default function Page() { SDK Version {deployment.sdkVersion ? deployment.sdkVersion : "–"} + + Worker type + {capitalizeWord(deployment.type)} + Started at diff --git a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx index a73f2c43b1..07add33417 100644 --- a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx +++ b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx @@ -277,7 +277,8 @@ function DeploymentActionsCell({ const location = useLocation(); const project = useProject(); - const canRollback = !deployment.isCurrent && deployment.isDeployed; + const canRollback = + deployment.type === "SHARED" && !deployment.isCurrent && deployment.isDeployed; const canRetryIndexing = deployment.isLatest && deploymentIndexingIsRetryable(deployment); if (!canRollback && !canRetryIndexing) { diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index a871672684..78a84f6bd1 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -615,6 +615,10 @@ function RunBody({ )} + + Engine version + {run.engine} + Test run diff --git 
a/apps/webapp/app/utils/string.ts b/apps/webapp/app/utils/string.ts new file mode 100644 index 0000000000..d2dfdbb1d6 --- /dev/null +++ b/apps/webapp/app/utils/string.ts @@ -0,0 +1,3 @@ +export function capitalizeWord(word: string) { + return word.charAt(0).toUpperCase() + word.slice(1).toLowerCase(); +} diff --git a/apps/webapp/app/v3/services/rollbackDeployment.server.ts b/apps/webapp/app/v3/services/rollbackDeployment.server.ts index 24f25e69cd..a7c1ffb86e 100644 --- a/apps/webapp/app/v3/services/rollbackDeployment.server.ts +++ b/apps/webapp/app/v3/services/rollbackDeployment.server.ts @@ -1,6 +1,6 @@ import { logger } from "~/services/logger.server"; import { BaseService } from "./baseService.server"; -import { WorkerDeployment } from "@trigger.dev/database"; +import { WorkerDeployment, WorkerInstanceGroupType } from "@trigger.dev/database"; import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; @@ -11,6 +11,14 @@ export class RollbackDeploymentService extends BaseService { return; } + if (deployment.type !== WorkerInstanceGroupType.SHARED) { + logger.error("Can only roll back shared deployments", { + id: deployment.id, + type: deployment.type, + }); + return; + } + const promotion = await this._prisma.workerDeploymentPromotion.findFirst({ where: { deploymentId: deployment.id, From 513ea90bc812f01ab92823aa2e9e638e8cc6efda Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:39:20 +0000 Subject: [PATCH 162/485] update helper import --- apps/webapp/app/v3/services/rescheduleTaskRun.server.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/services/rescheduleTaskRun.server.ts b/apps/webapp/app/v3/services/rescheduleTaskRun.server.ts index 4d9461d06b..e764a6c459 100644 --- a/apps/webapp/app/v3/services/rescheduleTaskRun.server.ts +++ b/apps/webapp/app/v3/services/rescheduleTaskRun.server.ts 
@@ -1,9 +1,9 @@ import { TaskRun } from "@trigger.dev/database"; import { BaseService, ServiceValidationError } from "./baseService.server"; import { RescheduleRunRequestBody } from "@trigger.dev/core/v3"; -import { parseDelay } from "./triggerTask.server"; import { $transaction } from "~/db.server"; import { workerQueue } from "~/services/worker.server"; +import { parseDelay } from "~/utils/delays"; export class RescheduleTaskRunService extends BaseService { public async call(taskRun: TaskRun, body: RescheduleRunRequestBody) { From 786b035b49fadf6399361a3e91dc059304837c56 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:49:51 +0000 Subject: [PATCH 163/485] managed worker secret and other auth improvements --- apps/webapp/app/env.server.ts | 1 + .../worker/workerGroupService.server.ts | 13 +- .../worker/workerGroupTokenService.server.ts | 249 +++++++++++++----- .../database/prisma/schema.prisma | 18 +- 4 files changed, 211 insertions(+), 70 deletions(-) diff --git a/apps/webapp/app/env.server.ts b/apps/webapp/app/env.server.ts index dca87c9299..70deb5129a 100644 --- a/apps/webapp/app/env.server.ts +++ b/apps/webapp/app/env.server.ts @@ -150,6 +150,7 @@ const EnvironmentSchema = z.object({ SHARED_QUEUE_CONSUMER_POOL_SIZE: z.coerce.number().int().default(10), SHARED_QUEUE_CONSUMER_INTERVAL_MS: z.coerce.number().int().default(100), SHARED_QUEUE_CONSUMER_NEXT_TICK_INTERVAL_MS: z.coerce.number().int().default(100), + MANAGED_WORKER_SECRET: z.string().default("managed-secret"), // Development OTEL environment variables DEV_OTEL_EXPORTER_OTLP_ENDPOINT: z.string().optional(), diff --git a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts index 3416b4cde3..3b1c362889 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts @@ -48,15 +48,17 @@ 
export class WorkerGroupService extends WithRunEngine { }; } + /** + This updates a single worker group. + The name should never be updated. This would mean changing the masterQueue name which can have unexpected consequences. + */ async updateWorkerGroup({ projectId, workerGroupId, - name, description, }: { projectId: string; workerGroupId: string; - name?: string; description?: string; }) { const workerGroup = await this._prisma.workerInstanceGroup.findUnique({ @@ -69,7 +71,6 @@ export class WorkerGroupService extends WithRunEngine { if (!workerGroup) { logger.error("[WorkerGroupService] No worker group found for update", { workerGroupId, - name, description, }); return; @@ -81,11 +82,15 @@ export class WorkerGroupService extends WithRunEngine { }, data: { description, - name, }, }); } + /** + This lists worker groups. + Without a project ID, only shared worker groups will be returned. + With a project ID, in addition to all shared worker groups, ones associated with the project will also be returned. 
+ */ async listWorkerGroups({ projectId }: { projectId?: string }) { const workerGroups = await this._prisma.workerInstanceGroup.findMany({ where: { diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index e00a928b07..ff20c9274a 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -1,12 +1,14 @@ import { customAlphabet } from "nanoid"; import { WithRunEngine, WithRunEngineOptions } from "../baseService.server"; -import { createHash } from "crypto"; +import { createHash, timingSafeEqual } from "crypto"; import { logger } from "~/services/logger.server"; import { WorkerInstanceGroup, WorkerInstanceGroupType } from "@trigger.dev/database"; import { z } from "zod"; import { HEADER_NAME } from "@trigger.dev/worker"; -import { DequeuedMessage } from "@internal/run-engine/engine/messages"; -import { TaskRunExecutionResult } from "@trigger.dev/core/v3"; +import { TaskRunExecutionResult, DequeuedMessage } from "@trigger.dev/core/v3"; +import { env } from "~/env.server"; +import { $transaction } from "~/db.server"; +import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; export class WorkerGroupTokenService extends WithRunEngine { private readonly tokenPrefix = "tr_wgt_"; @@ -136,6 +138,38 @@ export class WorkerGroupTokenService extends WithRunEngine { return; } + if (workerGroup.type === WorkerInstanceGroupType.SHARED) { + const managedWorkerSecret = request.headers.get(HEADER_NAME.WORKER_MANAGED_SECRET); + + if (!managedWorkerSecret) { + logger.error("[WorkerGroupTokenService] Managed secret not found in request", { + headers: this.sanitizeHeaders(request), + }); + return; + } + + const encoder = new TextEncoder(); + + const a = encoder.encode(managedWorkerSecret); + const b = encoder.encode(env.MANAGED_WORKER_SECRET); + + if (a.byteLength !== b.byteLength) { + 
logger.error("[WorkerGroupTokenService] Managed secret length mismatch", { + managedWorkerSecret, + headers: this.sanitizeHeaders(request), + }); + return; + } + + if (!timingSafeEqual(a, b)) { + logger.error("[WorkerGroupTokenService] Managed secret mismatch", { + managedWorkerSecret, + headers: this.sanitizeHeaders(request), + }); + return; + } + } + const workerInstance = await this.getOrCreateWorkerInstance({ workerGroup, instanceName, @@ -207,76 +241,171 @@ export class WorkerGroupTokenService extends WithRunEngine { instanceName: string; deploymentId?: string; }) { - const workerInstance = await this._prisma.workerInstance.findUnique({ - where: { - workerGroupId_name: { - workerGroupId: workerGroup.id, - name: instanceName, - }, - }, - include: { - deployment: true, - }, - }); - - if (workerInstance) { - return workerInstance; - } - - if (workerGroup.type === WorkerInstanceGroupType.SHARED) { - return this._prisma.workerInstance.create({ - data: { - workerGroupId: workerGroup.id, - name: instanceName, + return await $transaction(this._prisma, async (tx) => { + const resourceIdentifier = deploymentId ? 
`${deploymentId}:${instanceName}` : instanceName; + + const workerInstance = await tx.workerInstance.findUnique({ + where: { + workerGroupId_resourceIdentifier: { + workerGroupId: workerGroup.id, + resourceIdentifier, + }, }, include: { deployment: true, }, }); - } - if (!workerGroup.projectId || !workerGroup.organizationId) { - logger.error( - "[WorkerGroupTokenService] Non-shared worker group missing project or organization", - workerGroup - ); - return; - } + if (workerInstance) { + return workerInstance; + } - // Unmanaged workers instances are locked to a specific deployment version + if (workerGroup.type === WorkerInstanceGroupType.SHARED) { + if (deploymentId) { + logger.warn( + "[WorkerGroupTokenService] Shared worker group instances should not authenticate with a deployment ID", + { + workerGroup, + workerInstance, + deploymentId, + } + ); + } + + return tx.workerInstance.create({ + data: { + workerGroupId: workerGroup.id, + name: instanceName, + resourceIdentifier, + }, + include: { + // This will always be empty for shared worker instances, but required for types + deployment: true, + }, + }); + } - const deployment = await this._prisma.workerDeployment.findUnique({ - where: { - id: deploymentId, - }, - }); + if (!workerGroup.projectId || !workerGroup.organizationId) { + logger.error( + "[WorkerGroupTokenService] Non-shared worker group missing project or organization", + { + workerGroup, + workerInstance, + deploymentId, + } + ); + return; + } - if (!deployment) { - logger.error("[WorkerGroupTokenService] Deployment not found", { deploymentId }); - return; - } + if (!deploymentId) { + logger.error("[WorkerGroupTokenService] Non-shared worker group required deployment ID", { + workerGroup, + workerInstance, + }); + return; + } - if (deployment.projectId !== workerGroup.projectId) { - logger.error("[WorkerGroupTokenService] Deployment does not match worker group project", { - deployment, - workerGroup, + // Unmanaged workers instances are locked to a 
specific deployment version + + const deployment = await tx.workerDeployment.findUnique({ + where: { + ...(deploymentId.startsWith("deployment_") + ? { + friendlyId: deploymentId, + } + : { + id: deploymentId, + }), + }, }); - return; - } - const nonSharedWorkerInstance = this._prisma.workerInstance.create({ - data: { - workerGroupId: workerGroup.id, - name: instanceName, - environmentId: deployment.environmentId, - deploymentId: deployment.id, - }, - include: { - deployment: true, - }, - }); + if (!deployment) { + logger.error("[WorkerGroupTokenService] Deployment not found", { + workerGroup, + workerInstance, + deploymentId, + }); + return; + } + + if (deployment.projectId !== workerGroup.projectId) { + logger.error("[WorkerGroupTokenService] Deployment does not match worker group project", { + deployment, + workerGroup, + workerInstance, + }); + return; + } + + if (deployment.status === "DEPLOYING") { + // This is the first instance to be created for this deployment, so mark it as deployed + await tx.workerDeployment.update({ + where: { + id: deployment.id, + }, + data: { + status: "DEPLOYED", + deployedAt: new Date(), + }, + }); + + // Check if the deployment should be promoted + const workerPromotion = await tx.workerDeploymentPromotion.findFirst({ + where: { + label: CURRENT_UNMANAGED_DEPLOYMENT_LABEL, + environmentId: deployment.environmentId, + }, + include: { + deployment: true, + }, + }); + + const shouldPromote = + !workerPromotion || deployment.createdAt > workerPromotion.deployment.createdAt; + + if (shouldPromote) { + // Promote the deployment + await tx.workerDeploymentPromotion.upsert({ + where: { + environmentId_label: { + environmentId: deployment.environmentId, + label: CURRENT_UNMANAGED_DEPLOYMENT_LABEL, + }, + }, + create: { + deploymentId: deployment.id, + environmentId: deployment.environmentId, + label: CURRENT_UNMANAGED_DEPLOYMENT_LABEL, + }, + update: { + deploymentId: deployment.id, + }, + }); + } + } else if (deployment.status !== 
"DEPLOYED") { + logger.error("[WorkerGroupTokenService] Deployment not deploying or deployed", { + deployment, + workerGroup, + workerInstance, + }); + return; + } + + const nonSharedWorkerInstance = tx.workerInstance.create({ + data: { + workerGroupId: workerGroup.id, + name: instanceName, + resourceIdentifier, + environmentId: deployment.environmentId, + deploymentId: deployment.id, + }, + include: { + deployment: true, + }, + }); - return nonSharedWorkerInstance; + return nonSharedWorkerInstance; + }); } private sanitizeHeaders(request: Request, skipHeaders = ["authorization"]) { diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 7843ed4bb1..bc70575945 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -450,7 +450,8 @@ model Project { updatedAt DateTime @updatedAt deletedAt DateTime? - version ProjectVersion @default(V2) + version ProjectVersion @default(V2) + engine RunEngineVersion @default(V1) builderProjectId String? @@ -2086,9 +2087,15 @@ model FeatureFlag { } model WorkerInstance { - id String @id @default(cuid()) + id String @id @default(cuid()) + + /// For example "worker-1" name String + /// If managed, it will default to the name, e.g. "worker-1" + /// If unmanged, it will be prefixed with the deployment ID e.g. "deploy-123-worker-1" + resourceIdentifier String + workerGroup WorkerInstanceGroup @relation(fields: [workerGroupId], references: [id]) workerGroupId String @@ -2112,7 +2119,7 @@ model WorkerInstance { lastDequeueAt DateTime? lastHeartbeatAt DateTime? - @@unique([workerGroupId, name]) + @@unique([workerGroupId, resourceIdentifier]) } enum WorkerInstanceGroupType { @@ -2150,8 +2157,6 @@ model WorkerInstanceGroup { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - - @@map("WorkerGroup") } model WorkerGroupToken { @@ -2613,7 +2618,8 @@ model WorkerDeployment { externalBuildData Json? 
- status WorkerDeploymentStatus @default(PENDING) + status WorkerDeploymentStatus @default(PENDING) + type WorkerInstanceGroupType @default(SHARED) project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) projectId String From c898fb2087ecf829e0cdedc55a8bb03e638085b2 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:50:21 +0000 Subject: [PATCH 164/485] update populate script --- apps/webapp/prisma/populate.ts | 231 +++++++++++++++++++++++++++++++++ 1 file changed, 231 insertions(+) diff --git a/apps/webapp/prisma/populate.ts b/apps/webapp/prisma/populate.ts index 6b3f277d39..fb31a1e978 100644 --- a/apps/webapp/prisma/populate.ts +++ b/apps/webapp/prisma/populate.ts @@ -4,12 +4,212 @@ // 2. pnpm run db:populate -- --projectRef=proj_liazlkfgmfcusswwgohl --taskIdentifier=child-task --runCount=100000 import { generateFriendlyId } from "~/v3/friendlyIdentifiers"; import { prisma } from "../app/db.server"; +import { createHash } from "crypto"; +import { + BackgroundWorker, + BackgroundWorkerTask, + RuntimeEnvironmentType, + WorkerInstanceGroupType, +} from "@trigger.dev/database"; +import { nanoid } from "nanoid"; async function populate() { if (process.env.NODE_ENV !== "development") { return; } + const project = await getProject(); + + await generateRuns(project); + await createWorkerGroup(project); + const { worker, tasks } = await createBackgroundWorker(project, getEnvTypeFromArg()); + await createWorkerDeployment(project, worker, getEnvTypeFromArg()); +} + +function getEnvironment( + project: ProjectWithEnvironment, + envType: RuntimeEnvironmentType = "PRODUCTION" +) { + const env = project.environments.find((e) => e.type === envType); + + if (!env) { + throw new Error(`No environment of type "${envType}" found for project ${project.id}`); + } + + return env; +} + +async function createWorkerDeployment( + project: ProjectWithEnvironment, + worker: 
BackgroundWorker, + envType: RuntimeEnvironmentType = "PRODUCTION" +) { + const env = getEnvironment(project, envType); + const deploymentId = `cm3c821sk00032v6is7ufqy3d-${env.slug}`; + + if (env.type === "DEVELOPMENT") { + console.warn("Skipping deployment creation for development environment"); + return; + } + + let deployment = await prisma.workerDeployment.findUnique({ + where: { + id: deploymentId, + }, + }); + + if (deployment) { + console.log(`Deployment "${deploymentId}" already exists`); + return deployment; + } + + const firstOrgMember = project.organization.members[0]; + + deployment = await prisma.workerDeployment.create({ + data: { + id: deploymentId, + friendlyId: generateFriendlyId("deployment"), + contentHash: worker.contentHash, + version: worker.version, + shortCode: nanoid(8), + imageReference: `trigger/${project.externalRef}:${worker.version}.${env.slug}`, + status: "DEPLOYING", + projectId: project.id, + environmentId: env.id, + workerId: worker.id, + triggeredById: firstOrgMember.userId, + }, + }); + + console.log(`Created deployment "${deploymentId}"`); + + return deployment; +} + +async function createBackgroundWorker( + project: ProjectWithEnvironment, + envType: RuntimeEnvironmentType = "PRODUCTION" +) { + const env = getEnvironment(project, envType); + const taskIdentifier = "seed-task"; + const backgroundWorkerId = `cm3c8fmiv00042v6imoqwxst1-${env.slug}`; + + let worker = await prisma.backgroundWorker.findUnique({ + where: { + id: backgroundWorkerId, + }, + include: { + tasks: true, + }, + }); + + if (worker) { + console.log(`Worker "${backgroundWorkerId}" already exists`); + + return { + worker, + tasks: worker.tasks, + }; + } + + worker = await prisma.backgroundWorker.create({ + data: { + id: backgroundWorkerId, + friendlyId: generateFriendlyId("worker"), + contentHash: "hash", + projectId: project.id, + runtimeEnvironmentId: env.id, + version: "20241111.1", + metadata: {}, + }, + include: { + tasks: true, + }, + }); + + 
console.log(`Created worker "${backgroundWorkerId}"`); + + const taskIdentifiers = Array.isArray(taskIdentifier) ? taskIdentifier : [taskIdentifier]; + + const tasks: BackgroundWorkerTask[] = []; + + for (const identifier of taskIdentifiers) { + const task = await prisma.backgroundWorkerTask.create({ + data: { + friendlyId: generateFriendlyId("task"), + slug: identifier, + filePath: `/trigger/${identifier}.ts`, + exportName: identifier, + workerId: worker.id, + runtimeEnvironmentId: env.id, + projectId: project.id, + }, + }); + + tasks.push(task); + } + + return { + worker, + tasks, + }; +} + +async function createWorkerGroup(project: ProjectWithEnvironment) { + const workerGroupName = "seed-unmanaged"; + const rawToken = "tr_wgt_15480aa1712cae4b8db8c7a49707d69d"; + + const existingWorkerGroup = await prisma.workerInstanceGroup.findFirst({ + where: { + projectId: project.id, + name: workerGroupName, + }, + }); + + if (existingWorkerGroup) { + console.log(`Worker group "${workerGroupName}" already exists`); + + await setAsDefaultWorkerGroup(project, existingWorkerGroup.id); + + return existingWorkerGroup; + } + + const token = await prisma.workerGroupToken.create({ + data: { + tokenHash: createHash("sha256").update(rawToken).digest("hex"), + }, + }); + + const workerGroup = await prisma.workerInstanceGroup.create({ + data: { + projectId: project.id, + organizationId: project.organizationId, + type: WorkerInstanceGroupType.UNMANAGED, + masterQueue: `${project.id}-${workerGroupName}`, + tokenId: token.id, + description: "Seeded worker group", + name: workerGroupName, + }, + }); + + await setAsDefaultWorkerGroup(project, workerGroup.id); + + return workerGroup; +} + +async function setAsDefaultWorkerGroup(project: ProjectWithEnvironment, workerGroupId: string) { + // Set as default worker group + await prisma.project.update({ + where: { + id: project.id, + }, + data: { + defaultWorkerGroupId: workerGroupId, + }, + }); +} + +async function getProject() { const 
projectRef = getArg("projectRef"); if (!projectRef) { throw new Error("projectRef is required"); @@ -18,15 +218,27 @@ async function populate() { const project = await prisma.project.findUnique({ include: { environments: true, + organization: { + include: { + members: true, + }, + }, }, where: { externalRef: projectRef, }, }); + if (!project) { throw new Error("Project not found"); } + return project; +} + +type ProjectWithEnvironment = Awaited>; + +async function generateRuns(project: ProjectWithEnvironment) { const taskIdentifier = getArg("taskIdentifier"); if (!taskIdentifier) { throw new Error("taskIdentifier is required"); @@ -74,6 +286,25 @@ async function populate() { console.log(`Added ${runs.count} runs`); } +function getEnvTypeFromArg(): RuntimeEnvironmentType { + const env = getArg("env"); + + if (!env) { + return RuntimeEnvironmentType.PRODUCTION; + } + + switch (env) { + case "dev": + return RuntimeEnvironmentType.DEVELOPMENT; + case "prod": + return RuntimeEnvironmentType.PRODUCTION; + case "stg": + return RuntimeEnvironmentType.STAGING; + default: + throw new Error(`Invalid environment: ${env}`); + } +} + function getArg(name: string) { const args = process.argv.slice(2); From 0237752758bf844ebd070e9e01f1a341852fd6b7 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:50:34 +0000 Subject: [PATCH 165/485] fix webapp builds --- apps/webapp/remix.config.js | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/webapp/remix.config.js b/apps/webapp/remix.config.js index d2417a3eb5..32af713449 100644 --- a/apps/webapp/remix.config.js +++ b/apps/webapp/remix.config.js @@ -11,6 +11,7 @@ module.exports = { /^remix-utils.*/, "marked", "axios", + "@internal/redis-worker", "@trigger.dev/core", "@trigger.dev/sdk", "@trigger.dev/platform", From 9fa3c9818df3c21dafe2d2d5777ec96a6e96ab7e Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:50:56 +0000 
Subject: [PATCH 166/485] update lockfile --- pnpm-lock.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 8601d37634..686ec002d1 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1148,6 +1148,9 @@ importers: '@trigger.dev/core': specifier: workspace:3.1.2 version: link:../core + '@trigger.dev/worker': + specifier: workspace:3.1.2 + version: link:../worker c12: specifier: ^1.11.1 version: 1.11.1(magicast@0.3.4) From 017d8cfd4206321107fbbcaa3c9f49202e1f4579 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:51:56 +0000 Subject: [PATCH 167/485] add worker socket --- apps/webapp/app/v3/handleSocketIo.server.ts | 70 ++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/handleSocketIo.server.ts b/apps/webapp/app/v3/handleSocketIo.server.ts index bbad9e3264..b041def01a 100644 --- a/apps/webapp/app/v3/handleSocketIo.server.ts +++ b/apps/webapp/app/v3/handleSocketIo.server.ts @@ -8,7 +8,7 @@ import { SharedQueueToClientMessages, } from "@trigger.dev/core/v3"; import { ZodNamespace } from "@trigger.dev/core/v3/zodNamespace"; -import { Server } from "socket.io"; +import { Server, Socket } from "socket.io"; import { env } from "~/env.server"; import { singleton } from "~/utils/singleton"; import { SharedSocketConnection } from "./sharedSocketConnection"; @@ -25,6 +25,7 @@ import { createAdapter } from "@socket.io/redis-adapter"; import { CrashTaskRunService } from "./services/crashTaskRun.server"; import { CreateTaskRunAttemptService } from "./services/createTaskRunAttempt.server"; import { UpdateFatalRunErrorService } from "./services/updateFatalRunError.server"; +import { WorkerGroupTokenService } from "./services/worker/workerGroupTokenService.server"; export const socketIo = singleton("socketIo", initalizeIoServer); @@ -38,12 +39,14 @@ function initalizeIoServer() { const coordinatorNamespace = 
createCoordinatorNamespace(io); const providerNamespace = createProviderNamespace(io); const sharedQueueConsumerNamespace = createSharedQueueConsumerNamespace(io); + const workerNamespace = createWorkerNamespace(io); return { io, coordinatorNamespace, providerNamespace, sharedQueueConsumerNamespace, + workerNamespace, }; } @@ -366,3 +369,68 @@ function createSharedQueueConsumerNamespace(io: Server) { return sharedQueue.namespace; } + +function headersFromHandshake(handshake: Socket["handshake"]) { + const headers = new Headers(); + + for (const [key, value] of Object.entries(handshake.headers)) { + if (typeof value !== "string") continue; + headers.append(key, value); + } + + return headers; +} + +function createWorkerNamespace(io: Server) { + const provider = new ZodNamespace({ + // @ts-ignore - for some reason the built ZodNamespace Server type is not compatible with the Server type here, but only when doing typechecking + io, + name: "worker", + clientMessages: ProviderToPlatformMessages, + serverMessages: PlatformToProviderMessages, + preAuth: async (socket, next, logger) => { + const request = new Request("https://example.com", { + headers: headersFromHandshake(socket.handshake), + }); + + const tokenService = new WorkerGroupTokenService(); + const authenticatedInstance = await tokenService.authenticate(request); + + if (!authenticatedInstance) { + logger.error("authentication failed", { handshake: socket.handshake }); + next(new Error("unauthorized")); + return; + } + + logger.debug("authentication succeeded", { authenticatedInstance }); + + next(); + }, + handlers: { + WORKER_CRASHED: async (message) => { + try { + if (message.overrideCompletion) { + const updateErrorService = new UpdateFatalRunErrorService(); + await updateErrorService.call(message.runId, { ...message }); + } else { + const crashRunService = new CrashTaskRunService(); + await crashRunService.call(message.runId, { ...message }); + } + } catch (error) { + logger.error("Error while handling 
crashed worker", { error }); + } + }, + INDEXING_FAILED: async (message) => { + try { + const service = new DeploymentIndexFailed(); + + await service.call(message.deploymentId, message.error, message.overrideCompletion); + } catch (e) { + logger.error("Error while indexing", { error: e }); + } + }, + }, + }); + + return provider.namespace; +} From ece3303d3a4aafad750665d8c055bcc814016960 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:53:23 +0000 Subject: [PATCH 168/485] deployment service updates --- ...createDeploymentBackgroundWorker.server.ts | 2 +- .../services/initializeDeployment.server.ts | 45 ++++++++++++++++--- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts index cea395510c..7ace78ae86 100644 --- a/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts @@ -97,7 +97,7 @@ export class CreateDeploymentBackgroundWorkerService extends BaseService { data: { status: "DEPLOYING", workerId: backgroundWorker.id, - deployedAt: new Date(), + builtAt: new Date(), }, }); diff --git a/apps/webapp/app/v3/services/initializeDeployment.server.ts b/apps/webapp/app/v3/services/initializeDeployment.server.ts index e711813692..da0e1c68ca 100644 --- a/apps/webapp/app/v3/services/initializeDeployment.server.ts +++ b/apps/webapp/app/v3/services/initializeDeployment.server.ts @@ -7,6 +7,8 @@ import { calculateNextBuildVersion } from "../utils/calculateNextBuildVersion"; import { BaseService } from "./baseService.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; import { env } from "~/env.server"; +import { WorkerInstanceGroupType } from "@trigger.dev/database"; +import { logger } from "~/services/logger.server"; const nanoid = 
customAlphabet("1234567890abcdefghijklmnopqrstuvwxyz", 8); @@ -46,6 +48,38 @@ export class InitializeDeploymentService extends BaseService { }) : undefined; + const sharedImageTag = `${payload.namespace ?? env.DEPLOY_REGISTRY_NAMESPACE}/${ + environment.project.externalRef + }:${nextVersion}.${environment.slug}`; + + const unmanagedImageParts = []; + + if (payload.registryHost) { + unmanagedImageParts.push(payload.registryHost); + } + if (payload.namespace) { + unmanagedImageParts.push(payload.namespace); + } + unmanagedImageParts.push( + `${environment.project.externalRef}:${nextVersion}.${environment.slug}` + ); + + const unmanagedImageTag = unmanagedImageParts.join("/"); + + const defaultType = WorkerInstanceGroupType.SHARED; + const deploymentType = payload.type ?? defaultType; + const isShared = deploymentType === WorkerInstanceGroupType.SHARED; + + logger.debug("Creating deployment", { + environmentId: environment.id, + projectId: environment.projectId, + version: nextVersion, + triggeredById: triggeredBy?.id, + type: deploymentType, + imageTag: isShared ? sharedImageTag : unmanagedImageTag, + imageReference: isShared ? undefined : unmanagedImageTag, + }); + const deployment = await this._prisma.workerDeployment.create({ data: { friendlyId: generateFriendlyId("deployment"), @@ -57,6 +91,8 @@ export class InitializeDeploymentService extends BaseService { projectId: environment.projectId, externalBuildData, triggeredById: triggeredBy?.id, + type: deploymentType, + imageReference: isShared ? undefined : unmanagedImageTag, }, }); @@ -67,11 +103,10 @@ export class InitializeDeploymentService extends BaseService { new Date(Date.now() + 180_000) // 3 minutes ); - const imageTag = `${payload.namespace ?? env.DEPLOY_REGISTRY_NAMESPACE}/${ - environment.project.externalRef - }:${deployment.version}.${environment.slug}`; - - return { deployment, imageTag }; + return { + deployment, + imageTag: isShared ? 
sharedImageTag : unmanagedImageTag, + }; }); } } From 2ebdbf2180c698aae66260ccc3682b24893fc977 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:53:49 +0000 Subject: [PATCH 169/485] use new trigger service in tests --- apps/webapp/test/workerGroup.test.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/apps/webapp/test/workerGroup.test.ts b/apps/webapp/test/workerGroup.test.ts index ced58be84d..5aad0ad538 100644 --- a/apps/webapp/test/workerGroup.test.ts +++ b/apps/webapp/test/workerGroup.test.ts @@ -11,13 +11,14 @@ import { WorkerGroupService } from "~/v3/services/worker/workerGroupService.serv import { PrismaClient, PrismaClientOrTransaction, + RunEngineVersion, TaskRunStatus, WorkerInstanceGroupType, } from "@trigger.dev/database"; import { HEADER_NAME } from "@trigger.dev/worker"; import { RunEngine } from "@internal/run-engine"; import { trace } from "@opentelemetry/api"; -import { TriggerTaskServiceV2 } from "~/v3/services/triggerTaskV2.server"; +import { TriggerTaskService } from "~/v3/services/triggerTask.server"; describe("worker", () => { const defaultInstanceName = "test_worker"; @@ -109,7 +110,7 @@ describe("worker", () => { assert(deployment, "deployment should be defined"); const engine = setupRunEngine(prisma, redisContainer); - const triggerService = new TriggerTaskServiceV2({ prisma, engine }); + const triggerService = new TriggerTaskService({ prisma, engine }); const { token, workerGroupService, workerGroup } = await setupWorkerGroup({ prisma, @@ -143,13 +144,12 @@ describe("worker", () => { ); // Trigger - const run = await triggerService.call({ - environment: authenticatedEnvironment, - taskId: taskIdentifier, - body: {}, - }); + const run = await triggerService.call(taskIdentifier, authenticatedEnvironment, {}); assert(run, "run should be defined"); + // Check this is a V2 run + expect(run.engine).toBe(RunEngineVersion.V2); + const queueLengthBefore = 
await engine.runQueue.lengthOfQueue( authenticatedEnvironment, run.queue From da5226ae794ff5d8c7af2a7360817b1592a029e7 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:54:25 +0000 Subject: [PATCH 170/485] correctly lock to unmanaged background worker --- .../worker/workerGroupTokenService.server.ts | 3 +- .../run-engine/src/engine/db/worker.ts | 40 +++++++++++++++++-- .../run-engine/src/engine/index.ts | 27 ++++++++++++- 3 files changed, 64 insertions(+), 6 deletions(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index ff20c9274a..c4791a7f1d 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -488,12 +488,13 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { consumerId: this.workerInstanceId, environmentId: this.environmentId, maxRunCount, + backgroundWorkerId: this.backgroundWorkerId, }); } return await this._engine.dequeueFromBackgroundWorkerMasterQueue({ consumerId: this.workerInstanceId, - backgroundWorkerId: this.deploymentId, + backgroundWorkerId: this.backgroundWorkerId, maxRunCount, }); } diff --git a/internal-packages/run-engine/src/engine/db/worker.ts b/internal-packages/run-engine/src/engine/db/worker.ts index f3bdb40e59..046779251a 100644 --- a/internal-packages/run-engine/src/engine/db/worker.ts +++ b/internal-packages/run-engine/src/engine/db/worker.ts @@ -26,10 +26,24 @@ type RunWithBackgroundWorkerTasksResult = } | { success: false; - code: "NO_WORKER" | "TASK_NOT_IN_LATEST" | "TASK_NEVER_REGISTERED"; + code: + | "NO_WORKER" + | "TASK_NOT_IN_LATEST" + | "TASK_NEVER_REGISTERED" + | "BACKGROUND_WORKER_MISMATCH"; message: string; run: RunWithMininimalEnvironment; } + | { + success: false; + code: "BACKGROUND_WORKER_MISMATCH"; + message: string; + backgroundWorker: 
{ + expected: string; + received: string; + }; + run: RunWithMininimalEnvironment; + } | { success: true; run: RunWithMininimalEnvironment; @@ -40,7 +54,8 @@ type RunWithBackgroundWorkerTasksResult = export async function getRunWithBackgroundWorkerTasks( prisma: PrismaClientOrTransaction, - runId: string + runId: string, + backgroundWorkerId?: string ): Promise { const run = await prisma.taskRun.findFirst({ where: { @@ -70,9 +85,11 @@ export async function getRunWithBackgroundWorkerTasks( }; } + const workerId = run.lockedToVersionId ?? backgroundWorkerId; + //get the relevant BackgroundWorker with tasks and deployment (if not DEV) - const workerWithTasks = run.lockedToVersionId - ? await getWorkerDeploymentFromWorker(prisma, run.lockedToVersionId) + const workerWithTasks = workerId + ? await getWorkerDeploymentFromWorker(prisma, workerId) : run.runtimeEnvironment.type === "DEVELOPMENT" ? await getMostRecentWorker(prisma, run.runtimeEnvironmentId) : await getWorkerFromCurrentlyPromotedDeployment(prisma, run.runtimeEnvironmentId); @@ -86,6 +103,21 @@ export async function getRunWithBackgroundWorkerTasks( }; } + if (backgroundWorkerId) { + if (backgroundWorkerId !== workerWithTasks.worker.id) { + return { + success: false as const, + code: "BACKGROUND_WORKER_MISMATCH", + message: `Background worker mismatch for run: ${run.id}`, + backgroundWorker: { + expected: backgroundWorkerId, + received: workerWithTasks.worker.id, + }, + run, + }; + } + } + const backgroundTask = workerWithTasks.tasks.find((task) => task.slug === run.taskIdentifier); if (!backgroundTask) { diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 78a8de0d47..0b053a91a0 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -51,6 +51,7 @@ import { EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from 
"./machinePresets"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; +import { assertExhaustive } from "@trigger.dev/core"; type Options = { redis: RedisOptions; @@ -490,12 +491,14 @@ export class RunEngine { masterQueue, maxRunCount, maxResources, + backgroundWorkerId, tx, }: { consumerId: string; masterQueue: string; maxRunCount: number; maxResources?: MachineResources; + backgroundWorkerId?: string; tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? this.prisma; @@ -558,7 +561,7 @@ export class RunEngine { return null; } - const result = await getRunWithBackgroundWorkerTasks(prisma, runId); + const result = await getRunWithBackgroundWorkerTasks(prisma, runId, backgroundWorkerId); if (!result.success) { switch (result.code) { @@ -590,6 +593,24 @@ export class RunEngine { return null; } + case "BACKGROUND_WORKER_MISMATCH": { + this.logger.warn( + "RunEngine.dequeueFromMasterQueue(): Background worker mismatch", + { + runId, + latestSnapshot: snapshot.id, + result, + } + ); + + //worker mismatch so put it back in the queue + await this.runQueue.nackMessage(orgId, runId); + + return null; + } + default: { + assertExhaustive(result); + } } } @@ -797,12 +818,14 @@ export class RunEngine { environmentId, maxRunCount, maxResources, + backgroundWorkerId, tx, }: { consumerId: string; environmentId: string; maxRunCount: number; maxResources?: MachineResources; + backgroundWorkerId: string; tx?: PrismaClientOrTransaction; }) { return this.dequeueFromMasterQueue({ @@ -810,6 +833,7 @@ export class RunEngine { masterQueue: this.#environmentMasterQueueKey(environmentId), maxRunCount, maxResources, + backgroundWorkerId, tx, }); } @@ -832,6 +856,7 @@ export class RunEngine { masterQueue: this.#backgroundWorkerQueueKey(backgroundWorkerId), maxRunCount, maxResources, + backgroundWorkerId, tx, }); } From 232475ea0be3f63d9183fad6bd86ed5abbe44ef9 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 
2024 15:59:48 +0000 Subject: [PATCH 171/485] fix worker http client --- packages/worker/src/client/http.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/worker/src/client/http.ts b/packages/worker/src/client/http.ts index b87a35610c..74bd6f39af 100644 --- a/packages/worker/src/client/http.ts +++ b/packages/worker/src/client/http.ts @@ -24,8 +24,6 @@ export class WorkerHttpClient { this.apiURL = opts.apiUrl.replace(/\/$/, ""); this.workerToken = opts.workerToken; this.instanceName = opts.instanceName; - this.deploymentId = opts.deploymentId; - this.managedWorkerSecret = opts.managedWorkerSecret; this.defaultHeaders = getDefaultHeaders(opts); if (!this.apiURL) { From 2cbcbdb89e6a048013a140f164c769ad0bea5ab7 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:43:03 +0000 Subject: [PATCH 172/485] compare core schema enums against db types --- packages/core/package.json | 1 + packages/core/src/v3/schemas/runEngine.ts | 9 ++++++--- pnpm-lock.yaml | 3 +++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/packages/core/package.json b/packages/core/package.json index 570b14f824..a0265644cf 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -209,6 +209,7 @@ }, "devDependencies": { "@arethetypeswrong/cli": "^0.15.4", + "@trigger.dev/database": "workspace:*", "@types/humanize-duration": "^3.27.1", "@types/node": "20.14.14", "@types/readable-stream": "^4.0.14", diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index 2ddbd9ee74..76dda779ff 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -1,6 +1,9 @@ import { z } from "zod"; import { MachinePreset } from "./common.js"; import { EnvironmentType } from "./schemas.js"; +import type * as DB_TYPES from "@trigger.dev/database"; + +type Enum = { [K in T]: K }; export const TaskRunExecutionStatus = { RUN_CREATED: 
"RUN_CREATED", @@ -11,7 +14,7 @@ export const TaskRunExecutionStatus = { BLOCKED_BY_WAITPOINTS: "BLOCKED_BY_WAITPOINTS", PENDING_CANCEL: "PENDING_CANCEL", FINISHED: "FINISHED", -} as const; +} satisfies Enum; export type TaskRunExecutionStatus = (typeof TaskRunExecutionStatus)[keyof typeof TaskRunExecutionStatus]; @@ -32,7 +35,7 @@ export const TaskRunStatus = { CRASHED: "CRASHED", EXPIRED: "EXPIRED", TIMED_OUT: "TIMED_OUT", -} as const; +} satisfies Enum; export type TaskRunStatus = (typeof TaskRunStatus)[keyof typeof TaskRunStatus]; @@ -40,7 +43,7 @@ export const WaitpointType = { RUN: "RUN", DATETIME: "DATETIME", MANUAL: "MANUAL", -} as const; +} satisfies Enum; export type WaitpointType = (typeof WaitpointType)[keyof typeof WaitpointType]; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 686ec002d1..f12de8d925 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1390,6 +1390,9 @@ importers: '@arethetypeswrong/cli': specifier: ^0.15.4 version: 0.15.4 + '@trigger.dev/database': + specifier: workspace:^ + version: link:../../internal-packages/database '@types/humanize-duration': specifier: ^3.27.1 version: 3.27.1 From 6323e7bb4e9926aab96c935c4ed2b2da08a1d488 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 12 Nov 2024 16:26:29 +0000 Subject: [PATCH 173/485] Added jsdocs to the redis worker enqueue function --- internal-packages/redis-worker/src/worker.ts | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal-packages/redis-worker/src/worker.ts b/internal-packages/redis-worker/src/worker.ts index dcfc62b01b..04cc9c64b5 100644 --- a/internal-packages/redis-worker/src/worker.ts +++ b/internal-packages/redis-worker/src/worker.ts @@ -96,6 +96,16 @@ class Worker { this.setupSubscriber(); } + /** + * Enqueues a job for processing. + * @param options - The enqueue options. + * @param options.id - Optional unique identifier for the job. If not provided, one will be generated. It prevents duplication. 
+ * @param options.job - The job type from the worker catalog. + * @param options.payload - The job payload that matches the schema defined in the catalog. + * @param options.visibilityTimeoutMs - Optional visibility timeout in milliseconds. Defaults to value from catalog. + * @param options.availableAt - Optional date when the job should become available for processing. Defaults to now. + * @returns A promise that resolves when the job is enqueued. + */ enqueue({ id, job, From 6625f6975e6e674adcc0c3e944c925c042bc5589 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 12 Nov 2024 19:01:23 +0000 Subject: [PATCH 174/485] =?UTF-8?q?Deal=20with=20runs=20being=20dequeued?= =?UTF-8?q?=20where=20there=E2=80=99s=20no=20background=20worker,=20with?= =?UTF-8?q?=20a=20test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../database/prisma/schema.prisma | 1 + .../run-engine/src/engine/index.ts | 139 ++++++++++++++-- .../src/engine/tests/notDeployed.test.ts | 152 ++++++++++++++++++ 3 files changed, 281 insertions(+), 11 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/tests/notDeployed.test.ts diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index bc70575945..5d93d3a107 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2460,6 +2460,7 @@ model BatchTaskRun { runtimeEnvironment RuntimeEnvironment @relation(fields: [runtimeEnvironmentId], references: [id], onDelete: Cascade, onUpdate: Cascade) runtimeEnvironmentId String + /// This isn't used with the new RunEngine dependentTaskAttempt TaskRunAttempt? @relation(fields: [dependentTaskAttemptId], references: [id], onDelete: Cascade, onUpdate: Cascade) dependentTaskAttemptId String? 
diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 0b053a91a0..80c98918db 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -70,6 +70,7 @@ type Options = { /** If not set then checkpoints won't ever be used */ retryWarmStartThresholdMs?: number; heartbeatTimeoutsMs?: Partial; + queueRunsWaitingForWorkerBatchSize?: number; tracer: Tracer; }; @@ -149,6 +150,12 @@ const workerCatalog = { }), visibilityTimeoutMs: 5000, }, + queueRunsWaitingForWorker: { + schema: z.object({ + backgroundWorkerId: z.string(), + }), + visibilityTimeoutMs: 5000, + }, }; type EngineWorker = Worker; @@ -214,6 +221,9 @@ export class RunEngine { reason: payload.reason, }); }, + queueRunsWaitingForWorker: async ({ payload }) => { + await this.#queueRunsWaitingForWorker({ backgroundWorkerId: payload.backgroundWorkerId }); + }, }, }); @@ -475,11 +485,6 @@ export class RunEngine { ); } - /** Triggers multiple runs. - * This doesn't start execution, but it will create a batch and schedule them for execution. - */ - async batchTrigger() {} - /** * Gets a fairly selected run from the specified master queue, returning the information required to run it. 
* @param consumerId: The consumer that is pulling, allows multiple consumers to pull from the same queue @@ -585,12 +590,10 @@ export class RunEngine { //not deployed yet, so we'll wait for the deploy await this.#waitingForDeploy({ + orgId, runId, tx: prisma, }); - //we ack because when it's deployed it will be requeued - await this.runQueue.acknowledgeMessage(orgId, runId); - return null; } case "BACKGROUND_WORKER_MISMATCH": { @@ -624,11 +627,11 @@ export class RunEngine { }); //not deployed yet, so we'll wait for the deploy await this.#waitingForDeploy({ + orgId, runId, tx: prisma, }); - //we ack because when it's deployed it will be requeued - await this.runQueue.acknowledgeMessage(orgId, runId); + return null; } } @@ -1423,6 +1426,14 @@ export class RunEngine { }); } + async queueRunsWaitingForWorker({ backgroundWorkerId }: { backgroundWorkerId: string }) { + //we want this to happen in the background + await this.worker.enqueue({ + job: "queueRunsWaitingForWorker", + payload: { backgroundWorkerId }, + }); + } + /** * Reschedules a delayed run where the run hasn't been queued yet */ @@ -1897,7 +1908,41 @@ export class RunEngine { tx?: PrismaClientOrTransaction; }) {} - async #waitingForDeploy({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) {} + async #waitingForDeploy({ + orgId, + runId, + tx, + }: { + orgId: string; + runId: string; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? 
this.prisma; + + return this.#trace("#waitingForDeploy", { runId }, async (span) => { + return this.runLock.lock([runId], 5_000, async (signal) => { + //mark run as waiting for deploy + const run = await prisma.taskRun.update({ + where: { id: runId }, + data: { + status: "WAITING_FOR_DEPLOY", + }, + }); + + await this.#createExecutionSnapshot(prisma, { + run, + snapshot: { + executionStatus: "RUN_CREATED", + description: + "The run doesn't have a background worker, so we're going to ack it for now.", + }, + }); + + //we ack because when it's deployed it will be requeued + await this.runQueue.acknowledgeMessage(orgId, runId); + }); + }); + } async #attemptSucceeded({ runId, @@ -2304,6 +2349,78 @@ export class RunEngine { }); } + async #queueRunsWaitingForWorker({ backgroundWorkerId }: { backgroundWorkerId: string }) { + //It could be a lot of runs, so we will process them in a batch + //if there are still more to process we will enqueue this function again + const maxCount = this.options.queueRunsWaitingForWorkerBatchSize ?? 
200; + + const backgroundWorker = await this.prisma.backgroundWorker.findFirst({ + where: { + id: backgroundWorkerId, + }, + include: { + runtimeEnvironment: { + include: { + project: true, + organization: true, + }, + }, + tasks: true, + }, + }); + + if (!backgroundWorker) { + this.logger.error("#queueRunsWaitingForWorker: background worker not found", { + id: backgroundWorkerId, + }); + return; + } + + const runsWaitingForDeploy = await this.prisma.taskRun.findMany({ + where: { + runtimeEnvironmentId: backgroundWorker.runtimeEnvironmentId, + projectId: backgroundWorker.projectId, + status: "WAITING_FOR_DEPLOY", + taskIdentifier: { + in: backgroundWorker.tasks.map((task) => task.slug), + }, + }, + orderBy: { + createdAt: "asc", + }, + take: maxCount + 1, + }); + + //none to process + if (!runsWaitingForDeploy.length) return; + + for (const run of runsWaitingForDeploy) { + await this.prisma.$transaction(async (tx) => { + const updatedRun = await tx.taskRun.update({ + where: { + id: run.id, + }, + data: { + status: "PENDING", + }, + }); + await this.#enqueueRun({ + run: updatedRun, + env: backgroundWorker.runtimeEnvironment, + //add to the queue using the original run created time + //this should ensure they're in the correct order in the queue + timestamp: updatedRun.createdAt.getTime(), + tx, + }); + }); + } + + //enqueue more if needed + if (runsWaitingForDeploy.length > maxCount) { + await this.queueRunsWaitingForWorker({ backgroundWorkerId }); + } + } + //MARK: - Waitpoints async #createRunAssociatedWaitpoint( tx: PrismaClientOrTransaction, diff --git a/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts new file mode 100644 index 0000000000..b748475c51 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts @@ -0,0 +1,152 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, + assertNonNullable, +} from 
"@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "timers/promises"; + +describe("RunEngine not deployed", () => { + containerTest("Not yet deployed", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + //set this so we have to requeue the runs in two batches + queueRunsWaitingForWorkerBatchSize: 1, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //trigger another run + const run2 = await engine.trigger( + { + number: 2, + friendlyId: "run_1235", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12346", + spanId: "s12346", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //should be queued + const executionDataR1 = await engine.getRunExecutionData({ 
runId: run.id }); + const executionDataR2 = await engine.getRunExecutionData({ runId: run2.id }); + assertNonNullable(executionDataR1); + assertNonNullable(executionDataR2); + expect(executionDataR1.snapshot.executionStatus).toBe("QUEUED"); + expect(executionDataR2.snapshot.executionStatus).toBe("QUEUED"); + + //dequeuing should fail + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued.length).toBe(0); + + //queue should be empty + const queueLength = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); + expect(queueLength).toBe(0); + + //check the execution data now + const executionData2R1 = await engine.getRunExecutionData({ runId: run.id }); + const executionData2R2 = await engine.getRunExecutionData({ runId: run2.id }); + assertNonNullable(executionData2R1); + assertNonNullable(executionData2R2); + expect(executionData2R1.snapshot.executionStatus).toBe("RUN_CREATED"); + expect(executionData2R2.snapshot.executionStatus).toBe("RUN_CREATED"); + expect(executionData2R1.run.status).toBe("WAITING_FOR_DEPLOY"); + expect(executionData2R2.run.status).toBe("WAITING_FOR_DEPLOY"); + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //now we deploy the background worker + await engine.queueRunsWaitingForWorker({ backgroundWorkerId: backgroundWorker.worker.id }); + + //it's async so we wait + await setTimeout(500); + + //should now be queued + const executionData3R1 = await engine.getRunExecutionData({ runId: run.id }); + const executionData3R2 = await engine.getRunExecutionData({ runId: run2.id }); + assertNonNullable(executionData3R1); + assertNonNullable(executionData3R2); + expect(executionData3R1.snapshot.executionStatus).toBe("QUEUED"); + expect(executionData3R2.snapshot.executionStatus).toBe("QUEUED"); + 
expect(executionData3R1.run.status).toBe("PENDING"); + expect(executionData3R2.run.status).toBe("PENDING"); + + //queue should be empty + const queueLength2 = await engine.runQueue.lengthOfQueue(authenticatedEnvironment, run.queue); + expect(queueLength2).toBe(2); + } finally { + engine.quit(); + } + }); +}); From 85f3dab7038390c19249e86673162d86857d8c7e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 12:10:46 +0000 Subject: [PATCH 175/485] Added explicit return types to the other dequeue functions --- internal-packages/run-engine/src/engine/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 80c98918db..b247cb6923 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -830,7 +830,7 @@ export class RunEngine { maxResources?: MachineResources; backgroundWorkerId: string; tx?: PrismaClientOrTransaction; - }) { + }): Promise { return this.dequeueFromMasterQueue({ consumerId, masterQueue: this.#environmentMasterQueueKey(environmentId), @@ -853,7 +853,7 @@ export class RunEngine { maxRunCount: number; maxResources?: MachineResources; tx?: PrismaClientOrTransaction; - }) { + }): Promise { return this.dequeueFromMasterQueue({ consumerId, masterQueue: this.#backgroundWorkerQueueKey(backgroundWorkerId), From de9dd66f0317c409ecc48f632b946823e0b5424d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 12:59:59 +0000 Subject: [PATCH 176/485] BatchTaskRun taskidentifier optional, updated unique constraint --- .../v3/services/batchTriggerTask.server.ts | 3 +-- .../database/prisma/schema.prisma | 20 ++++++++++++------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/apps/webapp/app/v3/services/batchTriggerTask.server.ts b/apps/webapp/app/v3/services/batchTriggerTask.server.ts index fc5874bd4d..5dc1742e69 100644 --- 
a/apps/webapp/app/v3/services/batchTriggerTask.server.ts +++ b/apps/webapp/app/v3/services/batchTriggerTask.server.ts @@ -26,10 +26,9 @@ export class BatchTriggerTaskService extends BaseService { const existingBatch = options.idempotencyKey ? await this._prisma.batchTaskRun.findUnique({ where: { - runtimeEnvironmentId_taskIdentifier_idempotencyKey: { + runtimeEnvironmentId_idempotencyKey: { runtimeEnvironmentId: environment.id, idempotencyKey: options.idempotencyKey, - taskIdentifier: taskId, }, }, include: { diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 5d93d3a107..1364829d57 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2452,26 +2452,32 @@ model BatchTaskRun { status BatchTaskRunStatus @default(PENDING) idempotencyKey String? - taskIdentifier String + //engine v1 only + taskIdentifier String? + + //engine v1 only checkpointEvent CheckpointRestoreEvent? @relation(fields: [checkpointEventId], references: [id], onDelete: Cascade, onUpdate: Cascade) checkpointEventId String? @unique runtimeEnvironment RuntimeEnvironment @relation(fields: [runtimeEnvironmentId], references: [id], onDelete: Cascade, onUpdate: Cascade) runtimeEnvironmentId String - /// This isn't used with the new RunEngine + //engine v1 only dependentTaskAttempt TaskRunAttempt? @relation(fields: [dependentTaskAttemptId], references: [id], onDelete: Cascade, onUpdate: Cascade) dependentTaskAttemptId String? 
- items BatchTaskRunItem[] + items BatchTaskRunItem[] + + //engine v1 only runDependencies TaskRunDependency[] @relation("dependentBatchRun") - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt - TaskRun TaskRun[] + runs TaskRun[] - @@unique([runtimeEnvironmentId, taskIdentifier, idempotencyKey]) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + @@unique([runtimeEnvironmentId, idempotencyKey]) } enum BatchTaskRunStatus { From e4cb7fd6bdbe285bf52b6ff633e9fe14d5e4fd3a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 13:00:19 +0000 Subject: [PATCH 177/485] DB migration for BatchTaskRun and worker changes --- .../migration.sql | 94 +++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql diff --git a/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql b/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql new file mode 100644 index 0000000000..46551cc7f9 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql @@ -0,0 +1,94 @@ +/* + Warnings: + + - You are about to drop the `WorkerGroup` table. If the table is not empty, all the data it contains will be lost. + - A unique constraint covering the columns `[runtimeEnvironmentId,idempotencyKey]` on the table `BatchTaskRun` will be added. If there are existing duplicate values, this will fail. + - A unique constraint covering the columns `[workerGroupId,resourceIdentifier]` on the table `WorkerInstance` will be added. If there are existing duplicate values, this will fail. + - Added the required column `resourceIdentifier` to the `WorkerInstance` table without a default value. 
This is not possible if the table is not empty. + +*/ +-- DropForeignKey +ALTER TABLE "BackgroundWorker" DROP CONSTRAINT "BackgroundWorker_workerGroupId_fkey"; + +-- DropForeignKey +ALTER TABLE "Project" DROP CONSTRAINT "Project_defaultWorkerGroupId_fkey"; + +-- DropForeignKey +ALTER TABLE "WorkerGroup" DROP CONSTRAINT "WorkerGroup_organizationId_fkey"; + +-- DropForeignKey +ALTER TABLE "WorkerGroup" DROP CONSTRAINT "WorkerGroup_projectId_fkey"; + +-- DropForeignKey +ALTER TABLE "WorkerGroup" DROP CONSTRAINT "WorkerGroup_tokenId_fkey"; + +-- DropForeignKey +ALTER TABLE "WorkerInstance" DROP CONSTRAINT "WorkerInstance_workerGroupId_fkey"; + +-- DropIndex +DROP INDEX "BatchTaskRun_runtimeEnvironmentId_taskIdentifier_idempotenc_key"; + +-- DropIndex +DROP INDEX "WorkerInstance_workerGroupId_name_key"; + +-- AlterTable +ALTER TABLE "BatchTaskRun" ALTER COLUMN "taskIdentifier" DROP NOT NULL; + +-- AlterTable +ALTER TABLE "Project" ADD COLUMN "engine" "RunEngineVersion" NOT NULL DEFAULT 'V1'; + +-- AlterTable +ALTER TABLE "WorkerDeployment" ADD COLUMN "type" "WorkerInstanceGroupType" NOT NULL DEFAULT 'SHARED'; + +-- AlterTable +ALTER TABLE "WorkerInstance" ADD COLUMN "resourceIdentifier" TEXT NOT NULL; + +-- DropTable +DROP TABLE "WorkerGroup"; + +-- CreateTable +CREATE TABLE "WorkerInstanceGroup" ( + "id" TEXT NOT NULL, + "type" "WorkerInstanceGroupType" NOT NULL, + "name" TEXT NOT NULL, + "masterQueue" TEXT NOT NULL, + "description" TEXT, + "hidden" BOOLEAN NOT NULL DEFAULT false, + "tokenId" TEXT NOT NULL, + "organizationId" TEXT, + "projectId" TEXT, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "WorkerInstanceGroup_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerInstanceGroup_masterQueue_key" ON "WorkerInstanceGroup"("masterQueue"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerInstanceGroup_tokenId_key" ON "WorkerInstanceGroup"("tokenId"); + +-- CreateIndex +CREATE UNIQUE 
INDEX "BatchTaskRun_runtimeEnvironmentId_idempotencyKey_key" ON "BatchTaskRun"("runtimeEnvironmentId", "idempotencyKey"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerInstance_workerGroupId_resourceIdentifier_key" ON "WorkerInstance"("workerGroupId", "resourceIdentifier"); + +-- AddForeignKey +ALTER TABLE "Project" ADD CONSTRAINT "Project_defaultWorkerGroupId_fkey" FOREIGN KEY ("defaultWorkerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "BackgroundWorker" ADD CONSTRAINT "BackgroundWorker_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_tokenId_fkey" FOREIGN KEY ("tokenId") REFERENCES "WorkerGroupToken"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; From 7f7644fdedbee1283854baf68ac70c514a37fda2 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 15:32:52 +0000 Subject: [PATCH 178/485] Added a todo to triggerTask --- apps/webapp/app/v3/services/triggerTask.server.ts | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index 76b45d597b..9102143c20 100644 --- 
a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -31,18 +31,23 @@ export class TriggerTaskService extends WithRunEngine { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); + //todo we need to determine the version using the BackgroundWorker + //- triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. + //- No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + // const workerWithTasks = workerId + // ? await getWorkerDeploymentFromWorker(prisma, workerId) + // : run.runtimeEnvironment.type === "DEVELOPMENT" + // ? await getMostRecentWorker(prisma, run.runtimeEnvironmentId) + // : await getWorkerFromCurrentlyPromotedDeployment(prisma, run.runtimeEnvironmentId); + if (environment.project.engine === RunEngineVersion.V1) { return await this.callV1(taskId, environment, body, options); } - // The project is using the new Run Engine - if (environment.type === RuntimeEnvironmentType.DEVELOPMENT) { return await this.callV1(taskId, environment, body, options); } - // The environment is not development, so we need to use the new Run Engine - //todo Additional checks /* - If the `triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. 
From 7c0befe40aaccea46ace5719daee2e8173a71da7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 15:33:25 +0000 Subject: [PATCH 179/485] =?UTF-8?q?Added=20some=20notes=20to=20the=20schem?= =?UTF-8?q?a=20making=20it=20clear=20what=20we=E2=80=99re=20using=20for=20?= =?UTF-8?q?batches=20in=20engine=20v2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../database/prisma/schema.prisma | 45 +++++++------------ 1 file changed, 17 insertions(+), 28 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 1364829d57..fe75f4963f 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2445,38 +2445,26 @@ enum TaskQueueType { } model BatchTaskRun { - id String @id @default(cuid()) - - friendlyId String @unique - - status BatchTaskRunStatus @default(PENDING) - - idempotencyKey String? - - //engine v1 only - taskIdentifier String? - - //engine v1 only - checkpointEvent CheckpointRestoreEvent? @relation(fields: [checkpointEventId], references: [id], onDelete: Cascade, onUpdate: Cascade) - checkpointEventId String? @unique - + id String @id @default(cuid()) + friendlyId String @unique + idempotencyKey String? runtimeEnvironment RuntimeEnvironment @relation(fields: [runtimeEnvironmentId], references: [id], onDelete: Cascade, onUpdate: Cascade) runtimeEnvironmentId String - - //engine v1 only - dependentTaskAttempt TaskRunAttempt? @relation(fields: [dependentTaskAttemptId], references: [id], onDelete: Cascade, onUpdate: Cascade) + runs TaskRun[] + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + ///all the below properties are engine v1 only + items BatchTaskRunItem[] + status BatchTaskRunStatus @default(PENDING) + taskIdentifier String? + checkpointEvent CheckpointRestoreEvent? 
@relation(fields: [checkpointEventId], references: [id], onDelete: Cascade, onUpdate: Cascade) + checkpointEventId String? @unique + dependentTaskAttempt TaskRunAttempt? @relation(fields: [dependentTaskAttemptId], references: [id], onDelete: Cascade, onUpdate: Cascade) dependentTaskAttemptId String? + runDependencies TaskRunDependency[] @relation("dependentBatchRun") - items BatchTaskRunItem[] - - //engine v1 only - runDependencies TaskRunDependency[] @relation("dependentBatchRun") - - runs TaskRun[] - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt - + ///this is used for all engine versions @@unique([runtimeEnvironmentId, idempotencyKey]) } @@ -2485,6 +2473,7 @@ enum BatchTaskRunStatus { COMPLETED } +///Used in engine V1 only model BatchTaskRunItem { id String @id @default(cuid()) From 33fbc2e1d390d541554c2b0c8c25bf21cd6fbd45 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 15:34:07 +0000 Subject: [PATCH 180/485] Move the emitted event down --- .../run-engine/src/engine/index.ts | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index b247cb6923..a1e99e9af2 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -2186,16 +2186,6 @@ export class RunEngine { }, }); - this.eventBus.emit("runFailed", { - time: failedAt, - run: { - id: runId, - status: run.status, - spanId: run.spanId, - error, - }, - }); - if (!run.associatedWaitpoint) { throw new ServiceValidationError("No associated waitpoint found", 400); } @@ -2207,6 +2197,16 @@ export class RunEngine { await this.runQueue.acknowledgeMessage(run.runtimeEnvironment.organizationId, runId); + this.eventBus.emit("runFailed", { + time: failedAt, + run: { + id: runId, + status: run.status, + spanId: run.spanId, + error, + }, + }); + return "COMPLETED" as const; }); }); From 
51c4434f299d39f243c1d469cf3e67962157108d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 15:48:39 +0000 Subject: [PATCH 181/485] Added a batchTrigger test where a BatchTaskRun is created and passed in --- .../src/engine/tests/batchTrigger.test.ts | 115 +++++++++++++++++- .../engine/tests/batchTriggerAndWait.test.ts | 16 --- 2 files changed, 112 insertions(+), 19 deletions(-) delete mode 100644 internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts index 731c4964e6..aaa631f6b8 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -7,10 +7,119 @@ import { import { trace } from "@opentelemetry/api"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; -import { setTimeout } from "timers/promises"; import { EventBusEventArgs } from "../eventBus.js"; +import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; describe("RunEngine batchTrigger", () => { - //todo batchTrigger tests - test("empty test", async () => {}); + containerTest( + "Batch trigger shares a batch", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0005, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + 
try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + const batch = await prisma.batchTaskRun.create({ + data: { + friendlyId: generateFriendlyId("batch"), + runtimeEnvironmentId: authenticatedEnvironment.id, + }, + }); + + //trigger the runs + const run1 = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + batchId: batch.id, + }, + prisma + ); + + const run2 = await engine.trigger( + { + number: 2, + friendlyId: "run_1235", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + batchId: batch.id, + }, + prisma + ); + + expect(run1).toBeDefined(); + expect(run1.friendlyId).toBe("run_1234"); + expect(run1.batchId).toBe(batch.id); + + expect(run2).toBeDefined(); + expect(run2.friendlyId).toBe("run_1235"); + expect(run2.batchId).toBe(batch.id); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength).toBe(2); + } finally { + engine.quit(); + } + } + ); }); diff --git a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts deleted file mode 100644 index c7fc43a89e..0000000000 --- a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { - containerTest, - setupAuthenticatedEnvironment, - 
setupBackgroundWorker, - assertNonNullable, -} from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; -import { expect } from "vitest"; -import { RunEngine } from "../index.js"; -import { setTimeout } from "timers/promises"; -import { EventBusEventArgs } from "../eventBus.js"; - -describe("RunEngine batchTriggerAndWait", () => { - //todo batchTriggerAndWait tests - test("empty test", async () => {}); -}); From d203c3e965f2cf484b474a21fc69fe8c54a9593a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 13 Nov 2024 15:33:16 +0000 Subject: [PATCH 182/485] add workers list command --- apps/webapp/app/routes/api.v1.workers.ts | 26 ++++ .../worker/workerGroupService.server.ts | 3 +- packages/cli-v3/src/apiClient.ts | 20 +++ packages/cli-v3/src/cli/index.ts | 2 + packages/cli-v3/src/commands/login.ts | 5 +- packages/cli-v3/src/commands/workers/index.ts | 10 ++ packages/cli-v3/src/commands/workers/list.ts | 119 ++++++++++++++++++ packages/core/src/v3/schemas/api.ts | 15 ++- 8 files changed, 196 insertions(+), 4 deletions(-) create mode 100644 apps/webapp/app/routes/api.v1.workers.ts create mode 100644 packages/cli-v3/src/commands/workers/index.ts create mode 100644 packages/cli-v3/src/commands/workers/list.ts diff --git a/apps/webapp/app/routes/api.v1.workers.ts b/apps/webapp/app/routes/api.v1.workers.ts new file mode 100644 index 0000000000..094fcf9e84 --- /dev/null +++ b/apps/webapp/app/routes/api.v1.workers.ts @@ -0,0 +1,26 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { ListWorkersResponseBody } from "@trigger.dev/core/v3"; +import { createLoaderApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; +import { WorkerGroupService } from "~/v3/services/worker/workerGroupService.server"; + +export const loader = createLoaderApiRoute( + { + corsStrategy: "all", + }, + async ({ authentication }): Promise> => { + const service = new WorkerGroupService(); + const workers = 
await service.listWorkerGroups({ + projectId: authentication.environment.projectId, + }); + + return json( + workers.map((w) => ({ + type: w.type, + name: w.name, + description: w.description, + isDefault: w.id === authentication.environment.project.defaultWorkerGroupId, + updatedAt: w.updatedAt, + })) + ); + } +); diff --git a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts index 3b1c362889..836537e2c2 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts @@ -91,7 +91,7 @@ export class WorkerGroupService extends WithRunEngine { Without a project ID, only shared worker groups will be returned. With a project ID, in addition to all shared worker groups, ones associated with the project will also be returned. */ - async listWorkerGroups({ projectId }: { projectId?: string }) { + async listWorkerGroups({ projectId, listHidden }: { projectId?: string; listHidden?: boolean }) { const workerGroups = await this._prisma.workerInstanceGroup.findMany({ where: { OR: [ @@ -102,6 +102,7 @@ export class WorkerGroupService extends WithRunEngine { projectId, }, ], + AND: listHidden ? 
[] : [{ hidden: false }], }, }); diff --git a/packages/cli-v3/src/apiClient.ts b/packages/cli-v3/src/apiClient.ts index 34a1009db1..3349a68bc3 100644 --- a/packages/cli-v3/src/apiClient.ts +++ b/packages/cli-v3/src/apiClient.ts @@ -20,6 +20,7 @@ import { FailDeploymentRequestBody, FailDeploymentResponseBody, FinalizeDeploymentRequestBody, + ListWorkersResponseBody, } from "@trigger.dev/core/v3"; import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; @@ -301,6 +302,25 @@ export class CliApiClient { } ); } + + get workers() { + return { + list: this.listWorkers.bind(this), + }; + } + + private async listWorkers() { + if (!this.accessToken) { + throw new Error("listWorkers: No access token"); + } + + return wrapZodFetch(ListWorkersResponseBody, `${this.apiURL}/api/v1/workers`, { + headers: { + Authorization: `Bearer ${this.accessToken}`, + Accept: "application/json", + }, + }); + } } type ApiResult = diff --git a/packages/cli-v3/src/cli/index.ts b/packages/cli-v3/src/cli/index.ts index 3c31d030f2..a0b1b9dae7 100644 --- a/packages/cli-v3/src/cli/index.ts +++ b/packages/cli-v3/src/cli/index.ts @@ -10,6 +10,7 @@ import { configureUpdateCommand } from "../commands/update.js"; import { VERSION } from "../version.js"; import { configureDeployCommand } from "../commands/deploy.js"; import { installExitHandler } from "./common.js"; +import { configureWorkersCommand } from "../commands/workers/index.js"; export const program = new Command(); @@ -26,5 +27,6 @@ configureWhoamiCommand(program); configureLogoutCommand(program); configureListProfilesCommand(program); configureUpdateCommand(program); +configureWorkersCommand(program); installExitHandler(); diff --git a/packages/cli-v3/src/commands/login.ts b/packages/cli-v3/src/commands/login.ts index 67e136c771..e820ad1b13 100644 --- a/packages/cli-v3/src/commands/login.ts +++ b/packages/cli-v3/src/commands/login.ts @@ -23,6 +23,7 @@ import { spinner } from "../utilities/windows.js"; import { isLinuxServer } from 
"../utilities/linux.js"; import { VERSION } from "../version.js"; import { env } from "std-env"; +import { CLOUD_API_URL } from "../consts.js"; export const LoginCommandOptions = CommonCommandOptions.extend({ apiUrl: z.string(), @@ -66,7 +67,7 @@ export async function login(options?: LoginOptions): Promise { return await tracer.startActiveSpan("login", async (span) => { try { const opts = { - defaultApiUrl: "https://api.trigger.dev", + defaultApiUrl: CLOUD_API_URL, embedded: false, silent: false, ...options, @@ -86,7 +87,7 @@ export async function login(options?: LoginOptions): Promise { if (accessTokenFromEnv) { const auth = { accessToken: accessTokenFromEnv, - apiUrl: env.TRIGGER_API_URL ?? opts.defaultApiUrl ?? "https://api.trigger.dev", + apiUrl: env.TRIGGER_API_URL ?? opts.defaultApiUrl ?? CLOUD_API_URL, }; const apiClient = new CliApiClient(auth.apiUrl, auth.accessToken); const userData = await apiClient.whoAmI(); diff --git a/packages/cli-v3/src/commands/workers/index.ts b/packages/cli-v3/src/commands/workers/index.ts new file mode 100644 index 0000000000..ad93e708e7 --- /dev/null +++ b/packages/cli-v3/src/commands/workers/index.ts @@ -0,0 +1,10 @@ +import { Command } from "commander"; +import { configureWorkersListCommand } from "./list.js"; + +export function configureWorkersCommand(program: Command) { + const workers = program.command("workers").description("Subcommands for managing workers"); + + configureWorkersListCommand(workers); + + return workers; +} diff --git a/packages/cli-v3/src/commands/workers/list.ts b/packages/cli-v3/src/commands/workers/list.ts new file mode 100644 index 0000000000..cebe80b29c --- /dev/null +++ b/packages/cli-v3/src/commands/workers/list.ts @@ -0,0 +1,119 @@ +import { Command } from "commander"; +import { printStandloneInitialBanner } from "../../utilities/initialBanner.js"; +import { + CommonCommandOptions, + commonOptions, + handleTelemetry, + wrapCommandAction, +} from "../../cli/common.js"; +import { login } from 
"../login.js"; +import { loadConfig } from "../../config.js"; +import { resolve } from "path"; +import { getProjectClient } from "../../utilities/session.js"; +import { logger } from "../../utilities/logger.js"; +import { z } from "zod"; +import { intro, log } from "@clack/prompts"; + +const WorkersListCommandOptions = CommonCommandOptions.extend({ + env: z.enum(["prod", "staging"]), + config: z.string().optional(), + projectRef: z.string().optional(), +}); +type WorkersListCommandOptions = z.infer; + +export function configureWorkersListCommand(program: Command) { + return commonOptions( + program + .command("list") + .description("List all available workers") + .argument("[path]", "The path to the project", ".") + .option( + "-e, --env ", + "Deploy to a specific environment (currently only prod and staging are supported)", + "prod" + ) + .option("-c, --config ", "The name of the config file, found at [path]") + .option( + "-p, --project-ref ", + "The project ref. Required if there is no config file. This will override the project specified in the config file." + ) + .action(async (path, options) => { + await handleTelemetry(async () => { + await printStandloneInitialBanner(true); + await workersListCommand(path, options); + }); + }) + ); +} + +async function workersListCommand(dir: string, options: unknown) { + return await wrapCommandAction( + "workerBuildCommand", + WorkersListCommandOptions, + options, + async (opts) => { + return await _workersListCommand(dir, opts); + } + ); +} + +async function _workersListCommand(dir: string, options: WorkersListCommandOptions) { + intro("Listing workers"); + + const authorization = await login({ + embedded: true, + defaultApiUrl: options.apiUrl, + profile: options.profile, + silent: true, + }); + + if (!authorization.ok) { + if (authorization.error === "fetch failed") { + throw new Error( + `Failed to connect to ${authorization.auth?.apiUrl}. 
Are you sure it's the correct URL?` + ); + } else { + throw new Error( + `You must login first. Use the \`login\` CLI command.\n\n${authorization.error}` + ); + } + } + + const projectPath = resolve(process.cwd(), dir); + + const resolvedConfig = await loadConfig({ + cwd: projectPath, + overrides: { project: options.projectRef }, + configFile: options.config, + }); + + logger.debug("Resolved config", resolvedConfig); + + const projectClient = await getProjectClient({ + accessToken: authorization.auth.accessToken, + apiUrl: authorization.auth.apiUrl, + projectRef: resolvedConfig.project, + env: options.env, + profile: options.profile, + }); + + if (!projectClient) { + throw new Error("Failed to get project client"); + } + + const workers = await projectClient.client.workers.list(); + + if (!workers.success) { + throw new Error("Failed to list workers"); + } + + logger.table( + workers.data.map((worker) => ({ + default: worker.isDefault ? "x" : "-", + type: worker.type, + name: worker.name, + description: worker.description ?? 
"-", + "updated at": worker.updatedAt.toLocaleString(), + })) + ); +} diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index f61e28be9e..edeee39039 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -1,4 +1,4 @@ -import { z } from "zod"; +import { coerce, date, z } from "zod"; import { DeserializedJsonSchema } from "../../schemas/json.js"; import { SerializedError } from "./common.js"; import { BackgroundWorkerMetadata } from "./resources.js"; @@ -253,6 +253,19 @@ export const GetDeploymentResponseBody = z.object({ export type GetDeploymentResponseBody = z.infer; +export const ListWorkersResponseBody = z + .object({ + type: z.string(), + name: z.string(), + description: z.string().nullish(), + latestVersion: z.string().nullish(), + lastHeartbeatAt: z.string().nullish(), + isDefault: z.boolean(), + updatedAt: z.coerce.date(), + }) + .array(); +export type ListWorkersResponseBody = z.infer; + export const CreateUploadPayloadUrlResponseBody = z.object({ presignedUrl: z.string(), }); From 1bf37c9b7a1dad69f384e3a62a3b720dd1cc0b7c Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 13 Nov 2024 15:34:59 +0000 Subject: [PATCH 183/485] support a worker group being set as default by multiple projects --- internal-packages/database/prisma/schema.prisma | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index fe75f4963f..ce6500ede7 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2147,7 +2147,7 @@ model WorkerInstanceGroup { workers WorkerInstance[] backgroundWorkers BackgroundWorker[] - defaultForProject Project? @relation("ProjectDefaultWorkerGroup") + defaultForProjects Project[] @relation("ProjectDefaultWorkerGroup") organization Organization? 
@relation(fields: [organizationId], references: [id], onDelete: Cascade, onUpdate: Cascade) organizationId String? From 57474179fa60b64ed6045dd58f608dcc48b05d0d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 15:55:22 +0000 Subject: [PATCH 184/485] Run test files in sequence --- internal-packages/run-engine/vitest.config.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/internal-packages/run-engine/vitest.config.ts b/internal-packages/run-engine/vitest.config.ts index 0e8719d265..a34eb571d6 100644 --- a/internal-packages/run-engine/vitest.config.ts +++ b/internal-packages/run-engine/vitest.config.ts @@ -9,6 +9,7 @@ export default defineConfig({ threads: { singleThread: true, }, + fileParallelism: false, }, }, }); From a2dd5e34618132627459c6eb9f4826d79b9cf9d3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 15:55:32 +0000 Subject: [PATCH 185/485] Moved types to a separate file --- .../run-engine/src/engine/index.ts | 82 ++----------------- .../run-engine/src/engine/types.ts | 74 +++++++++++++++++ 2 files changed, 79 insertions(+), 77 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/types.ts diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index a1e99e9af2..65cc5ecb87 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1,14 +1,11 @@ -import { Worker, type WorkerConcurrencyOptions } from "@internal/redis-worker"; +import { Worker } from "@internal/redis-worker"; import { Attributes, Span, SpanKind, trace, Tracer } from "@opentelemetry/api"; +import { assertExhaustive } from "@trigger.dev/core"; import { Logger } from "@trigger.dev/core/logger"; import { CompleteAttemptResult, DequeuedMessage, - MachinePreset, - MachinePresetName, parsePacket, - QueueOptions, - RetryOptions, RunExecutionData, sanitizeError, shouldRetryError, @@ -34,10 +31,9 @@ import { TaskRun, 
TaskRunExecutionStatus, TaskRunStatus, - Waitpoint, } from "@trigger.dev/database"; import assertNever from "assert-never"; -import { Redis, type RedisOptions } from "ioredis"; +import { Redis } from "ioredis"; import { nanoid } from "nanoid"; import { EventEmitter } from "node:events"; import { z } from "zod"; @@ -51,75 +47,7 @@ import { EventBusEvents } from "./eventBus"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; -import { assertExhaustive } from "@trigger.dev/core"; - -type Options = { - redis: RedisOptions; - prisma: PrismaClient; - worker: WorkerConcurrencyOptions & { - pollIntervalMs?: number; - }; - machines: { - defaultMachine: MachinePresetName; - machines: Record; - baseCostInCents: number; - }; - queue?: { - retryOptions?: RetryOptions; - }; - /** If not set then checkpoints won't ever be used */ - retryWarmStartThresholdMs?: number; - heartbeatTimeoutsMs?: Partial; - queueRunsWaitingForWorkerBatchSize?: number; - tracer: Tracer; -}; - -type HeartbeatTimeouts = { - PENDING_EXECUTING: number; - PENDING_CANCEL: number; - EXECUTING: number; - EXECUTING_WITH_WAITPOINTS: number; -}; - -type MachineResources = { - cpu: number; - memory: number; -}; - -type TriggerParams = { - friendlyId: string; - number: number; - environment: MinimalAuthenticatedEnvironment; - idempotencyKey?: string; - taskIdentifier: string; - payload: string; - payloadType: string; - context: any; - traceContext: Record; - traceId: string; - spanId: string; - parentSpanId?: string; - lockedToVersionId?: string; - concurrencyKey?: string; - masterQueue: string; - queueName: string; - queue?: QueueOptions; - isTest: boolean; - delayUntil?: Date; - queuedAt?: Date; - maxAttempts?: number; - ttl?: string; - tags: string[]; - parentTaskRunId?: string; - rootTaskRunId?: string; - batchId?: string; - resumeParentOnCompletion?: boolean; - depth?: number; - metadata?: 
string; - metadataType?: string; - seedMetadata?: string; - seedMetadataType?: string; -}; +import { HeartbeatTimeouts, MachineResources, RunEngineOptions, TriggerParams } from "./types"; const workerCatalog = { finishWaitpoint: { @@ -171,7 +99,7 @@ export class RunEngine { private heartbeatTimeouts: HeartbeatTimeouts; eventBus = new EventEmitter(); - constructor(private readonly options: Options) { + constructor(private readonly options: RunEngineOptions) { this.prisma = options.prisma; this.redis = new Redis(options.redis); this.runLock = new RunLocker({ redis: this.redis }); diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts new file mode 100644 index 0000000000..c6d598690f --- /dev/null +++ b/internal-packages/run-engine/src/engine/types.ts @@ -0,0 +1,74 @@ +import { type WorkerConcurrencyOptions } from "@internal/redis-worker"; +import { Tracer } from "@opentelemetry/api"; +import { MachinePreset, MachinePresetName, QueueOptions, RetryOptions } from "@trigger.dev/core/v3"; +import { PrismaClient } from "@trigger.dev/database"; +import { type RedisOptions } from "ioredis"; +import { MinimalAuthenticatedEnvironment } from "../shared"; + +export type RunEngineOptions = { + redis: RedisOptions; + prisma: PrismaClient; + worker: WorkerConcurrencyOptions & { + pollIntervalMs?: number; + }; + machines: { + defaultMachine: MachinePresetName; + machines: Record; + baseCostInCents: number; + }; + queue?: { + retryOptions?: RetryOptions; + }; + /** If not set then checkpoints won't ever be used */ + retryWarmStartThresholdMs?: number; + heartbeatTimeoutsMs?: Partial; + queueRunsWaitingForWorkerBatchSize?: number; + tracer: Tracer; +}; + +export type HeartbeatTimeouts = { + PENDING_EXECUTING: number; + PENDING_CANCEL: number; + EXECUTING: number; + EXECUTING_WITH_WAITPOINTS: number; +}; + +export type MachineResources = { + cpu: number; + memory: number; +}; + +export type TriggerParams = { + friendlyId: 
string; + number: number; + environment: MinimalAuthenticatedEnvironment; + idempotencyKey?: string; + taskIdentifier: string; + payload: string; + payloadType: string; + context: any; + traceContext: Record; + traceId: string; + spanId: string; + parentSpanId?: string; + lockedToVersionId?: string; + concurrencyKey?: string; + masterQueue: string; + queueName: string; + queue?: QueueOptions; + isTest: boolean; + delayUntil?: Date; + queuedAt?: Date; + maxAttempts?: number; + ttl?: string; + tags: string[]; + parentTaskRunId?: string; + rootTaskRunId?: string; + batchId?: string; + resumeParentOnCompletion?: boolean; + depth?: number; + metadata?: string; + metadataType?: string; + seedMetadata?: string; + seedMetadataType?: string; +}; From 12f4fbb9a77855eb1b69e98494030e3c2028c0f7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 17:31:12 +0000 Subject: [PATCH 186/485] WIP improving the consistency of the return types in the run engine --- .../src/engine/executionSnapshots.ts | 74 +++++ .../run-engine/src/engine/index.ts | 263 +++++++++--------- .../src/engine/tests/cancelling.test.ts | 6 +- .../src/engine/tests/trigger.test.ts | 17 +- .../src/engine/tests/waitpoints.test.ts | 5 +- packages/core/src/v3/schemas/runEngine.ts | 66 +++-- packages/trigger-sdk/src/package.json | 3 + 7 files changed, 275 insertions(+), 159 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/executionSnapshots.ts create mode 100644 packages/trigger-sdk/src/package.json diff --git a/internal-packages/run-engine/src/engine/executionSnapshots.ts b/internal-packages/run-engine/src/engine/executionSnapshots.ts new file mode 100644 index 0000000000..547e379fea --- /dev/null +++ b/internal-packages/run-engine/src/engine/executionSnapshots.ts @@ -0,0 +1,74 @@ +import { ExecutionResult } from "@trigger.dev/core/v3"; +import { PrismaClientOrTransaction, TaskRunExecutionSnapshot } from "@trigger.dev/database"; + +/* Gets the most recent valid snapshot for a run */ 
+export async function getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { + const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { runId, isValid: true }, + include: { + completedWaitpoints: true, + checkpoint: true, + }, + orderBy: { createdAt: "desc" }, + }); + + if (!snapshot) { + throw new Error(`No execution snapshot found for TaskRun ${runId}`); + } + + return { + ...snapshot, + completedWaitpoints: snapshot.completedWaitpoints.map((w) => ({ + id: w.id, + type: w.type, + completedAt: w.completedAt ?? new Date(), + idempotencyKey: + w.userProvidedIdempotencyKey && !w.inactiveIdempotencyKey ? w.idempotencyKey : undefined, + completedByTaskRunId: w.completedByTaskRunId ?? undefined, + completedAfter: w.completedAfter ?? undefined, + output: w.output ?? undefined, + outputType: w.outputType, + outputIsError: w.outputIsError, + })), + }; +} + +export async function getExecutionSnapshotCompletedWaitpoints( + prisma: PrismaClientOrTransaction, + snapshotId: string +) { + const waitpoints = await prisma.taskRunExecutionSnapshot.findFirst({ + where: { id: snapshotId }, + include: { + completedWaitpoints: true, + }, + }); + + //deduplicate waitpoints + const waitpointIds = new Set(); + return ( + waitpoints?.completedWaitpoints.filter((waitpoint) => { + if (waitpointIds.has(waitpoint.id)) { + return false; + } else { + waitpointIds.add(waitpoint.id); + return true; + } + }) ?? 
[] + ); +} + +export function executionResultFromSnapshot(snapshot: TaskRunExecutionSnapshot): ExecutionResult { + return { + snapshot: { + id: snapshot.id, + executionStatus: snapshot.executionStatus, + description: snapshot.description, + }, + run: { + id: snapshot.runId, + status: snapshot.runStatus, + attemptNumber: snapshot.attemptNumber, + }, + }; +} diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 65cc5ecb87..af8a53d767 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -3,12 +3,15 @@ import { Attributes, Span, SpanKind, trace, Tracer } from "@opentelemetry/api"; import { assertExhaustive } from "@trigger.dev/core"; import { Logger } from "@trigger.dev/core/logger"; import { - CompleteAttemptResult, + CancelRunResult, + CompleteRunAttemptResult, DequeuedMessage, + ExecutionResult, parsePacket, RunExecutionData, sanitizeError, shouldRetryError, + StartRunAttemptResult, TaskRunError, taskRunErrorEnhancer, TaskRunExecution, @@ -29,6 +32,7 @@ import { PrismaClient, PrismaClientOrTransaction, TaskRun, + TaskRunExecutionSnapshot, TaskRunExecutionStatus, TaskRunStatus, } from "@trigger.dev/database"; @@ -48,6 +52,11 @@ import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; import { HeartbeatTimeouts, MachineResources, RunEngineOptions, TriggerParams } from "./types"; +import { + executionResultFromSnapshot, + getExecutionSnapshotCompletedWaitpoints, + getLatestExecutionSnapshot, +} from "./executionSnapshots"; const workerCatalog = { finishWaitpoint: { @@ -208,7 +217,7 @@ export class RunEngine { seedMetadataType, }: TriggerParams, tx?: PrismaClientOrTransaction - ) { + ): Promise { const prisma = tx ?? 
this.prisma; return this.#trace( @@ -457,7 +466,7 @@ export class RunEngine { //lock the run so nothing else can modify it try { const dequeuedRun = await this.runLock.lock([runId], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const snapshot = await getLatestExecutionSnapshot(prisma, runId); if (!isDequeueableExecutionStatus(snapshot.executionStatus)) { //create a failed snapshot @@ -667,6 +676,8 @@ export class RunEngine { version: "1" as const, snapshot: { id: newSnapshot.id, + executionStatus: newSnapshot.executionStatus, + description: newSnapshot.description, }, image: result.deployment?.imageReference ?? undefined, checkpoint: newSnapshot.checkpoint ?? undefined, @@ -800,12 +811,12 @@ export class RunEngine { runId: string; snapshotId: string; tx?: PrismaClientOrTransaction; - }) { + }): Promise { const prisma = tx ?? this.prisma; return this.#trace("startRunAttempt", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { //if there is a big delay between the snapshot and the attempt, the snapshot might have changed @@ -884,13 +895,19 @@ export class RunEngine { const nextAttemptNumber = (taskRun.attemptNumber ?? 
0) + 1; if (nextAttemptNumber > MAX_TASK_RUN_ATTEMPTS) { - await this.#crash({ + await this.#attemptFailed({ runId: taskRun.id, - error: { - type: "INTERNAL_ERROR", - code: "TASK_RUN_CRASHED", - message: "Max attempts reached.", + snapshotId, + completion: { + ok: false, + id: taskRun.id, + error: { + type: "INTERNAL_ERROR", + code: "TASK_RUN_CRASHED", + message: "Max attempts reached.", + }, }, + tx: prisma, }); throw new ServiceValidationError("Max attempts reached", 400); } @@ -1044,11 +1061,7 @@ export class RunEngine { machine: machinePreset, }; - return { - run, - snapshot, - execution, - }; + return { run, snapshot, execution }; }); }); } @@ -1062,7 +1075,7 @@ export class RunEngine { runId: string; snapshotId: string; completion: TaskRunExecutionResult; - }): Promise { + }): Promise { switch (completion.ok) { case true: { return this.#attemptSucceeded({ runId, snapshotId, completion, tx: this.prisma }); @@ -1093,7 +1106,7 @@ export class RunEngine { const prisma = tx ?? this.prisma; return await this.runLock.lock([runId], 5_000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const snapshot = await getLatestExecutionSnapshot(prisma, runId); if (snapshot.id !== snapshotId) { throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); @@ -1166,7 +1179,7 @@ export class RunEngine { async expireRun({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { const prisma = tx ?? 
this.prisma; await this.runLock.lock([runId], 5_000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const snapshot = await getLatestExecutionSnapshot(prisma, runId); //if we're executing then we won't expire the run if (isExecuting(snapshot.executionStatus)) { @@ -1247,23 +1260,23 @@ export class RunEngine { reason?: string; finalizeRun?: boolean; tx?: PrismaClientOrTransaction; - }): Promise<"FINISHED" | "PENDING_CANCEL"> { + }): Promise { const prisma = tx ?? this.prisma; reason = reason ?? "Cancelled by user"; return this.#trace("cancelRun", { runId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); //already finished, do nothing if (latestSnapshot.executionStatus === "FINISHED") { - return "FINISHED" as const; + return executionResultFromSnapshot(latestSnapshot); } //is pending cancellation and we're not finalizing, alert the worker again if (latestSnapshot.executionStatus === "PENDING_CANCEL" && !finalizeRun) { await this.#sendNotificationToWorker({ runId }); - return "PENDING_CANCEL" as const; + return executionResultFromSnapshot(latestSnapshot); } //set the run to cancelled immediately @@ -1305,11 +1318,11 @@ export class RunEngine { //the worker needs to be notified so it can kill the run and complete the attempt await this.#sendNotificationToWorker({ runId }); - return "PENDING_CANCEL" as const; + return executionResultFromSnapshot(newSnapshot); } //not executing, so we will actually finish the run - await this.#createExecutionSnapshot(prisma, { + const newSnapshot = await this.#createExecutionSnapshot(prisma, { run, snapshot: { executionStatus: "FINISHED", @@ -1349,7 +1362,7 @@ export class RunEngine { } } - return "FINISHED" as const; + return executionResultFromSnapshot(newSnapshot); }); }); } @@ -1377,7 +1390,7 @@ export 
class RunEngine { const prisma = tx ?? this.prisma; return this.#trace("rescheduleRun", { runId }, async (span) => { await this.runLock.lock([runId], 5_000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const snapshot = await getLatestExecutionSnapshot(prisma, runId); //if the run isn't just created then we can't reschedule it if (snapshot.executionStatus !== "RUN_CREATED") { @@ -1526,7 +1539,7 @@ export class RunEngine { }, }); - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; if ( @@ -1618,7 +1631,7 @@ export class RunEngine { // 4. Add the completed waitpoints to the snapshots for (const run of affectedTaskRuns) { await this.runLock.lock([run.taskRunId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(tx, run.taskRunId); + const latestSnapshot = await getLatestExecutionSnapshot(tx, run.taskRunId); await tx.taskRunExecutionSnapshot.update({ where: { id: latestSnapshot.id }, @@ -1678,7 +1691,7 @@ export class RunEngine { const prisma = tx ?? this.prisma; return await this.runLock.lock([runId], 5_000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const snapshot = await getLatestExecutionSnapshot(prisma, runId); if (snapshot.id !== snapshotId) { return { ok: false as const, @@ -1711,7 +1724,7 @@ export class RunEngine { const prisma = tx ?? 
this.prisma; //we don't need to acquire a run lock for any of this, it's not critical if it happens on an older version - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { this.logger.log("heartbeatRun no longer the latest snapshot, stopping the heartbeat.", { runId, @@ -1745,7 +1758,7 @@ export class RunEngine { }): Promise { const prisma = tx ?? this.prisma; try { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const snapshot = await getLatestExecutionSnapshot(prisma, runId); const executionData: RunExecutionData = { version: "1" as const, @@ -1802,18 +1815,26 @@ export class RunEngine { runId: string; error: TaskRunInternalError; tx?: PrismaClientOrTransaction; - }) { + }): Promise { const prisma = tx ?? this.prisma; return this.#trace("#systemFailure", { runId }, async (span) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); //already finished if (latestSnapshot.executionStatus === "FINISHED") { //todo check run is in the correct state - return; + return { + attemptStatus: "RUN_FINISHED", + snapshot: latestSnapshot, + run: { + id: runId, + status: latestSnapshot.runStatus, + attemptNumber: latestSnapshot.attemptNumber, + }, + }; } - await this.#attemptFailed({ + const result = await this.#attemptFailed({ runId, snapshotId: latestSnapshot.id, completion: { @@ -1823,19 +1844,11 @@ export class RunEngine { }, tx: prisma, }); + + return result; }); } - async #crash({ - runId, - error, - tx, - }: { - runId: string; - error: TaskRunInternalError; - tx?: PrismaClientOrTransaction; - }) {} - async #waitingForDeploy({ orgId, runId, @@ -1882,11 +1895,11 @@ export class RunEngine { snapshotId: string; completion: TaskRunSuccessfulExecutionResult; tx: PrismaClientOrTransaction; - }) { + }): Promise 
{ const prisma = tx ?? this.prisma; return this.#trace("#completeRunAttemptSuccess", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); @@ -1895,6 +1908,7 @@ export class RunEngine { span.setAttribute("completionStatus", completion.ok); const completedAt = new Date(); + const run = await prisma.taskRun.update({ where: { id: runId }, data: { @@ -1912,6 +1926,9 @@ export class RunEngine { }, }, select: { + id: true, + status: true, + attemptNumber: true, spanId: true, associatedWaitpoint: { select: { @@ -1925,6 +1942,7 @@ export class RunEngine { }, }, }); + const newSnapshot = await getLatestExecutionSnapshot(prisma, runId); await this.runQueue.acknowledgeMessage(run.project.organizationId, runId); if (!run.associatedWaitpoint) { @@ -1948,7 +1966,11 @@ export class RunEngine { }, }); - return "COMPLETED" as const; + return { + attemptStatus: "RUN_FINISHED", + snapshot: newSnapshot, + run, + }; }); }); } @@ -1965,12 +1987,12 @@ export class RunEngine { completion: TaskRunFailedExecutionResult; forceRequeue?: boolean; tx: PrismaClientOrTransaction; - }): Promise<"COMPLETED" | "RETRY_QUEUED" | "RETRY_IMMEDIATELY"> { + }): Promise { const prisma = this.prisma; return this.#trace("completeRunAttemptFailure", { runId, snapshotId }, async (span) => { return this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); @@ -1991,14 +2013,20 @@ export class RunEngine { 
completion.error.code === "TASK_RUN_CANCELLED" ) { // We need to cancel the task run instead of fail it - await this.cancelRun({ + const result = await this.cancelRun({ runId, completedAt: failedAt, reason: completion.error.message, finalizeRun: true, tx: prisma, }); - return "COMPLETED" as const; + return { + attemptStatus: + result.snapshot.executionStatus === "PENDING_CANCEL" + ? "RUN_PENDING_CANCEL" + : "RUN_FINISHED", + ...result, + }; } const error = sanitizeError(completion.error); @@ -2054,8 +2082,8 @@ export class RunEngine { (this.options.retryWarmStartThresholdMs !== undefined && completion.retry.delay >= this.options.retryWarmStartThresholdMs) ) { - //we nack the message, this allows another work to pick up the run - const gotRequeued = await this.#tryNackAndRequeue({ + //we nack the message, requeuing it for later + const nackResult = await this.#tryNackAndRequeue({ run, orgId: run.runtimeEnvironment.organizationId, timestamp: retryAt.getTime(), @@ -2067,25 +2095,31 @@ export class RunEngine { tx: prisma, }); - if (!gotRequeued) { - return "COMPLETED"; + if (!nackResult.wasRequeued) { + return { + attemptStatus: "RUN_FINISHED", + ...nackResult, + }; } else { - return "RETRY_QUEUED"; + return { attemptStatus: "RETRY_QUEUED", ...nackResult }; } - } else { - //it will continue running because the retry delay is short - await this.#createExecutionSnapshot(prisma, { - run, - snapshot: { - executionStatus: "PENDING_EXECUTING", - description: "Attempt failed wth a short delay, starting a new attempt.", - }, - }); - //the worker can fetch the latest snapshot and should create a new attempt - await this.#sendNotificationToWorker({ runId }); } - return "RETRY_IMMEDIATELY"; + //it will continue running because the retry delay is short + const newSnapshot = await this.#createExecutionSnapshot(prisma, { + run, + snapshot: { + executionStatus: "PENDING_EXECUTING", + description: "Attempt failed wth a short delay, starting a new attempt.", + }, + }); + //the worker 
can fetch the latest snapshot and should create a new attempt + await this.#sendNotificationToWorker({ runId }); + + return { + attemptStatus: "RETRY_IMMEDIATELY", + ...executionResultFromSnapshot(newSnapshot), + }; } const status = runStatusFromError(completion.error); @@ -2106,7 +2140,7 @@ export class RunEngine { }, }); - await this.#createExecutionSnapshot(prisma, { + const newSnapshot = await this.#createExecutionSnapshot(prisma, { run, snapshot: { executionStatus: "FINISHED", @@ -2135,7 +2169,11 @@ export class RunEngine { }, }); - return "COMPLETED" as const; + return { + attemptStatus: "RUN_FINISHED", + snapshot: newSnapshot, + run, + }; }); }); } @@ -2201,7 +2239,7 @@ export class RunEngine { timestamp?: number; error: TaskRunInternalError; tx?: PrismaClientOrTransaction; - }): Promise { + }): Promise<{ wasRequeued: boolean } & ExecutionResult> { const prisma = tx ?? this.prisma; return await this.runLock.lock([run.id], 5000, async (signal) => { @@ -2209,12 +2247,12 @@ export class RunEngine { const gotRequeued = await this.runQueue.nackMessage(orgId, run.id, timestamp); if (!gotRequeued) { - await this.#systemFailure({ + const result = await this.#systemFailure({ runId: run.id, error, tx: prisma, }); - return false; + return { wasRequeued: false, ...result }; } const newSnapshot = await this.#createExecutionSnapshot(prisma, { @@ -2225,7 +2263,19 @@ export class RunEngine { }, }); - return true; + return { + wasRequeued: true, + snapshot: { + id: newSnapshot.id, + executionStatus: newSnapshot.executionStatus, + description: newSnapshot.description, + }, + run: { + id: newSnapshot.runId, + status: newSnapshot.runStatus, + attemptNumber: newSnapshot.attemptNumber, + }, + }; }); } @@ -2237,9 +2287,9 @@ export class RunEngine { const prisma = tx ?? 
this.prisma; await this.runLock.lock([run.id], 5000, async (signal) => { - const snapshot = await this.#getLatestExecutionSnapshot(prisma, run.id); + const snapshot = await getLatestExecutionSnapshot(prisma, run.id); - const completedWaitpoints = await this.#getExecutionSnapshotCompletedWaitpoints( + const completedWaitpoints = await getExecutionSnapshotCompletedWaitpoints( prisma, snapshot.id ); @@ -2553,63 +2603,6 @@ export class RunEngine { } } - /* Gets the most recent valid snapshot for a run */ - async #getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { - const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { runId, isValid: true }, - include: { - completedWaitpoints: true, - checkpoint: true, - }, - orderBy: { createdAt: "desc" }, - }); - - if (!snapshot) { - throw new Error(`No execution snapshot found for TaskRun ${runId}`); - } - - return { - ...snapshot, - completedWaitpoints: snapshot.completedWaitpoints.map((w) => ({ - id: w.id, - type: w.type, - completedAt: w.completedAt ?? new Date(), - idempotencyKey: - w.userProvidedIdempotencyKey && !w.inactiveIdempotencyKey ? w.idempotencyKey : undefined, - completedByTaskRunId: w.completedByTaskRunId ?? undefined, - completedAfter: w.completedAfter ?? undefined, - output: w.output ?? undefined, - outputType: w.outputType, - outputIsError: w.outputIsError, - })), - }; - } - - async #getExecutionSnapshotCompletedWaitpoints( - prisma: PrismaClientOrTransaction, - snapshotId: string - ) { - const waitpoints = await prisma.taskRunExecutionSnapshot.findFirst({ - where: { id: snapshotId }, - include: { - completedWaitpoints: true, - }, - }); - - //deduplicate waitpoints - const waitpointIds = new Set(); - return ( - waitpoints?.completedWaitpoints.filter((waitpoint) => { - if (waitpointIds.has(waitpoint.id)) { - return false; - } else { - waitpointIds.add(waitpoint.id); - return true; - } - }) ?? 
[] - ); - } - //#endregion //#region Heartbeat @@ -2647,7 +2640,7 @@ export class RunEngine { }) { const prisma = tx ?? this.prisma; return await this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await this.#getLatestExecutionSnapshot(prisma, runId); + const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); if (latestSnapshot.id !== snapshotId) { this.logger.log( "RunEngine.#handleStalledSnapshot() no longer the latest snapshot, stopping the heartbeat.", diff --git a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts index cda76200cc..8567c874f2 100644 --- a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts +++ b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts @@ -135,7 +135,7 @@ describe("RunEngine cancelling", () => { completedAt: new Date(), reason: "Cancelled by the user", }); - expect(result).toBe("PENDING_CANCEL"); + expect(result.snapshot.executionStatus).toBe("PENDING_CANCEL"); //check a worker notification was sent for the running parent expect(workerNotifications).toHaveLength(1); @@ -200,6 +200,8 @@ describe("RunEngine cancelling", () => { }, }, }); + expect(completeChildResult.snapshot.executionStatus).toBe("FINISHED"); + expect(completeChildResult.run.status).toBe("CANCELED"); //child should now be pending cancel const childExecutionDataCancelled = await engine.getRunExecutionData({ @@ -305,7 +307,7 @@ describe("RunEngine cancelling", () => { completedAt: new Date(), reason: "Cancelled by the user", }); - expect(result).toBe("FINISHED"); + expect(result.snapshot.executionStatus).toBe("FINISHED"); const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); expect(executionData?.snapshot.executionStatus).toBe("FINISHED"); diff --git a/internal-packages/run-engine/src/engine/tests/trigger.test.ts b/internal-packages/run-engine/src/engine/tests/trigger.test.ts index 
0882dec08c..84e5951c57 100644 --- a/internal-packages/run-engine/src/engine/tests/trigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/trigger.test.ts @@ -164,7 +164,10 @@ describe("RunEngine trigger()", () => { outputType: "application/json", }, }); - expect(result).toBe("COMPLETED"); + expect(result.attemptStatus).toBe("RUN_FINISHED"); + expect(result.snapshot.executionStatus).toBe("FINISHED"); + expect(result.run.attemptNumber).toBe(1); + expect(result.run.status).toBe("COMPLETED_SUCCESSFULLY"); //state should be completed const executionData3 = await engine.getRunExecutionData({ runId: run.id }); @@ -292,7 +295,10 @@ describe("RunEngine trigger()", () => { error, }, }); - expect(result).toBe("COMPLETED"); + expect(result.attemptStatus).toBe("RUN_FINISHED"); + expect(result.snapshot.executionStatus).toBe("FINISHED"); + expect(result.run.attemptNumber).toBe(1); + expect(result.run.status).toBe("COMPLETED_WITH_ERRORS"); //state should be completed const executionData3 = await engine.getRunExecutionData({ runId: run.id }); @@ -422,7 +428,9 @@ describe("RunEngine trigger()", () => { }, }, }); - expect(result).toBe("RETRY_IMMEDIATELY"); + expect(result.attemptStatus).toBe("RETRY_IMMEDIATELY"); + expect(result.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + expect(result.run.status).toBe("RETRYING_AFTER_FAILURE"); //state should be completed const executionData3 = await engine.getRunExecutionData({ runId: run.id }); @@ -450,6 +458,9 @@ describe("RunEngine trigger()", () => { outputType: "application/json", }, }); + expect(result2.snapshot.executionStatus).toBe("FINISHED"); + expect(result2.run.attemptNumber).toBe(2); + expect(result2.run.status).toBe("COMPLETED_SUCCESSFULLY"); //waitpoint should have been completed, with the output const runWaitpointAfter = await prisma.waitpoint.findMany({ diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 
0277943497..d60b2a4962 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -214,7 +214,10 @@ describe("RunEngine Waitpoints", () => { }, }, }); - expect(failResult).toBe("RETRY_IMMEDIATELY"); + expect(failResult.attemptStatus).toBe("RETRY_IMMEDIATELY"); + expect(failResult.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + expect(failResult.run.attemptNumber).toBe(1); + expect(failResult.run.status).toBe("RETRYING_AFTER_FAILURE"); const executionData2 = await engine.getRunExecutionData({ runId: run.id }); assertNonNullable(executionData2); diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index 76dda779ff..7b4cfa5dea 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -1,5 +1,5 @@ import { z } from "zod"; -import { MachinePreset } from "./common.js"; +import { MachinePreset, TaskRunExecution } from "./common.js"; import { EnvironmentType } from "./schemas.js"; import type * as DB_TYPES from "@trigger.dev/database"; @@ -61,12 +61,29 @@ const CompletedWaitpoint = z.object({ outputIsError: z.boolean(), }); +const ExecutionSnapshot = z.object({ + id: z.string(), + executionStatus: z.enum(Object.values(TaskRunExecutionStatus) as [TaskRunExecutionStatus]), + description: z.string(), +}); + +const BaseRunMetadata = z.object({ + id: z.string(), + status: z.enum(Object.values(TaskRunStatus) as [TaskRunStatus]), + attemptNumber: z.number().nullish(), +}); + +export const ExecutionResult = z.object({ + snapshot: ExecutionSnapshot, + run: BaseRunMetadata, +}); + +export type ExecutionResult = z.infer; + /** This is sent to a Worker when a run is dequeued (a new run or continuing run) */ export const DequeuedMessage = z.object({ version: z.literal("1"), - snapshot: z.object({ - id: z.string(), - }), + snapshot: ExecutionSnapshot, image: z.string().optional(), checkpoint: z 
.object({ @@ -103,18 +120,35 @@ export const DequeuedMessage = z.object({ }); export type DequeuedMessage = z.infer; +/** The response to the Worker when starting an attempt */ +export type StartRunAttemptResult = ExecutionResult & { + execution: TaskRunExecution; +}; + +/** The response to the Worker when completing an attempt */ +const CompleteAttemptStatus = z.enum([ + "RUN_FINISHED", + "RUN_PENDING_CANCEL", + "RETRY_QUEUED", + "RETRY_IMMEDIATELY", +]); + +export const CompleteRunAttemptResult = z + .object({ + attemptStatus: CompleteAttemptStatus, + }) + .and(ExecutionResult); +export type CompleteRunAttemptResult = z.infer; + +/** The response when cancelling a run. */ +export const CancelRunResult = ExecutionResult; +export type CancelRunResult = z.infer; + +/** The response when a Worker asks for the latest execution state */ export const RunExecutionData = z.object({ version: z.literal("1"), - snapshot: z.object({ - id: z.string(), - executionStatus: z.enum(Object.values(TaskRunExecutionStatus) as [TaskRunExecutionStatus]), - description: z.string(), - }), - run: z.object({ - id: z.string(), - status: z.enum(Object.values(TaskRunStatus) as [TaskRunStatus]), - attemptNumber: z.number().optional(), - }), + snapshot: ExecutionSnapshot, + run: BaseRunMetadata, checkpoint: z .object({ id: z.string(), @@ -126,8 +160,4 @@ export const RunExecutionData = z.object({ .optional(), completedWaitpoints: z.array(CompletedWaitpoint), }); - export type RunExecutionData = z.infer; - -export const CompleteAttemptResult = z.enum(["COMPLETED", "RETRY_QUEUED", "RETRY_IMMEDIATELY"]); -export type CompleteAttemptResult = z.infer; diff --git a/packages/trigger-sdk/src/package.json b/packages/trigger-sdk/src/package.json new file mode 100644 index 0000000000..5bbefffbab --- /dev/null +++ b/packages/trigger-sdk/src/package.json @@ -0,0 +1,3 @@ +{ + "type": "commonjs" +} From 766c4ace663bf6bff58fe36feb3abd2714372667 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 
17:33:34 +0000 Subject: [PATCH 187/485] Latest lockfile --- pnpm-lock.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f12de8d925..541791a32e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1148,9 +1148,6 @@ importers: '@trigger.dev/core': specifier: workspace:3.1.2 version: link:../core - '@trigger.dev/worker': - specifier: workspace:3.1.2 - version: link:../worker c12: specifier: ^1.11.1 version: 1.11.1(magicast@0.3.4) @@ -1391,7 +1388,7 @@ importers: specifier: ^0.15.4 version: 0.15.4 '@trigger.dev/database': - specifier: workspace:^ + specifier: workspace:* version: link:../../internal-packages/database '@types/humanize-duration': specifier: ^3.27.1 From f6208de3acad25313d9a0586fed479651060f259 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 17:56:37 +0000 Subject: [PATCH 188/485] Wait for duration explicit return type, including the execution result --- .../run-engine/src/engine/index.ts | 40 ++++++++++--------- .../src/engine/tests/waitpoints.test.ts | 5 ++- packages/core/src/v3/schemas/runEngine.ts | 15 +++++-- 3 files changed, 36 insertions(+), 24 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index af8a53d767..67db692f51 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -3,7 +3,6 @@ import { Attributes, Span, SpanKind, trace, Tracer } from "@opentelemetry/api"; import { assertExhaustive } from "@trigger.dev/core"; import { Logger } from "@trigger.dev/core/logger"; import { - CancelRunResult, CompleteRunAttemptResult, DequeuedMessage, ExecutionResult, @@ -19,6 +18,7 @@ import { TaskRunFailedExecutionResult, TaskRunInternalError, TaskRunSuccessfulExecutionResult, + WaitForDurationResult, } from "@trigger.dev/core/v3"; import { generateFriendlyId, @@ -1100,9 +1100,7 @@ export class RunEngine { releaseConcurrency?: boolean; 
idempotencyKey?: string; tx?: PrismaClientOrTransaction; - }): Promise<{ - willWaitUntil: Date; - }> { + }): Promise { const prisma = tx ?? this.prisma; return await this.runLock.lock([runId], 5_000, async (signal) => { @@ -1151,11 +1149,14 @@ export class RunEngine { //waitpoint already completed, so we don't need to wait if (waitpoint.status === "COMPLETED") { - return { willWaitUntil: waitpoint.completedAt ?? new Date() }; + return { + waitUntil: waitpoint.completedAt ?? new Date(), + ...executionResultFromSnapshot(snapshot), + }; } //block the run - await this.blockRunWithWaitpoint({ + const blockResult = await this.blockRunWithWaitpoint({ runId, waitpointId: waitpoint.id, environmentId: waitpoint.environmentId, @@ -1171,7 +1172,8 @@ export class RunEngine { ); return { - willWaitUntil: date, + waitUntil: date, + ...executionResultFromSnapshot(blockResult), }; }); } @@ -1260,7 +1262,7 @@ export class RunEngine { reason?: string; finalizeRun?: boolean; tx?: PrismaClientOrTransaction; - }): Promise { + }): Promise { const prisma = tx ?? this.prisma; reason = reason ?? "Cancelled by user"; @@ -1527,10 +1529,10 @@ export class RunEngine { projectId: string; failAfter?: Date; tx?: PrismaClientOrTransaction; - }) { + }): Promise { const prisma = tx ?? 
this.prisma; - await this.runLock.lock([runId], 5000, async (signal) => { + return await this.runLock.lock([runId], 5000, async (signal) => { const taskWaitpoint = await prisma.taskRunWaitpoint.create({ data: { taskRunId: runId, @@ -1539,23 +1541,23 @@ export class RunEngine { }, }); - const latestSnapshot = await getLatestExecutionSnapshot(prisma, runId); + let snapshot: TaskRunExecutionSnapshot = await getLatestExecutionSnapshot(prisma, runId); let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; if ( - latestSnapshot.executionStatus === "EXECUTING" || - latestSnapshot.executionStatus === "EXECUTING_WITH_WAITPOINTS" + snapshot.executionStatus === "EXECUTING" || + snapshot.executionStatus === "EXECUTING_WITH_WAITPOINTS" ) { newStatus = "EXECUTING_WITH_WAITPOINTS"; } //if the state has changed, create a new snapshot - if (newStatus !== latestSnapshot.executionStatus) { - await this.#createExecutionSnapshot(prisma, { + if (newStatus !== snapshot.executionStatus) { + snapshot = await this.#createExecutionSnapshot(prisma, { run: { - id: latestSnapshot.runId, - status: latestSnapshot.runStatus, - attemptNumber: latestSnapshot.attemptNumber, + id: snapshot.runId, + status: snapshot.runStatus, + attemptNumber: snapshot.attemptNumber, }, snapshot: { executionStatus: newStatus, @@ -1572,6 +1574,8 @@ export class RunEngine { availableAt: failAfter, }); } + + return snapshot; }); } diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index d60b2a4962..0a11e4cb95 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -91,8 +91,9 @@ describe("RunEngine Waitpoints", () => { date, releaseConcurrency: false, }); - - expect(result.willWaitUntil.toISOString()).toBe(date.toISOString()); + expect(result.waitUntil.toISOString()).toBe(date.toISOString()); + 
expect(result.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + expect(result.run.status).toBe("EXECUTING"); const executionData = await engine.getRunExecutionData({ runId: run.id }); expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index 7b4cfa5dea..4343befc47 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -140,10 +140,6 @@ export const CompleteRunAttemptResult = z .and(ExecutionResult); export type CompleteRunAttemptResult = z.infer; -/** The response when cancelling a run. */ -export const CancelRunResult = ExecutionResult; -export type CancelRunResult = z.infer; - /** The response when a Worker asks for the latest execution state */ export const RunExecutionData = z.object({ version: z.literal("1"), @@ -161,3 +157,14 @@ export const RunExecutionData = z.object({ completedWaitpoints: z.array(CompletedWaitpoint), }); export type RunExecutionData = z.infer; + +export const WaitForDurationResult = z + .object({ + /** + If you pass an idempotencyKey, you may actually not need to wait. + Use this date to determine when to continue. 
+ */ + waitUntil: z.coerce.date(), + }) + .and(ExecutionResult); +export type WaitForDurationResult = z.infer; From 07999383e0356e125582cfa93ee50c0071cd6006 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 18:03:23 +0000 Subject: [PATCH 189/485] Recheduling returns the updated TaskRun --- .../run-engine/src/engine/index.ts | 144 +++++++++--------- .../src/engine/tests/delays.test.ts | 4 +- 2 files changed, 77 insertions(+), 71 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 67db692f51..554db42622 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -149,7 +149,7 @@ export class RunEngine { await this.#handleStalledSnapshot(payload); }, expireRun: async ({ payload }) => { - await this.expireRun({ runId: payload.runId }); + await this.#expireRun({ runId: payload.runId }); }, cancelRun: async ({ payload }) => { await this.cancelRun({ @@ -1178,72 +1178,6 @@ export class RunEngine { }); } - async expireRun({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { - const prisma = tx ?? 
this.prisma; - await this.runLock.lock([runId], 5_000, async (signal) => { - const snapshot = await getLatestExecutionSnapshot(prisma, runId); - - //if we're executing then we won't expire the run - if (isExecuting(snapshot.executionStatus)) { - return; - } - - //only expire "PENDING" runs - const run = await prisma.taskRun.findUnique({ where: { id: runId } }); - - if (!run) { - this.logger.debug("Could not find enqueued run to expire", { - runId, - }); - return; - } - - if (run.status !== "PENDING") { - this.logger.debug("Run cannot be expired because it's not in PENDING status", { - run, - }); - return; - } - - const error: TaskRunError = { - type: "STRING_ERROR", - raw: `Run expired because the TTL (${run.ttl}) was reached`, - }; - - const updatedRun = await prisma.taskRun.update({ - where: { id: runId }, - data: { - status: "EXPIRED", - completedAt: new Date(), - expiredAt: new Date(), - error, - executionSnapshots: { - create: { - engine: "V2", - executionStatus: "FINISHED", - description: "Run was expired because the TTL was reached", - runStatus: "EXPIRED", - }, - }, - }, - include: { - associatedWaitpoint: true, - }, - }); - - if (!updatedRun.associatedWaitpoint) { - throw new ServiceValidationError("No associated waitpoint found", 400); - } - - await this.completeWaitpoint({ - id: updatedRun.associatedWaitpoint.id, - output: { value: JSON.stringify(error), isError: true }, - }); - - this.eventBus.emit("runExpired", { run: updatedRun, time: new Date() }); - }); - } - /** Call this to cancel a run. If the run is in-progress it will change it's state to PENDING_CANCEL and notify the worker. 
@@ -1369,7 +1303,11 @@ export class RunEngine { }); } - async queueRunsWaitingForWorker({ backgroundWorkerId }: { backgroundWorkerId: string }) { + async queueRunsWaitingForWorker({ + backgroundWorkerId, + }: { + backgroundWorkerId: string; + }): Promise { //we want this to happen in the background await this.worker.enqueue({ job: "queueRunsWaitingForWorker", @@ -1388,10 +1326,10 @@ export class RunEngine { runId: string; delayUntil: Date; tx?: PrismaClientOrTransaction; - }) { + }): Promise { const prisma = tx ?? this.prisma; return this.#trace("rescheduleRun", { runId }, async (span) => { - await this.runLock.lock([runId], 5_000, async (signal) => { + return await this.runLock.lock([runId], 5_000, async (signal) => { const snapshot = await getLatestExecutionSnapshot(prisma, runId); //if the run isn't just created then we can't reschedule it @@ -1853,6 +1791,72 @@ export class RunEngine { }); } + async #expireRun({ runId, tx }: { runId: string; tx?: PrismaClientOrTransaction }) { + const prisma = tx ?? 
this.prisma; + await this.runLock.lock([runId], 5_000, async (signal) => { + const snapshot = await getLatestExecutionSnapshot(prisma, runId); + + //if we're executing then we won't expire the run + if (isExecuting(snapshot.executionStatus)) { + return; + } + + //only expire "PENDING" runs + const run = await prisma.taskRun.findUnique({ where: { id: runId } }); + + if (!run) { + this.logger.debug("Could not find enqueued run to expire", { + runId, + }); + return; + } + + if (run.status !== "PENDING") { + this.logger.debug("Run cannot be expired because it's not in PENDING status", { + run, + }); + return; + } + + const error: TaskRunError = { + type: "STRING_ERROR", + raw: `Run expired because the TTL (${run.ttl}) was reached`, + }; + + const updatedRun = await prisma.taskRun.update({ + where: { id: runId }, + data: { + status: "EXPIRED", + completedAt: new Date(), + expiredAt: new Date(), + error, + executionSnapshots: { + create: { + engine: "V2", + executionStatus: "FINISHED", + description: "Run was expired because the TTL was reached", + runStatus: "EXPIRED", + }, + }, + }, + include: { + associatedWaitpoint: true, + }, + }); + + if (!updatedRun.associatedWaitpoint) { + throw new ServiceValidationError("No associated waitpoint found", 400); + } + + await this.completeWaitpoint({ + id: updatedRun.associatedWaitpoint.id, + output: { value: JSON.stringify(error), isError: true }, + }); + + this.eventBus.emit("runExpired", { run: updatedRun, time: new Date() }); + }); + } + async #waitingForDeploy({ orgId, runId, diff --git a/internal-packages/run-engine/src/engine/tests/delays.test.ts b/internal-packages/run-engine/src/engine/tests/delays.test.ts index 0f35b37385..dcfbcb470c 100644 --- a/internal-packages/run-engine/src/engine/tests/delays.test.ts +++ b/internal-packages/run-engine/src/engine/tests/delays.test.ts @@ -163,7 +163,9 @@ describe("RunEngine delays", () => { assertNonNullable(executionData); 
expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); - await engine.rescheduleRun({ runId: run.id, delayUntil: new Date(Date.now() + 1_500) }); + const rescheduleTo = new Date(Date.now() + 1_500); + const updatedRun = await engine.rescheduleRun({ runId: run.id, delayUntil: rescheduleTo }); + expect(updatedRun.delayUntil?.toISOString()).toBe(rescheduleTo.toISOString()); //wait so the initial delay passes await setTimeout(1_000); From 77b7d0f55c73b68ae92d1e23e9f1fe1a16024038 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 18:40:55 +0000 Subject: [PATCH 190/485] =?UTF-8?q?Added=20a=20friendlyId=20to=20Waitpoint?= =?UTF-8?q?s=20now=20we=E2=80=99re=20going=20to=20have=20a=20public=20API?= =?UTF-8?q?=20for=20them?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal-packages/database/prisma/schema.prisma | 2 ++ internal-packages/run-engine/src/engine/index.ts | 8 ++++++-- .../run-engine/src/engine/tests/waitpoints.test.ts | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index ce6500ede7..b35d4732cb 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1998,6 +1998,8 @@ enum TaskRunCheckpointType { model Waitpoint { id String @id @default(cuid()) + friendlyId String @unique + type WaitpointType status WaitpointStatus @default(PENDING) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 554db42622..96b6e614d2 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -35,6 +35,7 @@ import { TaskRunExecutionSnapshot, TaskRunExecutionStatus, TaskRunStatus, + Waitpoint, } from "@trigger.dev/database"; import assertNever from "assert-never"; import { Redis } from "ioredis"; @@ -1378,7 
+1379,7 @@ export class RunEngine { }); } - async lengthOfEnvQueue(environment: MinimalAuthenticatedEnvironment) { + async lengthOfEnvQueue(environment: MinimalAuthenticatedEnvironment): Promise { return this.runQueue.lengthOfEnvQueue(environment); } @@ -1393,7 +1394,7 @@ export class RunEngine { environmentId: string; projectId: string; idempotencyKey?: string; - }) { + }): Promise { const existingWaitpoint = idempotencyKey ? await this.prisma.waitpoint.findUnique({ where: { @@ -1411,6 +1412,7 @@ export class RunEngine { return this.prisma.waitpoint.create({ data: { + friendlyId: generateFriendlyId("waitpoint"), type: "MANUAL", idempotencyKey: idempotencyKey ?? nanoid(24), userProvidedIdempotencyKey: !!idempotencyKey, @@ -2418,6 +2420,7 @@ export class RunEngine { ) { return tx.waitpoint.create({ data: { + friendlyId: generateFriendlyId("waitpoint"), type: "RUN", status: "PENDING", idempotencyKey: nanoid(24), @@ -2440,6 +2443,7 @@ export class RunEngine { ) { const waitpoint = await tx.waitpoint.create({ data: { + friendlyId: generateFriendlyId("waitpoint"), type: "DATETIME", status: "PENDING", idempotencyKey: idempotencyKey ?? 
nanoid(24), diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 0a11e4cb95..b3996d0491 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -324,6 +324,7 @@ describe("RunEngine Waitpoints", () => { environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, }); + expect(waitpoint.status).toBe("PENDING"); //block the run await engine.blockRunWithWaitpoint({ From 04dc85fa07ed909db1297b30dfd0abfb4f9280e5 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 18:50:30 +0000 Subject: [PATCH 191/485] A couple more return types --- .../run-engine/src/engine/index.ts | 25 +++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 96b6e614d2..43b847a88c 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1430,7 +1430,7 @@ export class RunEngine { environmentId: string; projectId: string; waitpointId: string; - }) { + }): Promise { const waitpoint = await this.prisma.waitpoint.findFirst({ where: { id: waitpointId }, include: { @@ -1531,7 +1531,7 @@ export class RunEngine { type?: string; isError: boolean; }; - }) { + }): Promise { const waitpoint = await this.prisma.waitpoint.findUnique({ where: { id }, }); @@ -1540,7 +1540,7 @@ export class RunEngine { throw new Error(`Waitpoint ${id} not found`); } - await $transaction( + const updatedWaitpoint = await $transaction( this.prisma, async (tx) => { // 1. Find the TaskRuns associated with this waitpoint @@ -1561,7 +1561,7 @@ export class RunEngine { }); // 3. 
Update the waitpoint status - await tx.waitpoint.update({ + const updatedWaitpoint = await tx.waitpoint.update({ where: { id }, data: { status: "COMPLETED", @@ -1611,6 +1611,8 @@ export class RunEngine { for (const run of taskRunsToResume) { await this.#continueRun(run, run.runtimeEnvironment, tx); } + + return updatedWaitpoint; }, (error) => { this.logger.error(`Error completing waitpoint ${id}, retrying`, { error }); @@ -1618,6 +1620,12 @@ export class RunEngine { }, { isolationLevel: Prisma.TransactionIsolationLevel.ReadCommitted } ); + + if (!updatedWaitpoint) { + throw new Error(`Waitpoint couldn't be updated`); + } + + return updatedWaitpoint; } async createCheckpoint({ @@ -1654,7 +1662,8 @@ export class RunEngine { /** Send a heartbeat to signal the the run is still executing. If a heartbeat isn't received, after a while the run is considered "stalled" - and some logic will be run to try recover it + and some logic will be run to try recover it. + @returns The ExecutionResult, which could be a different snapshot. */ async heartbeatRun({ runId, @@ -1664,7 +1673,7 @@ export class RunEngine { runId: string; snapshotId: string; tx?: PrismaClientOrTransaction; - }) { + }): Promise { const prisma = tx ?? 
this.prisma; //we don't need to acquire a run lock for any of this, it's not critical if it happens on an older version @@ -1677,7 +1686,7 @@ export class RunEngine { }); await this.worker.ack(`heartbeatSnapshot.${snapshotId}`); - return; + return executionResultFromSnapshot(latestSnapshot); } //update the snapshot heartbeat time @@ -1690,6 +1699,8 @@ export class RunEngine { //extending is the same as creating a new heartbeat await this.#setHeartbeatDeadline({ runId, snapshotId, status: latestSnapshot.executionStatus }); + + return executionResultFromSnapshot(latestSnapshot); } /** Get required data to execute the run */ From 808a6f9bfa21fce77de3b5b4d80fd79183f99a48 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 13 Nov 2024 18:54:35 +0000 Subject: [PATCH 192/485] Organized imports --- .../run-engine/src/engine/tests/batchTrigger.test.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts index aaa631f6b8..7e1c756de2 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -2,13 +2,11 @@ import { containerTest, setupAuthenticatedEnvironment, setupBackgroundWorker, - assertNonNullable, } from "@internal/testcontainers"; import { trace } from "@opentelemetry/api"; +import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; -import { EventBusEventArgs } from "../eventBus.js"; -import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; describe("RunEngine batchTrigger", () => { containerTest( From c71ac305ee7fb68f00c6bb58d1b3d6f0689df03b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 14 Nov 2024 12:22:04 +0000 Subject: [PATCH 193/485] Added a test for blocking a second parent with the same child run --- 
.../src/engine/tests/triggerAndWait.test.ts | 261 ++++++++++++++++++ 1 file changed, 261 insertions(+) diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 97122eb3f7..028d1bd6a8 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -190,4 +190,265 @@ describe("RunEngine triggerAndWait", () => { engine.quit(); } }); + + /** This happens if you `triggerAndWait` with an idempotencyKey if that run is in progress */ + containerTest( + "triggerAndWait two runs with shared awaited child", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + const childTask = "child-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); + + //trigger the run + const parentRun1 = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], 
+ }, + prisma + ); + + //dequeue parent and create the attempt + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun1.masterQueue, + maxRunCount: 10, + }); + const attemptResult = await engine.startRunAttempt({ + runId: parentRun1.id, + snapshotId: dequeued[0].snapshot.id, + }); + + //trigger the child + const childRun = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun1.id, + }, + prisma + ); + + const childExecutionData = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionData); + expect(childExecutionData.snapshot.executionStatus).toBe("QUEUED"); + + const parentExecutionData = await engine.getRunExecutionData({ runId: parentRun1.id }); + assertNonNullable(parentExecutionData); + expect(parentExecutionData.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //check the waitpoint blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun1.id, + }, + include: { + waitpoint: true, + }, + }); + assertNonNullable(runWaitpoint); + expect(runWaitpoint.waitpoint.type).toBe("RUN"); + expect(runWaitpoint.waitpoint.completedByTaskRunId).toBe(childRun.id); + + //dequeue the child run + const dequeuedChild = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: childRun.masterQueue, + maxRunCount: 10, + }); + + //start the child run + const childAttempt = await engine.startRunAttempt({ + runId: childRun.id, + snapshotId: dequeuedChild[0].snapshot.id, + }); + + //trigger a second parent run + const parentRun2 = await 
engine.trigger( + { + number: 2, + friendlyId: "run_p1235", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12346", + spanId: "s12346", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + //dequeue 2nd parent + const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun2.masterQueue, + maxRunCount: 10, + }); + + //create the 2nd parent attempt + const attemptResultParent2 = await engine.startRunAttempt({ + runId: parentRun2.id, + snapshotId: dequeued2[0].snapshot.id, + }); + + //block the 2nd parent run with the child + const childRunWithWaitpoint = await prisma.taskRun.findUniqueOrThrow({ + where: { id: childRun.id }, + include: { + associatedWaitpoint: true, + }, + }); + const blockedResult = await engine.blockRunWithWaitpoint({ + runId: parentRun2.id, + waitpointId: childRunWithWaitpoint.associatedWaitpoint!.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.project.id, + tx: prisma, + }); + expect(blockedResult.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + const parent2ExecutionData = await engine.getRunExecutionData({ runId: parentRun2.id }); + assertNonNullable(parent2ExecutionData); + expect(parent2ExecutionData.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + // complete the child run + await engine.completeRunAttempt({ + runId: childRun.id, + snapshotId: childAttempt.snapshot.id, + completion: { + id: childRun.id, + ok: true, + output: '{"foo":"bar"}', + outputType: "application/json", + }, + }); + + //child snapshot + const childExecutionDataAfter = await engine.getRunExecutionData({ runId: childRun.id }); + assertNonNullable(childExecutionDataAfter); + expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); + + const waitpointAfter = await 
prisma.waitpoint.findFirst({ + where: { + id: runWaitpoint.waitpointId, + }, + }); + expect(waitpointAfter?.completedAt).not.toBeNull(); + expect(waitpointAfter?.status).toBe("COMPLETED"); + expect(waitpointAfter?.output).toBe('{"foo":"bar"}'); + + const parent1RunWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun1.id, + }, + include: { + waitpoint: true, + }, + }); + expect(parent1RunWaitpointAfter).toBeNull(); + + const parent2RunWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: parentRun2.id, + }, + include: { + waitpoint: true, + }, + }); + expect(parent2RunWaitpointAfter).toBeNull(); + + //parent snapshot + const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun1.id }); + assertNonNullable(parentExecutionDataAfter); + expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("EXECUTING"); + expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); + expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + childRun.id + ); + expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); + + //parent 2 snapshot + const parent2ExecutionDataAfter = await engine.getRunExecutionData({ + runId: parentRun2.id, + }); + assertNonNullable(parent2ExecutionDataAfter); + expect(parent2ExecutionDataAfter.snapshot.executionStatus).toBe("EXECUTING"); + expect(parent2ExecutionDataAfter.completedWaitpoints?.length).toBe(1); + expect(parent2ExecutionDataAfter.completedWaitpoints![0].id).toBe( + childRunWithWaitpoint.associatedWaitpoint!.id + ); + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + childRun.id + ); + expect(parent2ExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); + } finally { + engine.quit(); + } + } + ); }); From 
6c3c9f0b3cac25667f342447d9b404bcae81ad00 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 14 Nov 2024 12:22:39 +0000 Subject: [PATCH 194/485] Modified triggerTaskV2 to work with the new parentRunId/resumeParentOnCompletion --- .../app/v3/services/triggerTaskV2.server.ts | 165 ++++-------------- packages/core/src/v3/schemas/api.ts | 20 +++ 2 files changed, 57 insertions(+), 128 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index a93c4a669f..3c4eff6d40 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -50,13 +50,14 @@ export class TriggerTaskServiceV2 extends WithRunEngine { : body.options?.ttl ?? (environment.type === "DEVELOPMENT" ? "10m" : undefined); const existingRun = idempotencyKey - ? await this._prisma.taskRun.findUnique({ + ? await this._prisma.taskRun.findFirst({ where: { - runtimeEnvironmentId_taskIdentifier_idempotencyKey: { - runtimeEnvironmentId: environment.id, - idempotencyKey, - taskIdentifier: taskId, - }, + runtimeEnvironmentId: environment.id, + idempotencyKey, + taskIdentifier: taskId, + }, + include: { + associatedWaitpoint: true, }, }) : undefined; @@ -64,6 +65,21 @@ export class TriggerTaskServiceV2 extends WithRunEngine { if (existingRun) { span.setAttribute("runId", existingRun.friendlyId); + //We're using `andWait` so we need to block the parent run with a waitpoint + if ( + existingRun.associatedWaitpoint?.status === "PENDING" && + body.options?.resumeParentOnCompletion && + body.options?.parentRunId + ) { + await this._engine.blockRunWithWaitpoint({ + runId: body.options.parentRunId, + waitpointId: existingRun.associatedWaitpoint.id, + environmentId: environment.id, + projectId: environment.projectId, + tx: this._prisma, + }); + } + return existingRun; } @@ -119,124 +135,23 @@ export class TriggerTaskServiceV2 extends WithRunEngine { ) : undefined; - const dependentAttempt 
= body.options?.dependentAttempt - ? await this._prisma.taskRunAttempt.findUnique({ - where: { friendlyId: body.options.dependentAttempt }, - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, + //todo we will pass in the `parentRun` and `resumeParentOnCompletion` + const parentRun = body.options?.parentRunId + ? await this._prisma.taskRun.findFirst({ + where: { id: body.options.parentRunId }, }) : undefined; - if ( - dependentAttempt && - (isFinalAttemptStatus(dependentAttempt.status) || - isFinalRunStatus(dependentAttempt.taskRun.status)) - ) { - logger.debug("Dependent attempt or run is in a terminal state", { - dependentAttempt: dependentAttempt, + if (parentRun && isFinalRunStatus(parentRun.status)) { + logger.debug("Parent run is in a terminal state", { + parentRun, }); - if (isFinalAttemptStatus(dependentAttempt.status)) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentAttempt.status}` - ); - } else { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent run has a status of ${dependentAttempt.taskRun.status}` - ); - } - } - - const parentAttempt = body.options?.parentAttempt - ? await this._prisma.taskRunAttempt.findUnique({ - where: { friendlyId: body.options.parentAttempt }, - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }) - : undefined; - - const dependentBatchRun = body.options?.dependentBatch - ? 
await this._prisma.batchTaskRun.findUnique({ - where: { friendlyId: body.options.dependentBatch }, - include: { - dependentTaskAttempt: { - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }, - }, - }) - : undefined; - - if ( - dependentBatchRun && - dependentBatchRun.dependentTaskAttempt && - (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status) || - isFinalRunStatus(dependentBatchRun.dependentTaskAttempt.taskRun.status)) - ) { - logger.debug("Dependent batch run task attempt or run has been canceled", { - dependentBatchRunId: dependentBatchRun.id, - status: dependentBatchRun.status, - attempt: dependentBatchRun.dependentTaskAttempt, - }); - - if (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status)) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentBatchRun.dependentTaskAttempt.status}` - ); - } else { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent run has a status of ${dependentBatchRun.dependentTaskAttempt.taskRun.status}` - ); - } + throw new ServiceValidationError( + `Cannot trigger ${taskId} as the parent run has a status of ${parentRun.status}` + ); } - const parentBatchRun = body.options?.parentBatch - ? await this._prisma.batchTaskRun.findUnique({ - where: { friendlyId: body.options.parentBatch }, - include: { - dependentTaskAttempt: { - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - }, - }, - }, - }, - }, - }) - : undefined; - return await eventRepository.traceEvent( taskId, { @@ -304,13 +219,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { } } - const depth = dependentAttempt - ? dependentAttempt.taskRun.depth + 1 - : parentAttempt - ? parentAttempt.taskRun.depth + 1 - : dependentBatchRun?.dependentTaskAttempt - ? 
dependentBatchRun.dependentTaskAttempt.taskRun.depth + 1 - : 0; + const depth = parentRun ? parentRun.depth + 1 : 0; event.setAttribute("runId", runFriendlyId); span.setAttribute("runId", runFriendlyId); @@ -357,10 +266,10 @@ export class TriggerTaskServiceV2 extends WithRunEngine { maxAttempts: body.options?.maxAttempts, ttl, tags: tagIds, - parentTaskRunId: parentAttempt?.taskRun.id, - rootTaskRunId: parentAttempt?.taskRun.rootTaskRunId ?? parentAttempt?.taskRun.id, - batchId: dependentBatchRun?.id ?? parentBatchRun?.id, - resumeParentOnCompletion: !!(dependentAttempt ?? dependentBatchRun), + parentTaskRunId: parentRun?.id, + rootTaskRunId: parentRun?.rootTaskRunId ?? undefined, + batchId: body.options?.parentBatch ?? undefined, + resumeParentOnCompletion: body.options?.resumeParentOnCompletion, depth, metadata: metadataPacket?.data, metadataType: metadataPacket?.dataType, diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index edeee39039..37cc65617e 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -68,10 +68,30 @@ export const TriggerTaskRequestBody = z.object({ context: z.any(), options: z .object({ + /** @deprecated engine v1 only */ dependentAttempt: z.string().optional(), + /** @deprecated engine v1 only */ parentAttempt: z.string().optional(), + /** @deprecated engine v1 only */ dependentBatch: z.string().optional(), + /** + * If triggered in a batch, this is the BatchTaskRun id + */ parentBatch: z.string().optional(), + /** + * RunEngine v2 + * If triggered inside another run, the parentRunId is the id of the parent run. + */ + parentRunId: z.string().optional(), + /** + * RunEngine v2 + * Should be `true` if `triggerAndWait` or `batchTriggerAndWait` + */ + resumeParentOnCompletion: z.boolean().optional(), + /** + * Locks the version to the passed value. 
+ * Automatically set when using `triggerAndWait` or `batchTriggerAndWait` + */ lockToVersion: z.string().optional(), queue: QueueOptions.optional(), concurrencyKey: z.string().optional(), From 81188ebe142f04681ccb01a440b9cefe3842a007 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 14 Nov 2024 16:03:33 +0000 Subject: [PATCH 195/485] Added TaskRun priorityMs column --- internal-packages/database/prisma/schema.prisma | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index b35d4732cb..d47c405442 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1734,6 +1734,9 @@ model TaskRun { lockedToVersion BackgroundWorker? @relation(fields: [lockedToVersionId], references: [id]) lockedToVersionId String? + /// The "priority" of the run. This is just an offset in ms for the queue timestamp + priorityMs Int @default(0) + concurrencyKey String? delayUntil DateTime? 
From 5e2f70a4c4fc7f43a67f7aa849570f6b408f5a50 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 14 Nov 2024 16:04:40 +0000 Subject: [PATCH 196/485] Migration for Waitpoint.friendlyId and TaskRun.priorityMs --- .../migration.sql | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql diff --git a/internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql b/internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql new file mode 100644 index 0000000000..344fd8f717 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql @@ -0,0 +1,15 @@ +/* + Warnings: + + - A unique constraint covering the columns `[friendlyId]` on the table `Waitpoint` will be added. If there are existing duplicate values, this will fail. + - Added the required column `friendlyId` to the `Waitpoint` table without a default value. This is not possible if the table is not empty. 
+ +*/ +-- AlterTable +ALTER TABLE "TaskRun" ADD COLUMN "priorityMs" INTEGER NOT NULL DEFAULT 0; + +-- AlterTable +ALTER TABLE "Waitpoint" ADD COLUMN "friendlyId" TEXT NOT NULL; + +-- CreateIndex +CREATE UNIQUE INDEX "Waitpoint_friendlyId_key" ON "Waitpoint"("friendlyId"); From e3d729cfbc32a330a9c692117c34e587dccd3209 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 13 Nov 2024 21:27:55 +0000 Subject: [PATCH 197/485] save cli config in pretty format --- packages/cli-v3/src/utilities/configFiles.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/cli-v3/src/utilities/configFiles.ts b/packages/cli-v3/src/utilities/configFiles.ts index dc009ff57e..0428991af6 100644 --- a/packages/cli-v3/src/utilities/configFiles.ts +++ b/packages/cli-v3/src/utilities/configFiles.ts @@ -78,7 +78,7 @@ export function writeAuthConfigFile(config: UserAuthConfigFile) { mkdirSync(path.dirname(authConfigFilePath), { recursive: true, }); - writeFileSync(path.join(authConfigFilePath), JSON.stringify(config), { + writeFileSync(path.join(authConfigFilePath), JSON.stringify(config, undefined, 2), { encoding: "utf-8", }); } From f6047d5054633dd0a757df0fb6754d773022e581 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 13 Nov 2024 21:32:27 +0000 Subject: [PATCH 198/485] fix worker schema import --- packages/worker/src/schemas.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/schemas.ts index 32955deeab..49c57433c1 100644 --- a/packages/worker/src/schemas.ts +++ b/packages/worker/src/schemas.ts @@ -1,6 +1,6 @@ import { z } from "zod"; import { - CompleteAttemptResult, + CompleteRunAttemptResult, DequeuedMessage, TaskRunExecutionResult, } from "@trigger.dev/core/v3"; @@ -50,7 +50,7 @@ export type WorkerApiRunAttemptCompleteRequestBody = z.infer< >; export const WorkerApiRunAttemptCompleteResponseBody = z.object({ 
- result: CompleteAttemptResult, + result: CompleteRunAttemptResult, }); export type WorkerApiRunAttemptCompleteResponseBody = z.infer< typeof WorkerApiRunAttemptCompleteResponseBody From 862339a1d425ac1f139af11501cae6458b65676c Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:31:00 +0000 Subject: [PATCH 199/485] cli profile switcher and other goodies --- .../app/routes/api.v1.deployments.latest.ts | 41 +++++ apps/webapp/app/routes/api.v1.workers.ts | 39 ++++- .../routeBuiilders/apiBuilder.server.ts | 134 +++++++++++++++- .../worker/workerGroupService.server.ts | 6 +- packages/cli-v3/src/apiClient.ts | 65 +++++++- packages/cli-v3/src/cli/common.ts | 8 +- packages/cli-v3/src/cli/index.ts | 4 + packages/cli-v3/src/commands/list-profiles.ts | 11 +- packages/cli-v3/src/commands/switch.ts | 89 +++++++++++ packages/cli-v3/src/commands/trigger.ts | 121 ++++++++++++++ .../cli-v3/src/commands/workers/create.ts | 135 ++++++++++++++++ packages/cli-v3/src/commands/workers/index.ts | 4 + packages/cli-v3/src/commands/workers/list.ts | 4 +- packages/cli-v3/src/commands/workers/run.ts | 151 ++++++++++++++++++ packages/cli-v3/src/utilities/configFiles.ts | 80 +++++++--- .../cli-v3/src/utilities/initialBanner.ts | 37 ++++- packages/core/src/v3/schemas/api.ts | 30 +++- 17 files changed, 905 insertions(+), 54 deletions(-) create mode 100644 apps/webapp/app/routes/api.v1.deployments.latest.ts create mode 100644 packages/cli-v3/src/commands/switch.ts create mode 100644 packages/cli-v3/src/commands/trigger.ts create mode 100644 packages/cli-v3/src/commands/workers/create.ts create mode 100644 packages/cli-v3/src/commands/workers/run.ts diff --git a/apps/webapp/app/routes/api.v1.deployments.latest.ts b/apps/webapp/app/routes/api.v1.deployments.latest.ts new file mode 100644 index 0000000000..6f31f58fcc --- /dev/null +++ b/apps/webapp/app/routes/api.v1.deployments.latest.ts @@ -0,0 +1,41 @@ +import { LoaderFunctionArgs, json } 
from "@remix-run/server-runtime"; +import { WorkerInstanceGroupType } from "@trigger.dev/database"; +import { prisma } from "~/db.server"; +import { authenticateApiRequest } from "~/services/apiAuth.server"; +import { logger } from "~/services/logger.server"; + +export async function loader({ request }: LoaderFunctionArgs) { + // Next authenticate the request + const authenticationResult = await authenticateApiRequest(request); + + if (!authenticationResult) { + logger.info("Invalid or missing api key", { url: request.url }); + return json({ error: "Invalid or Missing API key" }, { status: 401 }); + } + + const authenticatedEnv = authenticationResult.environment; + + const deployment = await prisma.workerDeployment.findFirst({ + where: { + type: WorkerInstanceGroupType.UNMANAGED, + environmentId: authenticatedEnv.id, + }, + orderBy: { + createdAt: "desc", + }, + }); + + if (!deployment) { + return json({ error: "Deployment not found" }, { status: 404 }); + } + + return json({ + id: deployment.friendlyId, + status: deployment.status, + contentHash: deployment.contentHash, + shortCode: deployment.shortCode, + version: deployment.version, + imageReference: deployment.imageReference, + errorData: deployment.errorData, + }); +} diff --git a/apps/webapp/app/routes/api.v1.workers.ts b/apps/webapp/app/routes/api.v1.workers.ts index 094fcf9e84..a6cc93527a 100644 --- a/apps/webapp/app/routes/api.v1.workers.ts +++ b/apps/webapp/app/routes/api.v1.workers.ts @@ -1,13 +1,20 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { ListWorkersResponseBody } from "@trigger.dev/core/v3"; -import { createLoaderApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; +import { + WorkersCreateRequestBody, + WorkersCreateResponseBody, + WorkersListResponseBody, +} from "@trigger.dev/core/v3"; +import { + createActionApiRoute, + createLoaderApiRoute, +} from "~/services/routeBuiilders/apiBuilder.server"; import { WorkerGroupService } from 
"~/v3/services/worker/workerGroupService.server"; export const loader = createLoaderApiRoute( { corsStrategy: "all", }, - async ({ authentication }): Promise> => { + async ({ authentication }): Promise> => { const service = new WorkerGroupService(); const workers = await service.listWorkerGroups({ projectId: authentication.environment.projectId, @@ -24,3 +31,29 @@ export const loader = createLoaderApiRoute( ); } ); + +export const action = createActionApiRoute( + { + corsStrategy: "all", + body: WorkersCreateRequestBody, + }, + async ({ authentication, body }): Promise> => { + const service = new WorkerGroupService(); + const { workerGroup, token } = await service.createWorkerGroup({ + projectId: authentication.environment.projectId, + organizationId: authentication.environment.organizationId, + name: body.name, + description: body.description, + }); + + return json({ + token: { + plaintext: token.plaintext, + }, + workerGroup: { + name: workerGroup.name, + description: workerGroup.description, + }, + }); + } +); diff --git a/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts b/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts index d47f7f7eec..695e1af6e7 100644 --- a/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts +++ b/apps/webapp/app/services/routeBuiilders/apiBuilder.server.ts @@ -20,10 +20,12 @@ import { type ApiKeyRouteBuilderOptions< TParamsSchema extends z.AnyZodObject | undefined = undefined, - TSearchParamsSchema extends z.AnyZodObject | undefined = undefined + TSearchParamsSchema extends z.AnyZodObject | undefined = undefined, + TBodySchema extends z.AnyZodObject | undefined = undefined > = { params?: TParamsSchema; searchParams?: TSearchParamsSchema; + body?: TBodySchema; allowJWT?: boolean; corsStrategy?: "all" | "none"; authorization?: { @@ -40,7 +42,8 @@ type ApiKeyRouteBuilderOptions< type ApiKeyHandlerFunction< TParamsSchema extends z.AnyZodObject | undefined, - TSearchParamsSchema extends z.AnyZodObject | 
undefined + TSearchParamsSchema extends z.AnyZodObject | undefined, + TBodySchema extends z.AnyZodObject | undefined = undefined > = (args: { params: TParamsSchema extends z.AnyZodObject ? z.infer : undefined; searchParams: TSearchParamsSchema extends z.AnyZodObject @@ -48,6 +51,7 @@ type ApiKeyHandlerFunction< : undefined; authentication: ApiAuthenticationResult; request: Request; + body: TBodySchema extends z.AnyZodObject ? z.infer : undefined; }) => Promise; export function createLoaderApiRoute< @@ -139,6 +143,132 @@ export function createLoaderApiRoute< searchParams: parsedSearchParams, authentication: authenticationResult, request, + body: undefined, + }); + return wrapResponse(request, result, corsStrategy !== "none"); + } catch (error) { + console.error("Error in API route:", error); + if (error instanceof Response) { + return wrapResponse(request, error, corsStrategy !== "none"); + } + return wrapResponse( + request, + json({ error: "Internal Server Error" }, { status: 500 }), + corsStrategy !== "none" + ); + } + }; +} + +export function createActionApiRoute< + TParamsSchema extends z.AnyZodObject | undefined = undefined, + TSearchParamsSchema extends z.AnyZodObject | undefined = undefined, + TBodySchema extends z.AnyZodObject | undefined = undefined +>( + options: ApiKeyRouteBuilderOptions, + handler: ApiKeyHandlerFunction +) { + return async function loader({ request, params }: LoaderFunctionArgs) { + const { + params: paramsSchema, + searchParams: searchParamsSchema, + body: bodySchema, + allowJWT = false, + corsStrategy = "none", + authorization, + } = options; + + if (corsStrategy !== "none" && request.method.toUpperCase() === "OPTIONS") { + return apiCors(request, json({})); + } + + const authenticationResult = await authenticateApiRequest(request, { allowJWT }); + + if (!authenticationResult) { + return wrapResponse( + request, + json({ error: "Invalid or Missing API key" }, { status: 401 }), + corsStrategy !== "none" + ); + } + + let parsedParams: 
any = undefined; + if (paramsSchema) { + const parsed = paramsSchema.safeParse(params); + if (!parsed.success) { + return wrapResponse( + request, + json( + { error: "Params Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ), + corsStrategy !== "none" + ); + } + parsedParams = parsed.data; + } + + let parsedSearchParams: any = undefined; + if (searchParamsSchema) { + const searchParams = Object.fromEntries(new URL(request.url).searchParams); + const parsed = searchParamsSchema.safeParse(searchParams); + if (!parsed.success) { + return wrapResponse( + request, + json( + { error: "Query Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ), + corsStrategy !== "none" + ); + } + parsedSearchParams = parsed.data; + } + + let parsedBody: any = undefined; + if (bodySchema) { + const body = await request.clone().json(); + const parsed = bodySchema.safeParse(body); + if (!parsed.success) { + return wrapResponse( + request, + json( + { error: "Body Error", details: fromZodError(parsed.error).details }, + { status: 400 } + ), + corsStrategy !== "none" + ); + } + parsedBody = parsed.data; + } + + if (authorization) { + const { action, resource, superScopes } = authorization; + const $resource = resource(parsedParams, parsedSearchParams); + + logger.debug("Checking authorization", { + action, + resource: $resource, + superScopes, + scopes: authenticationResult.scopes, + }); + + if (!checkAuthorization(authenticationResult, action, $resource, superScopes)) { + return wrapResponse( + request, + json({ error: "Unauthorized" }, { status: 403 }), + corsStrategy !== "none" + ); + } + } + + try { + const result = await handler({ + params: parsedParams, + searchParams: parsedSearchParams, + authentication: authenticationResult, + request, + body: parsedBody, }); return wrapResponse(request, result, corsStrategy !== "none"); } catch (error) { diff --git a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts 
b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts index 836537e2c2..28a2bb139d 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts @@ -12,13 +12,11 @@ export class WorkerGroupService extends WithRunEngine { organizationId, name, description, - type, }: { projectId?: string; organizationId?: string; name?: string; description?: string; - type?: WorkerInstanceGroupType; }) { name = name ?? (await this.generateWorkerName({ projectId })); @@ -32,9 +30,7 @@ export class WorkerGroupService extends WithRunEngine { data: { projectId, organizationId, - type: projectId - ? WorkerInstanceGroupType.UNMANAGED - : type ?? WorkerInstanceGroupType.SHARED, + type: projectId ? WorkerInstanceGroupType.UNMANAGED : WorkerInstanceGroupType.SHARED, masterQueue: this.generateMasterQueueName({ projectId, name }), tokenId: token.id, description, diff --git a/packages/cli-v3/src/apiClient.ts b/packages/cli-v3/src/apiClient.ts index 3349a68bc3..74d92ed303 100644 --- a/packages/cli-v3/src/apiClient.ts +++ b/packages/cli-v3/src/apiClient.ts @@ -20,7 +20,12 @@ import { FailDeploymentRequestBody, FailDeploymentResponseBody, FinalizeDeploymentRequestBody, - ListWorkersResponseBody, + WorkersListResponseBody, + WorkersCreateResponseBody, + WorkersCreateRequestBody, + TriggerTaskRequestBody, + TriggerTaskResponse, + GetLatestDeploymentResponseBody, } from "@trigger.dev/core/v3"; import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; @@ -303,22 +308,78 @@ export class CliApiClient { ); } + async triggerTaskRun(taskId: string, body?: TriggerTaskRequestBody) { + if (!this.accessToken) { + throw new Error("triggerTaskRun: No access token"); + } + + return wrapZodFetch(TriggerTaskResponse, `${this.apiURL}/api/v1/tasks/${taskId}/trigger`, { + method: "POST", + headers: { + Authorization: `Bearer ${this.accessToken}`, + Accept: "application/json", + }, + body: JSON.stringify(body ?? 
{}), + }); + } + get workers() { return { list: this.listWorkers.bind(this), + create: this.createWorker.bind(this), + }; + } + + get deployments() { + return { + unmanaged: { + latest: this.getLatestUnmanagedDeployment.bind(this), + }, }; } + private async getLatestUnmanagedDeployment() { + if (!this.accessToken) { + throw new Error("getLatestUnmanagedDeployment: No access token"); + } + + return wrapZodFetch( + GetLatestDeploymentResponseBody, + `${this.apiURL}/api/v1/deployments/latest`, + { + headers: { + Authorization: `Bearer ${this.accessToken}`, + Accept: "application/json", + }, + } + ); + } + private async listWorkers() { if (!this.accessToken) { throw new Error("listWorkers: No access token"); } - return wrapZodFetch(ListWorkersResponseBody, `${this.apiURL}/api/v1/workers`, { + return wrapZodFetch(WorkersListResponseBody, `${this.apiURL}/api/v1/workers`, { + headers: { + Authorization: `Bearer ${this.accessToken}`, + Accept: "application/json", + }, + }); + } + + private async createWorker(options: WorkersCreateRequestBody) { + if (!this.accessToken) { + throw new Error("createWorker: No access token"); + } + + return wrapZodFetch(WorkersCreateResponseBody, `${this.apiURL}/api/v1/workers`, { + method: "POST", headers: { Authorization: `Bearer ${this.accessToken}`, Accept: "application/json", }, + body: JSON.stringify(options), }); } } diff --git a/packages/cli-v3/src/cli/common.ts b/packages/cli-v3/src/cli/common.ts index cddde4736e..31fa09258c 100644 --- a/packages/cli-v3/src/cli/common.ts +++ b/packages/cli-v3/src/cli/common.ts @@ -7,20 +7,22 @@ import { fromZodError } from "zod-validation-error"; import { logger } from "../utilities/logger.js"; import { outro } from "@clack/prompts"; import { chalkError } from "../utilities/cliOutput.js"; +import { CLOUD_API_URL } from "../consts.js"; +import { readAuthConfigCurrentProfileName } from "../utilities/configFiles.js"; export const CommonCommandOptions = z.object({ apiUrl: z.string().optional(), logLevel: 
z.enum(["debug", "info", "log", "warn", "error", "none"]).default("log"), skipTelemetry: z.boolean().default(false), - profile: z.string().default("default"), + profile: z.string().default(readAuthConfigCurrentProfileName()), }); export type CommonCommandOptions = z.infer; export function commonOptions(command: Command) { return command - .option("--profile ", "The login profile to use", "default") - .option("-a, --api-url ", "Override the API URL", "https://api.trigger.dev") + .option("--profile ", "The login profile to use", readAuthConfigCurrentProfileName()) + .option("-a, --api-url ", "Override the API URL", CLOUD_API_URL) .option( "-l, --log-level ", "The CLI log level to use (debug, info, log, warn, error, none). This does not effect the log level of your trigger.dev tasks.", diff --git a/packages/cli-v3/src/cli/index.ts b/packages/cli-v3/src/cli/index.ts index a0b1b9dae7..8be6a60028 100644 --- a/packages/cli-v3/src/cli/index.ts +++ b/packages/cli-v3/src/cli/index.ts @@ -11,6 +11,8 @@ import { VERSION } from "../version.js"; import { configureDeployCommand } from "../commands/deploy.js"; import { installExitHandler } from "./common.js"; import { configureWorkersCommand } from "../commands/workers/index.js"; +import { configureSwitchProfilesCommand } from "../commands/switch.js"; +import { configureTriggerTaskCommand } from "../commands/trigger.js"; export const program = new Command(); @@ -26,7 +28,9 @@ configureDeployCommand(program); configureWhoamiCommand(program); configureLogoutCommand(program); configureListProfilesCommand(program); +configureSwitchProfilesCommand(program); configureUpdateCommand(program); configureWorkersCommand(program); +configureTriggerTaskCommand(program); installExitHandler(); diff --git a/packages/cli-v3/src/commands/list-profiles.ts b/packages/cli-v3/src/commands/list-profiles.ts index 0a0a0c34e9..ac0c7e12fd 100644 --- a/packages/cli-v3/src/commands/list-profiles.ts +++ b/packages/cli-v3/src/commands/list-profiles.ts @@ -7,7 
+7,10 @@ import { readAuthConfigFile } from "../utilities/configFiles.js"; import { printInitialBanner } from "../utilities/initialBanner.js"; import { logger } from "../utilities/logger.js"; -const ListProfilesOptions = CommonCommandOptions; +const ListProfilesOptions = CommonCommandOptions.pick({ + logLevel: true, + skipTelemetry: true, +}); type ListProfilesOptions = z.infer; @@ -43,12 +46,12 @@ export async function listProfiles(options: ListProfilesOptions) { return; } - const profiles = Object.keys(authConfig); + const profileNames = Object.keys(authConfig.profiles); log.message("Profiles:"); - for (const profile of profiles) { - const profileConfig = authConfig[profile]; + for (const profile of profileNames) { + const profileConfig = authConfig.profiles[profile]; log.info(`${profile}${profileConfig?.apiUrl ? ` - ${chalkGrey(profileConfig.apiUrl)}` : ""}`); } diff --git a/packages/cli-v3/src/commands/switch.ts b/packages/cli-v3/src/commands/switch.ts new file mode 100644 index 0000000000..f62d94099c --- /dev/null +++ b/packages/cli-v3/src/commands/switch.ts @@ -0,0 +1,89 @@ +import { intro, isCancel, outro, select } from "@clack/prompts"; +import { Command } from "commander"; +import { z } from "zod"; +import { + CommonCommandOptions, + handleTelemetry, + OutroCommandError, + wrapCommandAction, +} from "../cli/common.js"; +import { chalkGrey } from "../utilities/cliOutput.js"; +import { readAuthConfigFile, writeAuthConfigCurrentProfileName } from "../utilities/configFiles.js"; +import { printInitialBanner } from "../utilities/initialBanner.js"; +import { logger } from "../utilities/logger.js"; +import { CLOUD_API_URL } from "../consts.js"; + +const SwitchProfilesOptions = CommonCommandOptions.pick({ + logLevel: true, + skipTelemetry: true, +}); + +type SwitchProfilesOptions = z.infer; + +export function configureSwitchProfilesCommand(program: Command) { + return program + .command("switch") + .description("Set your default CLI profile") + .option( + "-l, 
--log-level ", + "The CLI log level to use (debug, info, log, warn, error, none). This does not effect the log level of your trigger.dev tasks.", + "log" + ) + .option("--skip-telemetry", "Opt-out of sending telemetry") + .action(async (options) => { + await handleTelemetry(async () => { + await switchProfilesCommand(options); + }); + }); +} + +export async function switchProfilesCommand(options: unknown) { + return await wrapCommandAction("switch", SwitchProfilesOptions, options, async (opts) => { + await printInitialBanner(false); + return await switchProfiles(opts); + }); +} + +export async function switchProfiles(options: SwitchProfilesOptions) { + intro("Switch profiles"); + + const authConfig = readAuthConfigFile(); + + if (!authConfig) { + logger.info("No profiles found"); + return; + } + + const profileNames = Object.keys(authConfig.profiles).sort((a, b) => { + // Default profile should always be first + if (a === authConfig.currentProfile) return -1; + if (b === authConfig.currentProfile) return 1; + + return a.localeCompare(b); + }); + + const profileSelection = await select({ + message: "Please select a new profile", + initialValue: authConfig.currentProfile, + options: profileNames.map((profile) => ({ + value: profile, + hint: authConfig.profiles[profile]?.apiUrl + ? authConfig.profiles[profile].apiUrl === CLOUD_API_URL + ? 
undefined + : chalkGrey(authConfig.profiles[profile].apiUrl) + : undefined, + })), + }); + + if (isCancel(profileSelection)) { + throw new OutroCommandError(); + } + + writeAuthConfigCurrentProfileName(profileSelection); + + if (profileSelection === authConfig.currentProfile) { + outro(`No change made`); + } else { + outro(`Switched to ${profileSelection}`); + } +} diff --git a/packages/cli-v3/src/commands/trigger.ts b/packages/cli-v3/src/commands/trigger.ts new file mode 100644 index 0000000000..1bf6425d6a --- /dev/null +++ b/packages/cli-v3/src/commands/trigger.ts @@ -0,0 +1,121 @@ +import { intro, outro } from "@clack/prompts"; +import { Command } from "commander"; +import { z } from "zod"; +import { CommonCommandOptions, handleTelemetry, wrapCommandAction } from "../cli/common.js"; +import { printInitialBanner } from "../utilities/initialBanner.js"; +import { logger } from "../utilities/logger.js"; +import { resolve } from "path"; +import { loadConfig } from "../config.js"; +import { getProjectClient } from "../utilities/session.js"; +import { login } from "./login.js"; +import { chalkGrey, chalkLink, cliLink } from "../utilities/cliOutput.js"; + +const TriggerTaskOptions = CommonCommandOptions.extend({ + env: z.enum(["prod", "staging"]), + config: z.string().optional(), + projectRef: z.string().optional(), +}); + +type TriggerTaskOptions = z.infer; + +export function configureTriggerTaskCommand(program: Command) { + return program + .command("trigger") + .description("Trigger a task") + .argument("[task-name]", "The name of the task") + .option( + "-l, --log-level ", + "The CLI log level to use (debug, info, log, warn, error, none). 
This does not effect the log level of your trigger.dev tasks.", + "log" + ) + .option("--skip-telemetry", "Opt-out of sending telemetry") + .option( + "-e, --env ", + "Deploy to a specific environment (currently only prod and staging are supported)", + "prod" + ) + .option("-c, --config ", "The name of the config file, found at [path]") + .option( + "-p, --project-ref ", + "The project ref. Required if there is no config file. This will override the project specified in the config file." + ) + .action(async (path, options) => { + await handleTelemetry(async () => { + await triggerTaskCommand(path, options); + }); + }); +} + +export async function triggerTaskCommand(taskName: string, options: unknown) { + return await wrapCommandAction("trigger", TriggerTaskOptions, options, async (opts) => { + await printInitialBanner(false); + return await triggerTask(taskName, opts); + }); +} + +export async function triggerTask(taskName: string, options: TriggerTaskOptions) { + if (!taskName) { + throw new Error("You must provide a task name"); + } + + intro(`Triggering task ${taskName}`); + + const authorization = await login({ + embedded: true, + defaultApiUrl: options.apiUrl, + profile: options.profile, + silent: true, + }); + + if (!authorization.ok) { + if (authorization.error === "fetch failed") { + throw new Error( + `Failed to connect to ${authorization.auth?.apiUrl}. Are you sure it's the correct URL?` + ); + } else { + throw new Error( + `You must login first. 
Use the \`login\` CLI command.\n\n${authorization.error}` + ); + } + } + + const projectPath = resolve(process.cwd(), "."); + + const resolvedConfig = await loadConfig({ + cwd: projectPath, + overrides: { project: options.projectRef }, + configFile: options.config, + }); + + logger.debug("Resolved config", resolvedConfig); + + const projectClient = await getProjectClient({ + accessToken: authorization.auth.accessToken, + apiUrl: authorization.auth.apiUrl, + projectRef: resolvedConfig.project, + env: options.env, + profile: options.profile, + }); + + if (!projectClient) { + throw new Error("Failed to get project client"); + } + + const triggered = await projectClient.client.triggerTaskRun(taskName, { + payload: { + message: "Triggered by CLI", + }, + }); + + if (!triggered.success) { + throw new Error("Failed to trigger task"); + } + + const baseUrl = `${authorization.dashboardUrl}/projects/v3/${resolvedConfig.project}`; + const runUrl = `${baseUrl}/runs/${triggered.data.id}`; + + const pipe = chalkGrey("|"); + const link = chalkLink(cliLink("View run", runUrl)); + + outro(`Success! 
${pipe} ${link}`); +} diff --git a/packages/cli-v3/src/commands/workers/create.ts b/packages/cli-v3/src/commands/workers/create.ts new file mode 100644 index 0000000000..d2e927bf07 --- /dev/null +++ b/packages/cli-v3/src/commands/workers/create.ts @@ -0,0 +1,135 @@ +import { Command } from "commander"; +import { printStandloneInitialBanner } from "../../utilities/initialBanner.js"; +import { + CommonCommandOptions, + commonOptions, + handleTelemetry, + OutroCommandError, + wrapCommandAction, +} from "../../cli/common.js"; +import { login } from "../login.js"; +import { loadConfig } from "../../config.js"; +import { resolve } from "path"; +import { getProjectClient } from "../../utilities/session.js"; +import { logger } from "../../utilities/logger.js"; +import { z } from "zod"; +import { intro, isCancel, outro, text } from "@clack/prompts"; + +const WorkersCreateCommandOptions = CommonCommandOptions.extend({ + env: z.enum(["prod", "staging"]), + config: z.string().optional(), + projectRef: z.string().optional(), +}); +type WorkersCreateCommandOptions = z.infer; + +export function configureWorkersCreateCommand(program: Command) { + return commonOptions( + program + .command("create") + .description("List all available workers") + .argument("[path]", "The path to the project", ".") + .option( + "-e, --env ", + "Deploy to a specific environment (currently only prod and staging are supported)", + "prod" + ) + .option("-c, --config ", "The name of the config file, found at [path]") + .option( + "-p, --project-ref ", + "The project ref. Required if there is no config file. This will override the project specified in the config file." 
+ ) + .action(async (path, options) => { + await handleTelemetry(async () => { + await printStandloneInitialBanner(true); + await workersCreateCommand(path, options); + }); + }) + ); +} + +async function workersCreateCommand(dir: string, options: unknown) { + return await wrapCommandAction( + "workerCreateCommand", + WorkersCreateCommandOptions, + options, + async (opts) => { + return await _workersCreateCommand(dir, opts); + } + ); +} + +async function _workersCreateCommand(dir: string, options: WorkersCreateCommandOptions) { + intro("Creating new worker group"); + + const authorization = await login({ + embedded: true, + defaultApiUrl: options.apiUrl, + profile: options.profile, + silent: true, + }); + + if (!authorization.ok) { + if (authorization.error === "fetch failed") { + throw new Error( + `Failed to connect to ${authorization.auth?.apiUrl}. Are you sure it's the correct URL?` + ); + } else { + throw new Error( + `You must login first. Use the \`login\` CLI command.\n\n${authorization.error}` + ); + } + } + + const projectPath = resolve(process.cwd(), dir); + + const resolvedConfig = await loadConfig({ + cwd: projectPath, + overrides: { project: options.projectRef }, + configFile: options.config, + }); + + logger.debug("Resolved config", resolvedConfig); + + const projectClient = await getProjectClient({ + accessToken: authorization.auth.accessToken, + apiUrl: authorization.auth.apiUrl, + projectRef: resolvedConfig.project, + env: options.env, + profile: options.profile, + }); + + if (!projectClient) { + throw new Error("Failed to get project client"); + } + + const name = await text({ + message: "What would you like to call the new worker?", + placeholder: "", + }); + + if (isCancel(name)) { + throw new OutroCommandError(); + } + + const description = await text({ + message: "What is the purpose of this worker?", + placeholder: "", + }); + + if (isCancel(description)) { + throw new OutroCommandError(); + } + + const newWorker = await 
projectClient.client.workers.create({ + name, + description, + }); + + if (!newWorker.success) { + throw new Error("Failed to create worker"); + } + + outro( + `Successfully created worker ${newWorker.data.workerGroup.name} with token ${newWorker.data.token.plaintext}` + ); +} diff --git a/packages/cli-v3/src/commands/workers/index.ts b/packages/cli-v3/src/commands/workers/index.ts index ad93e708e7..0cc3c7d84a 100644 --- a/packages/cli-v3/src/commands/workers/index.ts +++ b/packages/cli-v3/src/commands/workers/index.ts @@ -1,10 +1,14 @@ import { Command } from "commander"; import { configureWorkersListCommand } from "./list.js"; +import { configureWorkersCreateCommand } from "./create.js"; +import { configureWorkersRunCommand } from "./run.js"; export function configureWorkersCommand(program: Command) { const workers = program.command("workers").description("Subcommands for managing workers"); configureWorkersListCommand(workers); + configureWorkersCreateCommand(workers); + configureWorkersRunCommand(workers); return workers; } diff --git a/packages/cli-v3/src/commands/workers/list.ts b/packages/cli-v3/src/commands/workers/list.ts index cebe80b29c..47bab8c303 100644 --- a/packages/cli-v3/src/commands/workers/list.ts +++ b/packages/cli-v3/src/commands/workers/list.ts @@ -12,7 +12,7 @@ import { resolve } from "path"; import { getProjectClient } from "../../utilities/session.js"; import { logger } from "../../utilities/logger.js"; import { z } from "zod"; -import { intro, log } from "@clack/prompts"; +import { intro } from "@clack/prompts"; const WorkersListCommandOptions = CommonCommandOptions.extend({ env: z.enum(["prod", "staging"]), @@ -48,7 +48,7 @@ export function configureWorkersListCommand(program: Command) { async function workersListCommand(dir: string, options: unknown) { return await wrapCommandAction( - "workerBuildCommand", + "workerListCommand", WorkersListCommandOptions, options, async (opts) => { diff --git 
a/packages/cli-v3/src/commands/workers/run.ts b/packages/cli-v3/src/commands/workers/run.ts new file mode 100644 index 0000000000..f4e9ab7c55 --- /dev/null +++ b/packages/cli-v3/src/commands/workers/run.ts @@ -0,0 +1,151 @@ +import { Command } from "commander"; +import { printStandloneInitialBanner } from "../../utilities/initialBanner.js"; +import { + CommonCommandOptions, + commonOptions, + handleTelemetry, + wrapCommandAction, +} from "../../cli/common.js"; +import { login } from "../login.js"; +import { loadConfig } from "../../config.js"; +import { resolve } from "path"; +import { getProjectClient } from "../../utilities/session.js"; +import { logger } from "../../utilities/logger.js"; +import { z } from "zod"; +import { env } from "std-env"; +import { x } from "tinyexec"; + +const WorkersRunCommandOptions = CommonCommandOptions.extend({ + env: z.enum(["prod", "staging"]), + config: z.string().optional(), + projectRef: z.string().optional(), + token: z.string().default(env.TRIGGER_WORKER_TOKEN ?? ""), + network: z.enum(["default", "none", "host"]).default("default"), +}); +type WorkersRunCommandOptions = z.infer; + +export function configureWorkersRunCommand(program: Command) { + return commonOptions( + program + .command("run") + .description("Runs a worker locally") + .argument("[path]", "The path to the project", ".") + .option( + "-e, --env ", + "Deploy to a specific environment (currently only prod and staging are supported)", + "prod" + ) + .option("-c, --config ", "The name of the config file, found at [path]") + .option( + "-p, --project-ref ", + "The project ref. Required if there is no config file. This will override the project specified in the config file." 
+ ) + .option("-t, --token ", "The worker token to use for authentication") + .option("--network ", "The networking mode for the container", "host") + .action(async (path, options) => { + await handleTelemetry(async () => { + await printStandloneInitialBanner(true); + await workersRunCommand(path, options); + }); + }) + ); +} + +async function workersRunCommand(dir: string, options: unknown) { + return await wrapCommandAction( + "workerRunCommand", + WorkersRunCommandOptions, + options, + async (opts) => { + return await _workersRunCommand(dir, opts); + } + ); +} + +async function _workersRunCommand(dir: string, options: WorkersRunCommandOptions) { + if (!options.token) { + throw new Error( + "You must provide a worker token to run a worker locally. Either use the `--token` flag or set the `TRIGGER_WORKER_TOKEN` environment variable." + ); + } + + logger.log("Running worker locally"); + + const authorization = await login({ + embedded: true, + defaultApiUrl: options.apiUrl, + profile: options.profile, + silent: true, + }); + + if (!authorization.ok) { + if (authorization.error === "fetch failed") { + throw new Error( + `Failed to connect to ${authorization.auth?.apiUrl}. Are you sure it's the correct URL?` + ); + } else { + throw new Error( + `You must login first. 
Use the \`login\` CLI command.\n\n${authorization.error}` + ); + } + } + + const projectPath = resolve(process.cwd(), dir); + + const resolvedConfig = await loadConfig({ + cwd: projectPath, + overrides: { project: options.projectRef }, + configFile: options.config, + }); + + logger.debug("Resolved config", resolvedConfig); + + const projectClient = await getProjectClient({ + accessToken: authorization.auth.accessToken, + apiUrl: authorization.auth.apiUrl, + projectRef: resolvedConfig.project, + env: options.env, + profile: options.profile, + }); + + if (!projectClient) { + throw new Error("Failed to get project client"); + } + + const deployment = await projectClient.client.deployments.unmanaged.latest(); + + if (!deployment.success) { + throw new Error("Failed to get latest deployment"); + } + + const { version, imageReference } = deployment.data; + + if (!imageReference) { + throw new Error("No image reference found for the latest deployment"); + } + + logger.log(`Version ${version}`); + logger.log(`Image: ${imageReference}`); + + const command = "docker"; + const args = [ + "run", + "--rm", + "--network", + options.network, + "-e", + `TRIGGER_WORKER_TOKEN=${options.token}`, + "-e", + `TRIGGER_API_URL=${authorization.auth.apiUrl}`, + imageReference, + ]; + + logger.debug(`Command: ${command} ${args.join(" ")}`); + logger.log(); // spacing + + const proc = x("docker", args); + + for await (const line of proc) { + logger.log(line); + } +} diff --git a/packages/cli-v3/src/utilities/configFiles.ts b/packages/cli-v3/src/utilities/configFiles.ts index 0428991af6..51720ec899 100644 --- a/packages/cli-v3/src/utilities/configFiles.ts +++ b/packages/cli-v3/src/utilities/configFiles.ts @@ -11,69 +11,101 @@ function getGlobalConfigFolderPath() { return configDir; } -//auth config file -export const UserAuthConfigSchema = z.object({ +export const DEFFAULT_PROFILE = "default"; + +const CliConfigProfileSettings = z.object({ accessToken: z.string().optional(), apiUrl: 
z.string().optional(), }); +type CliConfigProfileSettings = z.infer; -export type UserAuthConfig = z.infer; - -const UserAuthConfigFileSchema = z.record(UserAuthConfigSchema); +const OldCliConfigFile = z.record(CliConfigProfileSettings); +type OldCliConfigFile = z.infer; -type UserAuthConfigFile = z.infer; +const CliConfigFile = z.object({ + version: z.literal(2), + currentProfile: z.string().default(DEFFAULT_PROFILE), + profiles: z.record(CliConfigProfileSettings), +}); +type CliConfigFile = z.infer; function getAuthConfigFilePath() { return path.join(getGlobalConfigFolderPath(), "default.json"); } -export function writeAuthConfigProfile(config: UserAuthConfig, profile: string = "default") { - const existingConfig = readAuthConfigFile() || {}; +export function writeAuthConfigCurrentProfileName(profile: string) { + const existingConfig = readAuthConfigFile(); - existingConfig[profile] = config; + existingConfig.currentProfile = profile; writeAuthConfigFile(existingConfig); } -export function readAuthConfigProfile(profile: string = "default"): UserAuthConfig | undefined { - try { - const authConfigFilePath = getAuthConfigFilePath(); +export function readAuthConfigCurrentProfileName(): string { + const existingConfig = readAuthConfigFile(); + return existingConfig.currentProfile; +} - logger.debug(`Reading auth config file`, { authConfigFilePath }); +export function writeAuthConfigProfile( + settings: CliConfigProfileSettings, + profile: string = DEFFAULT_PROFILE +) { + const existingConfig = readAuthConfigFile(); - const json = readJSONFileSync(authConfigFilePath); - const parsed = UserAuthConfigFileSchema.parse(json); - return parsed[profile]; + existingConfig.profiles[profile] = settings; + + writeAuthConfigFile(existingConfig); +} + +export function readAuthConfigProfile( + profile: string = DEFFAULT_PROFILE +): CliConfigProfileSettings | undefined { + try { + const config = readAuthConfigFile(); + return config.profiles[profile]; } catch (error) { 
logger.debug(`Error reading auth config file: ${error}`); return undefined; } } -export function deleteAuthConfigProfile(profile: string = "default") { - const existingConfig = readAuthConfigFile() || {}; +export function deleteAuthConfigProfile(profile: string = DEFFAULT_PROFILE) { + const existingConfig = readAuthConfigFile(); - delete existingConfig[profile]; + delete existingConfig.profiles[profile]; writeAuthConfigFile(existingConfig); } -export function readAuthConfigFile(): UserAuthConfigFile | undefined { +export function readAuthConfigFile(): CliConfigFile { try { const authConfigFilePath = getAuthConfigFilePath(); logger.debug(`Reading auth config file`, { authConfigFilePath }); const json = readJSONFileSync(authConfigFilePath); - const parsed = UserAuthConfigFileSchema.parse(json); - return parsed; + + if ("currentProfile" in json) { + // This is the new format + const parsed = CliConfigFile.parse(json); + return parsed; + } + + // This is the old format and we need to convert it + const parsed = OldCliConfigFile.parse(json); + + return { + version: 2, + currentProfile: DEFFAULT_PROFILE, + profiles: parsed, + }; } catch (error) { logger.debug(`Error reading auth config file: ${error}`); - return undefined; + throw new Error(`Error reading auth config file: ${error}`); } } -export function writeAuthConfigFile(config: UserAuthConfigFile) { +export function writeAuthConfigFile(config: CliConfigFile) { const authConfigFilePath = getAuthConfigFilePath(); mkdirSync(path.dirname(authConfigFilePath), { recursive: true, diff --git a/packages/cli-v3/src/utilities/initialBanner.ts b/packages/cli-v3/src/utilities/initialBanner.ts index 034f483291..20f4735597 100644 --- a/packages/cli-v3/src/utilities/initialBanner.ts +++ b/packages/cli-v3/src/utilities/initialBanner.ts @@ -4,9 +4,32 @@ import { VERSION } from "../version.js"; import { chalkGrey, chalkRun, chalkTask, chalkWorker, logo } from "./cliOutput.js"; import { logger } from "./logger.js"; import { spinner } 
from "./windows.js"; +import { + DEFFAULT_PROFILE, + readAuthConfigCurrentProfileName, + readAuthConfigProfile, +} from "./configFiles.js"; +import { CLOUD_API_URL } from "../consts.js"; + +function getProfileInfo() { + const currentProfile = readAuthConfigCurrentProfileName(); + const profile = readAuthConfigProfile(currentProfile); + + if (currentProfile === DEFFAULT_PROFILE || !profile) { + return; + } + + return `Profile: ${currentProfile}${ + profile.apiUrl === CLOUD_API_URL ? "" : ` - ${profile.apiUrl}` + }`; +} export async function printInitialBanner(performUpdateCheck = true) { - const text = `\n${logo()} ${chalkGrey(`(${VERSION})`)}\n`; + const profileInfo = getProfileInfo(); + + const text = `\n${logo()} ${chalkGrey(`(${VERSION})`)}${ + profileInfo ? chalkGrey(` | ${profileInfo}`) : "" + }\n`; logger.info(text); @@ -40,19 +63,23 @@ After installation, run Trigger.dev with \`npx trigger.dev\`.` } export async function printStandloneInitialBanner(performUpdateCheck = true) { + const profileInfo = getProfileInfo(); + const profileText = profileInfo ? 
chalkGrey(` | ${profileInfo}`) : ""; + + let versionText = `\n${logo()} ${chalkGrey(`(${VERSION})`)}`; + if (performUpdateCheck) { const maybeNewVersion = await updateCheck(); // Log a slightly more noticeable message if this is a major bump if (maybeNewVersion !== undefined) { - logger.log(`\n${logo()} ${chalkGrey(`(${VERSION} -> ${chalk.green(maybeNewVersion)})`)}`); + versionText = `\n${logo()} ${chalkGrey(`(${VERSION} -> ${chalk.green(maybeNewVersion)})`)}`; } else { - logger.log(`\n${logo()} ${chalkGrey(`(${VERSION})`)}`); + versionText = `\n${logo()} ${chalkGrey(`(${VERSION})`)}`; } - } else { - logger.log(`\n${logo()} ${chalkGrey(`(${VERSION})`)}`); } + logger.log(`${versionText}${profileText}`); logger.log(`${chalkGrey("-".repeat(54))}`); } diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 37cc65617e..ccaabc5378 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -273,7 +273,16 @@ export const GetDeploymentResponseBody = z.object({ export type GetDeploymentResponseBody = z.infer; -export const ListWorkersResponseBody = z +export const GetLatestDeploymentResponseBody = GetDeploymentResponseBody.omit({ + worker: true, +}); +export type GetLatestDeploymentResponseBody = z.infer; + +export const CreateUploadPayloadUrlResponseBody = z.object({ + presignedUrl: z.string(), +}); + +export const WorkersListResponseBody = z .object({ type: z.string(), name: z.string(), @@ -284,11 +293,24 @@ export const ListWorkersResponseBody = z updatedAt: z.coerce.date(), }) .array(); -export type ListWorkersResponseBody = z.infer; +export type WorkersListResponseBody = z.infer; -export const CreateUploadPayloadUrlResponseBody = z.object({ - presignedUrl: z.string(), +export const WorkersCreateRequestBody = z.object({ + name: z.string().optional(), + description: z.string().optional(), +}); +export type WorkersCreateRequestBody = z.infer; + +export const WorkersCreateResponseBody = z.object({ + 
workerGroup: z.object({ + name: z.string(), + description: z.string().nullish(), + }), + token: z.object({ + plaintext: z.string(), + }), }); +export type WorkersCreateResponseBody = z.infer; export type CreateUploadPayloadUrlResponseBody = z.infer; From 31993c47fa1ebb6bd3d015a3cab18964f6908980 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:31:32 +0000 Subject: [PATCH 200/485] add missing node-22 cases --- packages/core/src/v3/build/runtime.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/core/src/v3/build/runtime.ts b/packages/core/src/v3/build/runtime.ts index 2504d1e3a0..d03a49c484 100644 --- a/packages/core/src/v3/build/runtime.ts +++ b/packages/core/src/v3/build/runtime.ts @@ -7,6 +7,7 @@ export const DEFAULT_RUNTIME = "node" satisfies BuildRuntime; export function binaryForRuntime(runtime: BuildRuntime): string { switch (runtime) { case "node": + case "node-22": return "node"; case "bun": return "bun"; @@ -18,6 +19,7 @@ export function binaryForRuntime(runtime: BuildRuntime): string { export function execPathForRuntime(runtime: BuildRuntime): string { switch (runtime) { case "node": + case "node-22": return process.execPath; case "bun": if (typeof process.env.BUN_INSTALL === "string") { From a9db8c3207e53aad9c3a680dcf47198b1a5da9d5 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 14 Nov 2024 16:18:48 +0000 Subject: [PATCH 201/485] Better description of the priorityMs column --- internal-packages/database/prisma/schema.prisma | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index d47c405442..00eb62d5ef 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1734,7 +1734,8 @@ model TaskRun { lockedToVersion BackgroundWorker? 
@relation(fields: [lockedToVersionId], references: [id]) lockedToVersionId String? - /// The "priority" of the run. This is just an offset in ms for the queue timestamp + /// The "priority" of the run. This is just a negative offset in ms for the queue timestamp + /// E.g. a value of 60_000 would put the run into the queue 60s ago. priorityMs Int @default(0) concurrencyKey String? From 4f3c51da0a29211d0e6864f72862c3296dc042ab Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 14 Nov 2024 16:19:24 +0000 Subject: [PATCH 202/485] Use the priority when enqueuing runs --- .../run-engine/src/engine/index.ts | 26 ++++++++++++++----- .../run-engine/src/engine/types.ts | 1 + 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 43b847a88c..1d23e49ce4 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -205,6 +205,7 @@ export class RunEngine { delayUntil, queuedAt, maxAttempts, + priorityMs, ttl, tags, parentTaskRunId, @@ -264,6 +265,7 @@ export class RunEngine { delayUntil, queuedAt, maxAttempts, + priorityMs, ttl, tags: tags.length === 0 @@ -414,7 +416,12 @@ export class RunEngine { //enqueue the run if it's not delayed if (!taskRun.delayUntil) { - await this.#enqueueRun({ run: taskRun, env: environment, tx: prisma }); + await this.#enqueueRun({ + run: taskRun, + env: environment, + timestamp: Date.now() - taskRun.priorityMs, + tx: prisma, + }); } }); @@ -2210,7 +2217,7 @@ export class RunEngine { }: { run: TaskRun; env: MinimalAuthenticatedEnvironment; - timestamp?: number; + timestamp: number; tx?: PrismaClientOrTransaction; }) { const prisma = tx ?? this.prisma; @@ -2241,7 +2248,7 @@ export class RunEngine { environmentType: env.type, queue: run.queue, concurrencyKey: run.concurrencyKey ?? undefined, - timestamp: timestamp ?? 
Date.now(), + timestamp, attempt: 0, }, }); @@ -2341,9 +2348,14 @@ export class RunEngine { completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), }); - //put it back in the queue, with the original timestamp - //this will prioritise it over new runs - await this.#enqueueRun({ run, env, timestamp: run.createdAt.getTime(), tx: prisma }); + //put it back in the queue, with the original timestamp (w/ priority) + //this prioritizes dequeuing waiting runs over new runs + await this.#enqueueRun({ + run, + env, + timestamp: run.createdAt.getTime() - run.priorityMs, + tx: prisma, + }); } }); } @@ -2408,7 +2420,7 @@ export class RunEngine { env: backgroundWorker.runtimeEnvironment, //add to the queue using the original run created time //this should ensure they're in the correct order in the queue - timestamp: updatedRun.createdAt.getTime(), + timestamp: updatedRun.createdAt.getTime() - updatedRun.priorityMs, tx, }); }); diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index c6d598690f..fa7e368734 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -60,6 +60,7 @@ export type TriggerParams = { delayUntil?: Date; queuedAt?: Date; maxAttempts?: number; + priorityMs?: number; ttl?: string; tags: string[]; parentTaskRunId?: string; From 683ceb44c002b118f71ef821b96eb0b377503629 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 14 Nov 2024 18:03:40 +0000 Subject: [PATCH 203/485] Added a test for priorities --- .../src/engine/tests/priority.test.ts | 144 ++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 internal-packages/run-engine/src/engine/tests/priority.test.ts diff --git a/internal-packages/run-engine/src/engine/tests/priority.test.ts b/internal-packages/run-engine/src/engine/tests/priority.test.ts new file mode 100644 index 0000000000..a0df381a71 --- /dev/null +++ 
b/internal-packages/run-engine/src/engine/tests/priority.test.ts @@ -0,0 +1,144 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { PrismaClientOrTransaction } from "@trigger.dev/database"; +import { MinimalAuthenticatedEnvironment } from "../../shared/index.js"; +import { setTimeout } from "timers/promises"; + +describe("RunEngine priority", () => { + containerTest( + "Two runs execute in the correct order", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0005, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + //the order should be 4,3,1,0,2 + // 0 1 2 3 4 + const priorities = [undefined, 500, -1200, 1000, 4000]; + + //trigger the runs + const runs = await triggerRuns({ + engine, + environment: authenticatedEnvironment, + taskIdentifier, + prisma, + priorities, + }); + expect(runs.length).toBe(priorities.length); + + //check the queue length + const queueLength = await 
engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength).toBe(priorities.length); + + //dequeue (expect 4 items because of the negative priority) + const dequeue = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 20, + }); + expect(dequeue.length).toBe(4); + expect(dequeue[0].run.friendlyId).toBe(runs[4].friendlyId); + expect(dequeue[1].run.friendlyId).toBe(runs[3].friendlyId); + expect(dequeue[2].run.friendlyId).toBe(runs[1].friendlyId); + expect(dequeue[3].run.friendlyId).toBe(runs[0].friendlyId); + + //wait 2 seconds (because of the negative priority) + await setTimeout(2_000); + const dequeue2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 20, + }); + expect(dequeue2.length).toBe(1); + expect(dequeue2[0].run.friendlyId).toBe(runs[2].friendlyId); + } finally { + engine.quit(); + } + } + ); +}); + +async function triggerRuns({ + engine, + environment, + taskIdentifier, + priorities, + prisma, +}: { + engine: RunEngine; + environment: MinimalAuthenticatedEnvironment; + taskIdentifier: string; + prisma: PrismaClientOrTransaction; + priorities: (number | undefined)[]; +}) { + const runs = []; + for (let i = 0; i < priorities.length; i++) { + runs[i] = await engine.trigger( + { + number: i, + friendlyId: generateFriendlyId("run"), + environment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${taskIdentifier}`, + isTest: false, + tags: [], + priorityMs: priorities[i], + }, + prisma + ); + } + + return runs; +} From 8f87206bcbd0086ef63691fd7d3536db5e56f06b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 18 Nov 2024 17:32:07 +0000 Subject: [PATCH 204/485] create backup before migrating to new config format --- 
packages/cli-v3/src/utilities/configFiles.ts | 31 +++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/packages/cli-v3/src/utilities/configFiles.ts b/packages/cli-v3/src/utilities/configFiles.ts index 51720ec899..2a06358671 100644 --- a/packages/cli-v3/src/utilities/configFiles.ts +++ b/packages/cli-v3/src/utilities/configFiles.ts @@ -33,6 +33,11 @@ function getAuthConfigFilePath() { return path.join(getGlobalConfigFolderPath(), "default.json"); } +function getAuthConfigFileBackupPath() { + // Multiple calls won't overwrite old backups + return path.join(getGlobalConfigFolderPath(), `default.json.bak-${Date.now()}`); +} + export function writeAuthConfigCurrentProfileName(profile: string) { const existingConfig = readAuthConfigFile(); @@ -92,13 +97,21 @@ export function readAuthConfigFile(): CliConfigFile { } // This is the old format and we need to convert it - const parsed = OldCliConfigFile.parse(json); + const oldConfigFormat = OldCliConfigFile.parse(json); - return { + const newConfigFormat = { version: 2, currentProfile: DEFFAULT_PROFILE, - profiles: parsed, - }; + profiles: oldConfigFormat, + } satisfies CliConfigFile; + + // Save a backup + backupOldConfigFile(oldConfigFormat); + + // Then overwrite the old config with the new format + writeAuthConfigFile(newConfigFormat); + + return newConfigFormat; } catch (error) { logger.debug(`Error reading auth config file: ${error}`); throw new Error(`Error reading auth config file: ${error}`); @@ -114,3 +127,13 @@ export function writeAuthConfigFile(config: CliConfigFile) { encoding: "utf-8", }); } + +export function backupOldConfigFile(config: OldCliConfigFile) { + const authConfigFilePath = getAuthConfigFileBackupPath(); + mkdirSync(path.dirname(authConfigFilePath), { + recursive: true, + }); + writeFileSync(path.join(authConfigFilePath), JSON.stringify(config, undefined, 2), { + encoding: "utf-8", + }); +} From 4d5f9c04e17846d5d52d99033cd2bdf267a5e15a Mon Sep 17 00:00:00 2001 From: 
nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 19 Nov 2024 10:57:03 +0000 Subject: [PATCH 205/485] first eventbus handler to complete successful run spans --- apps/webapp/app/v3/runEngine.server.ts | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index d3552bd1dc..443cd0f5a3 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -3,13 +3,14 @@ import { prisma } from "~/db.server"; import { env } from "~/env.server"; import { tracer } from "./tracer.server"; import { singleton } from "~/utils/singleton"; +import { eventRepository } from "./eventRepository.server"; export const engine = singleton("RunEngine", createRunEngine); export type { RunEngine }; function createRunEngine() { - return new RunEngine({ + const engine = new RunEngine({ prisma, redis: { port: env.REDIS_PORT, @@ -38,4 +39,17 @@ function createRunEngine() { }, tracer, }); + + engine.eventBus.on("runSucceeded", async ({ time, run }) => { + await eventRepository.completeEvent(run.spanId, { + endTime: time, + attributes: { + isError: false, + output: run.output, + outputType: run.outputType, + }, + }); + }); + + return engine; } From 5423968a936edfc847e2f38a571f0c9fbcff1faf Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:11:44 +0000 Subject: [PATCH 206/485] return env vars when starting run attempts --- .../worker/workerGroupTokenService.server.ts | 101 +++++++++++++++--- packages/core/src/v3/schemas/runEngine.ts | 10 +- packages/worker/src/schemas.ts | 11 +- 3 files changed, 102 insertions(+), 20 deletions(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index c4791a7f1d..866766ca56 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts 
+++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -2,13 +2,28 @@ import { customAlphabet } from "nanoid"; import { WithRunEngine, WithRunEngineOptions } from "../baseService.server"; import { createHash, timingSafeEqual } from "crypto"; import { logger } from "~/services/logger.server"; -import { WorkerInstanceGroup, WorkerInstanceGroupType } from "@trigger.dev/database"; +import { + RuntimeEnvironment, + WorkerInstanceGroup, + WorkerInstanceGroupType, +} from "@trigger.dev/database"; import { z } from "zod"; import { HEADER_NAME } from "@trigger.dev/worker"; -import { TaskRunExecutionResult, DequeuedMessage } from "@trigger.dev/core/v3"; +import { + TaskRunExecutionResult, + DequeuedMessage, + CompleteRunAttemptResult, + StartRunAttemptResult, + ExecutionResult, + ProdTaskRunExecutionPayload, + MachinePreset, +} from "@trigger.dev/core/v3"; import { env } from "~/env.server"; import { $transaction } from "~/db.server"; import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; +import { EnvironmentVariable } from "~/v3/environmentVariables/repository"; +import { resolveVariablesForEnvironment } from "~/v3/environmentVariables/environmentVariablesRepository.server"; +import { generateJWTTokenForEnvironment } from "~/services/apiAuth.server"; export class WorkerGroupTokenService extends WithRunEngine { private readonly tokenPrefix = "tr_wgt_"; @@ -192,6 +207,7 @@ export class WorkerGroupTokenService extends WithRunEngine { workerGroupId: workerGroup.id, workerInstanceId: workerInstance.id, masterQueue: workerGroup.masterQueue, + environment: null, }); } @@ -229,6 +245,7 @@ export class WorkerGroupTokenService extends WithRunEngine { environmentId: workerInstance.environmentId, deploymentId: workerInstance.deployment.id, backgroundWorkerId: workerInstance.deployment.workerId, + environment: workerInstance.environment, }); } @@ -253,6 +270,7 @@ export class WorkerGroupTokenService extends WithRunEngine { }, include: { deployment: true, 
+ environment: true, }, }); @@ -281,6 +299,7 @@ export class WorkerGroupTokenService extends WithRunEngine { include: { // This will always be empty for shared worker instances, but required for types deployment: true, + environment: true, }, }); } @@ -401,6 +420,7 @@ export class WorkerGroupTokenService extends WithRunEngine { }, include: { deployment: true, + environment: true, }, }); @@ -432,6 +452,7 @@ export type AuthenticatedWorkerInstanceOptions = WithRunEngineOptions<{ environmentId?: string; deploymentId?: string; backgroundWorkerId?: string; + environment: RuntimeEnvironment | null; }>; export class AuthenticatedWorkerInstance extends WithRunEngine { @@ -439,7 +460,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { readonly workerGroupId: string; readonly workerInstanceId: string; readonly masterQueue: string; - readonly environmentId?: string; + readonly environment: RuntimeEnvironment | null; readonly deploymentId?: string; readonly backgroundWorkerId?: string; @@ -453,7 +474,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { this.workerGroupId = opts.workerGroupId; this.workerInstanceId = opts.workerInstanceId; this.masterQueue = opts.masterQueue; - this.environmentId = opts.environmentId; + this.environment = opts.environment; this.deploymentId = opts.deploymentId; this.backgroundWorkerId = opts.backgroundWorkerId; } @@ -467,7 +488,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { }); } - if (!this.environmentId || !this.deploymentId || !this.backgroundWorkerId) { + if (!this.environment || !this.deploymentId || !this.backgroundWorkerId) { logger.error("[AuthenticatedWorkerInstance] Missing environment or deployment", { ...this.toJSON(), }); @@ -486,7 +507,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { if (this.isLatestDeployment) { return await this._engine.dequeueFromEnvironmentMasterQueue({ consumerId: this.workerInstanceId, - environmentId: this.environmentId, + 
environmentId: this.environment.id, maxRunCount, backgroundWorkerId: this.backgroundWorkerId, }); @@ -510,12 +531,42 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { }); } - async heartbeatRun({ runId, snapshotId }: { runId: string; snapshotId: string }) { - // await this._engine.heartbeatRun({ runId, snapshotId }); + async heartbeatRun({ + runId, + snapshotId, + }: { + runId: string; + snapshotId: string; + }): Promise { + return await this._engine.heartbeatRun({ runId, snapshotId }); } - async startRunAttempt({ runId, snapshotId }: { runId: string; snapshotId: string }) { - return await this._engine.startRunAttempt({ runId, snapshotId }); + async startRunAttempt({ runId, snapshotId }: { runId: string; snapshotId: string }): Promise< + StartRunAttemptResult & { + envVars: Record; + } + > { + const engineResult = await this._engine.startRunAttempt({ runId, snapshotId }); + + const defaultMachinePreset = { + name: "small-1x", + cpu: 1, + memory: 1, + centsPerMs: 0, + } satisfies MachinePreset; + + const envVars = this.environment + ? await this.getEnvVars( + this.environment, + engineResult.run.id, + engineResult.execution.machine ?? 
defaultMachinePreset + ) + : {}; + + return { + ...engineResult, + envVars, + }; } async completeRunAttempt({ @@ -526,7 +577,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { runId: string; snapshotId: string; completion: TaskRunExecutionResult; - }) { + }): Promise { return await this._engine.completeRunAttempt({ runId, snapshotId, completion }); } @@ -545,10 +596,36 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { workerGroupId: this.workerGroupId, workerInstanceId: this.workerInstanceId, masterQueue: this.masterQueue, - environmentId: this.environmentId!, + environmentId: this.environment?.id!, deploymentId: this.deploymentId!, }; } + + private async getEnvVars( + environment: RuntimeEnvironment, + runId: string, + machinePreset: MachinePreset + ): Promise> { + const variables = await resolveVariablesForEnvironment(environment); + + const jwt = await generateJWTTokenForEnvironment(environment, { + run_id: runId, + machine_preset: machinePreset.name, + }); + + variables.push( + ...[ + { key: "TRIGGER_JWT", value: jwt }, + { key: "TRIGGER_RUN_ID", value: runId }, + { key: "TRIGGER_MACHINE_PRESET", value: machinePreset.name }, + ] + ); + + return variables.reduce((acc: Record, curr) => { + acc[curr.key] = curr.value; + return acc; + }, {}); + } } export type WorkerGroupTokenAuthenticationResponse = diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index 4343befc47..7c60a45582 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -121,9 +121,12 @@ export const DequeuedMessage = z.object({ export type DequeuedMessage = z.infer; /** The response to the Worker when starting an attempt */ -export type StartRunAttemptResult = ExecutionResult & { - execution: TaskRunExecution; -}; +export const StartRunAttemptResult = ExecutionResult.and( + z.object({ + execution: TaskRunExecution, + }) +); +export type StartRunAttemptResult = 
z.infer; /** The response to the Worker when completing an attempt */ const CompleteAttemptStatus = z.enum([ @@ -132,6 +135,7 @@ const CompleteAttemptStatus = z.enum([ "RETRY_QUEUED", "RETRY_IMMEDIATELY", ]); +export type CompleteAttemptStatus = z.infer; export const CompleteRunAttemptResult = z .object({ diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/schemas.ts index 49c57433c1..cf1d601b26 100644 --- a/packages/worker/src/schemas.ts +++ b/packages/worker/src/schemas.ts @@ -2,6 +2,7 @@ import { z } from "zod"; import { CompleteRunAttemptResult, DequeuedMessage, + StartRunAttemptResult, TaskRunExecutionResult, } from "@trigger.dev/core/v3"; @@ -32,11 +33,11 @@ export const WorkerApiDequeueResponseBody = DequeuedMessage.array(); export type WorkerApiDequeueResponseBody = z.infer; // Attempt start -export const WorkerApiRunAttemptStartResponseBody = z.object({ - run: z.any(), - snapshot: z.any(), - execution: z.any(), -}); +export const WorkerApiRunAttemptStartResponseBody = StartRunAttemptResult.and( + z.object({ + envVars: z.record(z.string()), + }) +); export type WorkerApiRunAttemptStartResponseBody = z.infer< typeof WorkerApiRunAttemptStartResponseBody >; From 3276d2b426fd4a525443a663ab0331437147fa94 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:14:10 +0000 Subject: [PATCH 207/485] add basic worker events --- packages/worker/src/events.ts | 46 ++++++++++++++++ packages/worker/src/index.ts | 1 + packages/worker/src/workerSession.ts | 82 ++++++++++++++++++---------- 3 files changed, 99 insertions(+), 30 deletions(-) create mode 100644 packages/worker/src/events.ts diff --git a/packages/worker/src/events.ts b/packages/worker/src/events.ts new file mode 100644 index 0000000000..f22221d621 --- /dev/null +++ b/packages/worker/src/events.ts @@ -0,0 +1,46 @@ +import { + DequeuedMessage, + StartRunAttemptResult, + TaskRunExecutionResult, +} from "@trigger.dev/core/v3"; + +export type 
WorkerEvents = { + runQueueMessage: [ + { + time: Date; + message: DequeuedMessage; + }, + ]; + requestRunAttemptStart: [ + { + time: Date; + run: { + id: string; + }; + snapshot: { + id: string; + }; + }, + ]; + runAttemptStarted: [ + { + time: Date; + } & StartRunAttemptResult & { + envVars: Record; + }, + ]; + runAttemptCompleted: [ + { + time: Date; + run: { + id: string; + }; + snapshot: { + id: string; + }; + completion: TaskRunExecutionResult; + }, + ]; +}; + +export type WorkerEventArgs = WorkerEvents[T]; diff --git a/packages/worker/src/index.ts b/packages/worker/src/index.ts index bde2dd2de4..6eb70be782 100644 --- a/packages/worker/src/index.ts +++ b/packages/worker/src/index.ts @@ -2,3 +2,4 @@ export { VERSION as WORKER_VERSION } from "./version.js"; export * from "./consts.js"; export * from "./client/http.js"; export * from "./workerSession.js"; +export * from "./events.js"; diff --git a/packages/worker/src/workerSession.ts b/packages/worker/src/workerSession.ts index 853928e90e..8ffd33b820 100644 --- a/packages/worker/src/workerSession.ts +++ b/packages/worker/src/workerSession.ts @@ -4,12 +4,15 @@ import { WorkerClientCommonOptions } from "./client/types.js"; import { WorkerWebsocketClient } from "./client/websocket.js"; import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./schemas.js"; import { RunQueueConsumer } from "./queueConsumer.js"; +import { WorkerEventArgs, WorkerEvents } from "./events.js"; +import EventEmitter from "events"; type WorkerSessionOptions = WorkerClientCommonOptions & { heartbeatIntervalSeconds?: number; + dequeueIntervalMs?: number; }; -export class WorkerSession { +export class WorkerSession extends EventEmitter { private readonly httpClient: WorkerHttpClient; private readonly websocketClient: WorkerWebsocketClient; private readonly queueConsumer: RunQueueConsumer; @@ -17,11 +20,14 @@ export class WorkerSession { private readonly heartbeatIntervalSeconds: number; constructor(private opts: 
WorkerSessionOptions) { + super(); + this.httpClient = new WorkerHttpClient(opts); this.websocketClient = new WorkerWebsocketClient(opts); this.queueConsumer = new RunQueueConsumer({ client: this.httpClient, onDequeue: this.onDequeue.bind(this), + intervalMs: opts.dequeueIntervalMs, }); // TODO: This should be dynamic and set by (or at least overridden by) the platform @@ -43,6 +49,9 @@ export class WorkerSession { console.error("[WorkerSession] Failed to send heartbeat", { error }); }, }); + + this.on("requestRunAttemptStart", this.onRequestRunAttemptStart.bind(this)); + this.on("runAttemptCompleted", this.onRunAttemptCompleted.bind(this)); } private async onDequeue(messages: WorkerApiDequeueResponseBody): Promise { @@ -50,42 +59,55 @@ export class WorkerSession { console.debug("[WorkerSession] Dequeued messages with contents", messages); for (const message of messages) { - console.log("[WorkerSession] Processing message", { message }); + console.log("[WorkerSession] Emitting message", { message }); + this.emit("runQueueMessage", { + time: new Date(), + message, + }); + } + } - const start = await this.httpClient.startRun(message.run.id, message.snapshot.id); + private async onRequestRunAttemptStart( + ...[{ time, run, snapshot }]: WorkerEventArgs<"requestRunAttemptStart"> + ): Promise { + console.log("[WorkerSession] onRequestRunAttemptStart", { time, run, snapshot }); - if (!start.success) { - console.error("[WorkerSession] Failed to start run", { error: start.error }); - continue; - } + const start = await this.httpClient.startRun(run.id, snapshot.id); - console.log("[WorkerSession] Started run", { - runId: start.data.run.id, - snapshot: start.data.snapshot.id, - }); + if (!start.success) { + console.error("[WorkerSession] Failed to start run", { error: start.error }); + return; + } - const complete = await this.httpClient.completeRun( - start.data.run.id, - start.data.snapshot.id, - { - completion: { - id: start.data.run.friendlyId, - ok: true, - outputType: 
"application/json", - }, - } - ); + console.log("[WorkerSession] Started run", { + runId: start.data.run.id, + snapshot: start.data.snapshot.id, + }); - if (!complete.success) { - console.error("[WorkerSession] Failed to complete run", { error: complete.error }); - continue; - } + this.emit("runAttemptStarted", { + time: new Date(), + ...start.data, + }); + } - console.log("[WorkerSession] Completed run", { - runId: start.data.run.id, - result: complete.data.result, - }); + private async onRunAttemptCompleted( + ...[{ time, run, snapshot, completion }]: WorkerEventArgs<"runAttemptCompleted"> + ): Promise { + console.log("[WorkerSession] onRunAttemptCompleted", { time, run, snapshot, completion }); + + const complete = await this.httpClient.completeRun(run.id, snapshot.id, { + completion: completion, + }); + + if (!complete.success) { + console.error("[WorkerSession] Failed to complete run", { error: complete.error }); + return; } + + console.log("[WorkerSession] Completed run", { + runId: run.id, + result: complete.data.result, + }); } async start() { From dc3a2c152e99f9dfba8f01325ec475f1681b5545 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:15:01 +0000 Subject: [PATCH 208/485] unmanaged worker builds --- packages/cli-v3/package.json | 1 + packages/cli-v3/src/build/buildWorker.ts | 14 +- packages/cli-v3/src/build/bundle.ts | 43 +- packages/cli-v3/src/build/packageModules.ts | 148 ++++- packages/cli-v3/src/commands/deploy.ts | 110 +--- packages/cli-v3/src/commands/workers/build.ts | 550 ++++++++++++++++++ packages/cli-v3/src/commands/workers/index.ts | 2 + packages/cli-v3/src/deploy/buildImage.ts | 10 +- .../src/entryPoints/deploy-run-worker.ts | 18 +- .../cli-v3/src/entryPoints/dev-run-worker.ts | 18 +- .../entryPoints/unmanaged-index-controller.ts | 117 ++++ .../src/entryPoints/unmanaged-index-worker.ts | 170 ++++++ .../entryPoints/unmanaged-run-controller.ts | 172 ++++++ 
.../src/entryPoints/unmanaged-run-worker.ts | 468 +++++++++++++++ packages/core/package.json | 15 + .../src/v3/runtime/unmanagedRuntimeManager.ts | 81 +++ packages/core/src/v3/schemas/build.ts | 2 +- packages/core/src/v3/unmanaged/index.ts | 1 + pnpm-lock.yaml | 3 + 19 files changed, 1772 insertions(+), 171 deletions(-) create mode 100644 packages/cli-v3/src/commands/workers/build.ts create mode 100644 packages/cli-v3/src/entryPoints/unmanaged-index-controller.ts create mode 100644 packages/cli-v3/src/entryPoints/unmanaged-index-worker.ts create mode 100644 packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts create mode 100644 packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts create mode 100644 packages/core/src/v3/runtime/unmanagedRuntimeManager.ts create mode 100644 packages/core/src/v3/unmanaged/index.ts diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index c5fb330be2..1e90328487 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -89,6 +89,7 @@ "@opentelemetry/semantic-conventions": "1.25.1", "@trigger.dev/build": "workspace:3.1.2", "@trigger.dev/core": "workspace:3.1.2", + "@trigger.dev/worker": "workspace:3.1.2", "c12": "^1.11.1", "chalk": "^5.2.0", "cli-table3": "^0.6.3", diff --git a/packages/cli-v3/src/build/buildWorker.ts b/packages/cli-v3/src/build/buildWorker.ts index 6800f5a69b..c4fe0653d1 100644 --- a/packages/cli-v3/src/build/buildWorker.ts +++ b/packages/cli-v3/src/build/buildWorker.ts @@ -1,8 +1,5 @@ -import { CORE_VERSION } from "@trigger.dev/core/v3"; -import { DEFAULT_RUNTIME, ResolvedConfig } from "@trigger.dev/core/v3/build"; +import { ResolvedConfig } from "@trigger.dev/core/v3/build"; import { BuildManifest, BuildTarget } from "@trigger.dev/core/v3/schemas"; -import { resolveFileSources } from "../utilities/sourceFiles.js"; -import { VERSION } from "../version.js"; import { BundleResult, bundleWorker, createBuildManifestFromBundle } from "./bundle.js"; import { 
createBuildContext, @@ -11,13 +8,6 @@ import { resolvePluginsForContext, } from "./extensions.js"; import { createExternalsBuildExtension } from "./externals.js"; -import { - deployIndexController, - deployIndexWorker, - deployRunController, - deployRunWorker, - telemetryEntryPoint, -} from "./packageModules.js"; import { join, relative, sep } from "node:path"; import { generateContainerfile } from "../deploy/buildImage.js"; import { writeFile } from "node:fs/promises"; @@ -88,7 +78,7 @@ export async function buildWorker(options: BuildWorkerOptions) { buildManifest = await notifyExtensionOnBuildComplete(buildContext, buildManifest); - if (options.target === "deploy") { + if (options.target === "deploy" || options.target === "unmanaged") { buildManifest = options.rewritePaths ? rewriteBuildManifestPaths(buildManifest, options.destination) : buildManifest; diff --git a/packages/cli-v3/src/build/bundle.ts b/packages/cli-v3/src/build/bundle.ts index 2de7941969..b44939ebee 100644 --- a/packages/cli-v3/src/build/bundle.ts +++ b/packages/cli-v3/src/build/bundle.ts @@ -7,13 +7,11 @@ import { createFile } from "../utilities/fileSystem.js"; import { logger } from "../utilities/logger.js"; import { deployEntryPoints, - deployIndexController, - deployIndexWorker, - deployRunController, - deployRunWorker, devEntryPoints, - devIndexWorker, - devRunWorker, + getIndexControllerForTarget, + getIndexWorkerForTarget, + getRunControllerForTarget, + getRunWorkerForTarget, isIndexControllerForTarget, isIndexWorkerForTarget, isLoaderEntryPoint, @@ -21,12 +19,14 @@ import { isRunWorkerForTarget, shims, telemetryEntryPoint, + unmanagedEntryPoints, } from "./packageModules.js"; import { buildPlugins } from "./plugins.js"; import { CORE_VERSION } from "@trigger.dev/core/v3"; import { resolveFileSources } from "../utilities/sourceFiles.js"; import { copyManifestToDir } from "./manifests.js"; import { VERSION } from "../version.js"; +import { assertExhaustive } from 
"../utilities/assertExhaustive.js"; export interface BundleOptions { target: BuildTarget; @@ -233,10 +233,22 @@ async function getEntryPoints(target: BuildTarget, config: ResolvedConfig) { projectEntryPoints.push(config.configFile); } - if (target === "dev") { - projectEntryPoints.push(...devEntryPoints); - } else { - projectEntryPoints.push(...deployEntryPoints); + switch (target) { + case "dev": { + projectEntryPoints.push(...devEntryPoints); + break; + } + case "deploy": { + projectEntryPoints.push(...deployEntryPoints); + break; + } + case "unmanaged": { + projectEntryPoints.push(...unmanagedEntryPoints); + break; + } + default: { + assertExhaustive(target); + } } if (config.instrumentedPackageNames?.length ?? 0 > 0) { @@ -312,13 +324,10 @@ export async function createBuildManifestFromBundle({ }, outputPath: destination, indexControllerEntryPoint: - bundle.indexControllerEntryPoint ?? target === "deploy" ? deployIndexController : undefined, - indexWorkerEntryPoint: - bundle.indexWorkerEntryPoint ?? target === "deploy" ? deployIndexWorker : devIndexWorker, - runControllerEntryPoint: - bundle.runControllerEntryPoint ?? target === "deploy" ? deployRunController : undefined, - runWorkerEntryPoint: - bundle.runWorkerEntryPoint ?? target === "deploy" ? deployRunWorker : devRunWorker, + bundle.indexControllerEntryPoint ?? getIndexControllerForTarget(target), + indexWorkerEntryPoint: bundle.indexWorkerEntryPoint ?? getIndexWorkerForTarget(target), + runControllerEntryPoint: bundle.runControllerEntryPoint ?? getRunControllerForTarget(target), + runWorkerEntryPoint: bundle.runWorkerEntryPoint ?? getRunWorkerForTarget(target), loaderEntryPoint: bundle.loaderEntryPoint, configPath: bundle.configPath, customConditions: resolvedConfig.build.conditions ?? 
[], diff --git a/packages/cli-v3/src/build/packageModules.ts b/packages/cli-v3/src/build/packageModules.ts index 537a25c00d..a05468065a 100644 --- a/packages/cli-v3/src/build/packageModules.ts +++ b/packages/cli-v3/src/build/packageModules.ts @@ -1,10 +1,20 @@ import { BuildTarget } from "@trigger.dev/core/v3"; import { join } from "node:path"; import { sourceDir } from "../sourceDir.js"; +import { assertExhaustive } from "../utilities/assertExhaustive.js"; export const devRunWorker = join(sourceDir, "entryPoints", "dev-run-worker.js"); export const devIndexWorker = join(sourceDir, "entryPoints", "dev-index-worker.js"); +export const unmanagedRunController = join(sourceDir, "entryPoints", "unmanaged-run-controller.js"); +export const unmanagedRunWorker = join(sourceDir, "entryPoints", "unmanaged-run-worker.js"); +export const unmanagedIndexController = join( + sourceDir, + "entryPoints", + "unmanaged-index-controller.js" +); +export const unmanagedIndexWorker = join(sourceDir, "entryPoints", "unmanaged-index-worker.js"); + export const deployRunController = join(sourceDir, "entryPoints", "deploy-run-controller.js"); export const deployRunWorker = join(sourceDir, "entryPoints", "deploy-run-worker.js"); export const deployIndexController = join(sourceDir, "entryPoints", "deploy-index-controller.js"); @@ -13,6 +23,12 @@ export const deployIndexWorker = join(sourceDir, "entryPoints", "deploy-index-wo export const telemetryEntryPoint = join(sourceDir, "entryPoints", "loader.js"); export const devEntryPoints = [devRunWorker, devIndexWorker]; +export const unmanagedEntryPoints = [ + unmanagedRunController, + unmanagedRunWorker, + unmanagedIndexController, + unmanagedIndexWorker, +]; export const deployEntryPoints = [ deployRunController, deployRunWorker, @@ -40,6 +56,38 @@ function isDevIndexWorker(entryPoint: string) { ); } +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function 
isUnmanagedRunController(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/unmanaged-run-controller.js") || + entryPoint.includes("src/entryPoints/unmanaged-run-controller.ts") + ); +} + +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function isUnmanagedRunWorker(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/unmanaged-run-worker.js") || + entryPoint.includes("src/entryPoints/unmanaged-run-worker.ts") + ); +} + +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function isUnmanagedIndexController(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/unmanaged-index-controller.js") || + entryPoint.includes("src/entryPoints/unmanaged-index-controller.ts") + ); +} + +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function isUnmanagedIndexWorker(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/unmanaged-index-worker.js") || + entryPoint.includes("src/entryPoints/unmanaged-index-worker.ts") + ); +} + // IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) function isDeployIndexController(entryPoint: string) { return ( @@ -80,35 +128,107 @@ export function isLoaderEntryPoint(entryPoint: string) { } export function isRunWorkerForTarget(entryPoint: string, target: BuildTarget) { - if (target === "dev") { - return isDevRunWorker(entryPoint); - } else { - return isDeployRunWorker(entryPoint); + switch (target) { + case "dev": + return isDevRunWorker(entryPoint); + case "deploy": + return isDeployRunWorker(entryPoint); + case "unmanaged": + return isUnmanagedRunWorker(entryPoint); + default: + assertExhaustive(target); + } +} + +export function 
getRunWorkerForTarget(target: BuildTarget) { + switch (target) { + case "dev": + return devRunWorker; + case "deploy": + return deployRunWorker; + case "unmanaged": + return unmanagedRunWorker; + default: + assertExhaustive(target); } } export function isRunControllerForTarget(entryPoint: string, target: BuildTarget) { - if (target === "deploy") { - return isDeployRunController(entryPoint); + switch (target) { + case "dev": + return false; + case "deploy": + return isDeployRunController(entryPoint); + case "unmanaged": + return isUnmanagedRunController(entryPoint); + default: + assertExhaustive(target); } +} - return false; +export function getRunControllerForTarget(target: BuildTarget) { + switch (target) { + case "dev": + return undefined; + case "deploy": + return deployRunController; + case "unmanaged": + return unmanagedRunController; + default: + assertExhaustive(target); + } } export function isIndexWorkerForTarget(entryPoint: string, target: BuildTarget) { - if (target === "dev") { - return isDevIndexWorker(entryPoint); - } else { - return isDeployIndexWorker(entryPoint); + switch (target) { + case "dev": + return isDevIndexWorker(entryPoint); + case "deploy": + return isDeployIndexWorker(entryPoint); + case "unmanaged": + return isUnmanagedIndexWorker(entryPoint); + default: + assertExhaustive(target); + } +} + +export function getIndexWorkerForTarget(target: BuildTarget) { + switch (target) { + case "dev": + return devIndexWorker; + case "deploy": + return deployIndexWorker; + case "unmanaged": + return unmanagedIndexWorker; + default: + assertExhaustive(target); } } export function isIndexControllerForTarget(entryPoint: string, target: BuildTarget) { - if (target === "deploy") { - return isDeployIndexController(entryPoint); + switch (target) { + case "dev": + return false; + case "deploy": + return isDeployIndexController(entryPoint); + case "unmanaged": + return isUnmanagedIndexController(entryPoint); + default: + assertExhaustive(target); } +} - return 
false; +export function getIndexControllerForTarget(target: BuildTarget) { + switch (target) { + case "dev": + return undefined; + case "deploy": + return deployIndexController; + case "unmanaged": + return unmanagedIndexController; + default: + assertExhaustive(target); + } } export function isConfigEntryPoint(entryPoint: string) { diff --git a/packages/cli-v3/src/commands/deploy.ts b/packages/cli-v3/src/commands/deploy.ts index 7552b52e3a..921af6c2e6 100644 --- a/packages/cli-v3/src/commands/deploy.ts +++ b/packages/cli-v3/src/commands/deploy.ts @@ -1,11 +1,8 @@ import { intro, outro } from "@clack/prompts"; import { prepareDeploymentError } from "@trigger.dev/core/v3"; -import { ResolvedConfig } from "@trigger.dev/core/v3/build"; -import { BuildManifest, InitializeDeploymentResponseBody } from "@trigger.dev/core/v3/schemas"; +import { InitializeDeploymentResponseBody } from "@trigger.dev/core/v3/schemas"; import { Command, Option as CommandOption } from "commander"; -import { writeFile } from "node:fs/promises"; -import { join, relative, resolve } from "node:path"; -import { readPackageJSON, writePackageJSON } from "pkg-types"; +import { resolve } from "node:path"; import { z } from "zod"; import { CliApiClient } from "../apiClient.js"; import { buildWorker } from "../build/buildWorker.js"; @@ -17,7 +14,7 @@ import { wrapCommandAction, } from "../cli/common.js"; import { loadConfig } from "../config.js"; -import { buildImage, generateContainerfile } from "../deploy/buildImage.js"; +import { buildImage } from "../deploy/buildImage.js"; import { checkLogsForErrors, checkLogsForWarnings, @@ -25,10 +22,8 @@ import { printWarnings, saveLogs, } from "../deploy/logs.js"; -import { buildManifestToJSON } from "../utilities/buildManifest.js"; import { chalkError, cliLink, isLinksSupported, prettyError } from "../utilities/cliOutput.js"; import { loadDotEnvVars } from "../utilities/dotEnv.js"; -import { writeJSONFile } from "../utilities/fileSystem.js"; import { 
printStandloneInitialBanner } from "../utilities/initialBanner.js"; import { logger } from "../utilities/logger.js"; import { getProjectClient } from "../utilities/session.js"; @@ -50,7 +45,6 @@ const DeployCommandOptions = CommonCommandOptions.extend({ push: z.boolean().default(false), config: z.string().optional(), projectRef: z.string().optional(), - apiUrl: z.string().optional(), saveLogs: z.boolean().default(false), skipUpdateCheck: z.boolean().default(false), noCache: z.boolean().default(false), @@ -447,104 +441,6 @@ async function _deployCommand(dir: string, options: DeployCommandOptions) { ); } -function rewriteBuildManifestPaths( - buildManifest: BuildManifest, - destinationDir: string -): BuildManifest { - return { - ...buildManifest, - files: buildManifest.files.map((file) => ({ - ...file, - entry: cleanEntryPath(file.entry), - out: rewriteOutputPath(destinationDir, file.out), - })), - outputPath: rewriteOutputPath(destinationDir, buildManifest.outputPath), - configPath: rewriteOutputPath(destinationDir, buildManifest.configPath), - runControllerEntryPoint: buildManifest.runControllerEntryPoint - ? rewriteOutputPath(destinationDir, buildManifest.runControllerEntryPoint) - : undefined, - runWorkerEntryPoint: rewriteOutputPath(destinationDir, buildManifest.runWorkerEntryPoint), - indexControllerEntryPoint: buildManifest.indexControllerEntryPoint - ? rewriteOutputPath(destinationDir, buildManifest.indexControllerEntryPoint) - : undefined, - indexWorkerEntryPoint: rewriteOutputPath(destinationDir, buildManifest.indexWorkerEntryPoint), - loaderEntryPoint: buildManifest.loaderEntryPoint - ? rewriteOutputPath(destinationDir, buildManifest.loaderEntryPoint) - : undefined, - }; -} - -async function writeProjectFiles( - buildManifest: BuildManifest, - resolvedConfig: ResolvedConfig, - outputPath: string -) { - // Step 1. 
Read the package.json file - const packageJson = await readProjectPackageJson(resolvedConfig.packageJsonPath); - - if (!packageJson) { - throw new Error("Could not read the package.json file"); - } - - const dependencies = - buildManifest.externals?.reduce( - (acc, external) => { - acc[external.name] = external.version; - - return acc; - }, - {} as Record - ) ?? {}; - - // Step 3: Write the resolved dependencies to the package.json file - await writePackageJSON(join(outputPath, "package.json"), { - ...packageJson, - name: packageJson.name ?? "trigger-project", - dependencies: { - ...dependencies, - }, - trustedDependencies: Object.keys(dependencies), - devDependencies: {}, - peerDependencies: {}, - scripts: {}, - }); - - await writeJSONFile(join(outputPath, "build.json"), buildManifestToJSON(buildManifest)); - await writeContainerfile(outputPath, buildManifest); -} - -async function readProjectPackageJson(packageJsonPath: string) { - const packageJson = await readPackageJSON(packageJsonPath); - - return packageJson; -} - -// Remove any query parameters from the entry path -// For example, src/trigger/ai.ts?sentryProxyModule=true -> src/trigger/ai.ts -function cleanEntryPath(entry: string): string { - return entry.split("?")[0]!; -} - -function rewriteOutputPath(destinationDir: string, filePath: string) { - return `/app/${relative(destinationDir, filePath)}`; -} - -async function writeContainerfile(outputPath: string, buildManifest: BuildManifest) { - if (!buildManifest.runControllerEntryPoint || !buildManifest.indexControllerEntryPoint) { - throw new Error("Something went wrong with the build. Aborting deployment. 
[code 7789]"); - } - - const containerfile = await generateContainerfile({ - runtime: buildManifest.runtime, - entrypoint: buildManifest.runControllerEntryPoint, - build: buildManifest.build, - image: buildManifest.image, - indexScript: buildManifest.indexControllerEntryPoint, - }); - - await writeFile(join(outputPath, "Containerfile"), containerfile); -} - export async function syncEnvVarsWithServer( apiClient: CliApiClient, projectRef: string, diff --git a/packages/cli-v3/src/commands/workers/build.ts b/packages/cli-v3/src/commands/workers/build.ts new file mode 100644 index 0000000000..e28896de9b --- /dev/null +++ b/packages/cli-v3/src/commands/workers/build.ts @@ -0,0 +1,550 @@ +import { intro, outro, log } from "@clack/prompts"; +import { parseDockerImageReference, prepareDeploymentError } from "@trigger.dev/core/v3"; +import { InitializeDeploymentResponseBody } from "@trigger.dev/core/v3/schemas"; +import { Command, Option as CommandOption } from "commander"; +import { resolve } from "node:path"; +import { z } from "zod"; +import { CliApiClient } from "../../apiClient.js"; +import { buildWorker } from "../../build/buildWorker.js"; +import { + CommonCommandOptions, + commonOptions, + handleTelemetry, + SkipLoggingError, + wrapCommandAction, +} from "../../cli/common.js"; +import { loadConfig } from "../../config.js"; +import { buildImage } from "../../deploy/buildImage.js"; +import { + checkLogsForErrors, + checkLogsForWarnings, + printErrors, + printWarnings, + saveLogs, +} from "../../deploy/logs.js"; +import { chalkError, cliLink, isLinksSupported, prettyError } from "../../utilities/cliOutput.js"; +import { loadDotEnvVars } from "../../utilities/dotEnv.js"; +import { printStandloneInitialBanner } from "../../utilities/initialBanner.js"; +import { logger } from "../../utilities/logger.js"; +import { getProjectClient } from "../../utilities/session.js"; +import { getTmpDir } from "../../utilities/tempDirectories.js"; +import { spinner } from 
"../../utilities/windows.js"; +import { login } from "../login.js"; +import { updateTriggerPackages } from "../update.js"; +import { resolveAlwaysExternal } from "../../build/externals.js"; + +const WorkersBuildCommandOptions = CommonCommandOptions.extend({ + // docker build options + load: z.boolean().default(false), + platform: z.enum(["linux/amd64", "linux/arm64"]).default("linux/amd64"), + network: z.enum(["default", "none", "host"]).optional(), + tag: z.string().optional(), + push: z.boolean().default(false), + noCache: z.boolean().default(false), + // trigger options + local: z.boolean().default(false), // TODO: default to true when webapp has no remote build support + dryRun: z.boolean().default(false), + skipSyncEnvVars: z.boolean().default(false), + env: z.enum(["prod", "staging"]), + config: z.string().optional(), + projectRef: z.string().optional(), + apiUrl: z.string().optional(), + saveLogs: z.boolean().default(false), + skipUpdateCheck: z.boolean().default(false), + envFile: z.string().optional(), +}); + +type WorkersBuildCommandOptions = z.infer; + +type Deployment = InitializeDeploymentResponseBody; + +export function configureWorkersBuildCommand(program: Command) { + return commonOptions( + program + .command("build") + .description("Build a self-hosted worker image") + .argument("[path]", "The path to the project", ".") + .option( + "-e, --env ", + "Deploy to a specific environment (currently only prod and staging are supported)", + "prod" + ) + .option("--skip-update-check", "Skip checking for @trigger.dev package updates") + .option("-c, --config ", "The name of the config file, found at [path]") + .option( + "-p, --project-ref ", + "The project ref. Required if there is no config file. This will override the project specified in the config file." + ) + .option( + "--skip-sync-env-vars", + "Skip syncing environment variables when using the syncEnvVars extension." 
+ ) + .option( + "--env-file ", + "Path to the .env file to load into the CLI process. Defaults to .env in the project directory." + ) + ) + .addOption( + new CommandOption( + "--dry-run", + "This will only create the build context without actually building the image. This can be useful for debugging." + ).hideHelp() + ) + .addOption( + new CommandOption( + "--no-cache", + "Do not use any build cache. This will significantly slow down the build process but can be useful to fix caching issues." + ).hideHelp() + ) + .option("--local", "Force building the image locally.") + .option("--push", "Push the image to the configured registry.") + .option( + "-t, --tag ", + "Specify the full name of the resulting image with an optional tag. The tag will always be overridden for remote builds." + ) + .option("--load", "Load the built image into your local docker") + .option( + "--network ", + "The networking mode for RUN instructions when using --local", + "host" + ) + .option( + "--platform ", + "The platform to build the deployment image for", + "linux/amd64" + ) + .option("--save-logs", "If provided, will save logs even for successful builds") + .action(async (path, options) => { + await handleTelemetry(async () => { + await printStandloneInitialBanner(true); + await workersBuildCommand(path, options); + }); + }); +} + +async function workersBuildCommand(dir: string, options: unknown) { + return await wrapCommandAction( + "workerBuildCommand", + WorkersBuildCommandOptions, + options, + async (opts) => { + return await _workerBuildCommand(dir, opts); + } + ); +} + +async function _workerBuildCommand(dir: string, options: WorkersBuildCommandOptions) { + intro("Building worker image"); + + if (!options.skipUpdateCheck) { + await updateTriggerPackages(dir, { ...options }, true, true); + } + + const projectPath = resolve(process.cwd(), dir); + + const authorization = await login({ + embedded: true, + defaultApiUrl: options.apiUrl, + profile: options.profile, + }); + + if 
(!authorization.ok) { + if (authorization.error === "fetch failed") { + throw new Error( + `Failed to connect to ${authorization.auth?.apiUrl}. Are you sure it's the correct URL?` + ); + } else { + throw new Error( + `You must login first. Use the \`login\` CLI command.\n\n${authorization.error}` + ); + } + } + + const resolvedConfig = await loadConfig({ + cwd: projectPath, + overrides: { project: options.projectRef }, + configFile: options.config, + }); + + logger.debug("Resolved config", resolvedConfig); + + const projectClient = await getProjectClient({ + accessToken: authorization.auth.accessToken, + apiUrl: authorization.auth.apiUrl, + projectRef: resolvedConfig.project, + env: options.env, + profile: options.profile, + }); + + if (!projectClient) { + throw new Error("Failed to get project client"); + } + + const serverEnvVars = await projectClient.client.getEnvironmentVariables(resolvedConfig.project); + loadDotEnvVars(resolvedConfig.workingDir, options.envFile); + + const destination = getTmpDir(resolvedConfig.workingDir, "build", options.dryRun); + + const $buildSpinner = spinner(); + + const forcedExternals = await resolveAlwaysExternal(projectClient.client); + + const buildManifest = await buildWorker({ + target: "unmanaged", + environment: options.env, + destination: destination.path, + resolvedConfig, + rewritePaths: true, + envVars: serverEnvVars.success ? serverEnvVars.data.variables : {}, + forcedExternals, + listener: { + onBundleStart() { + $buildSpinner.start("Building project"); + }, + onBundleComplete(result) { + $buildSpinner.stop("Successfully built project"); + + logger.debug("Bundle result", result); + }, + }, + }); + + logger.debug("Successfully built project to", destination.path); + + if (options.dryRun) { + logger.info(`Dry run complete. View the built project at ${destination.path}`); + return; + } + + const tagParts = parseDockerImageReference(options.tag ?? 
""); + + // Account for empty strings to preserve existing behavior + const registry = tagParts.registry ? tagParts.registry : undefined; + const namespace = tagParts.repo ? tagParts.repo : undefined; + + const deploymentResponse = await projectClient.client.initializeDeployment({ + contentHash: buildManifest.contentHash, + userId: authorization.userId, + selfHosted: options.local, + registryHost: registry, + namespace: namespace, + type: "UNMANAGED", + }); + + if (!deploymentResponse.success) { + throw new Error(`Failed to start deployment: ${deploymentResponse.error}`); + } + + const deployment = deploymentResponse.data; + + let local = options.local; + + // If the deployment doesn't have any externalBuildData, then we can't use the remote image builder + if (!deployment.externalBuildData && !options.local) { + log.warn( + "This webapp instance does not support remote builds, falling back to local build. Please use the `--local` flag to skip this warning." + ); + local = true; + } + + if ( + buildManifest.deploy.sync && + buildManifest.deploy.sync.env && + Object.keys(buildManifest.deploy.sync.env).length > 0 + ) { + const numberOfEnvVars = Object.keys(buildManifest.deploy.sync.env).length; + const vars = numberOfEnvVars === 1 ? "var" : "vars"; + + if (!options.skipSyncEnvVars) { + const $spinner = spinner(); + $spinner.start(`Syncing ${numberOfEnvVars} env ${vars} with the server`); + const success = await syncEnvVarsWithServer( + projectClient.client, + resolvedConfig.project, + options.env, + buildManifest.deploy.sync.env + ); + + if (!success) { + await failDeploy( + projectClient.client, + deployment, + { + name: "SyncEnvVarsError", + message: `Failed to sync ${numberOfEnvVars} env ${vars} with the server`, + }, + "", + $spinner + ); + } else { + $spinner.stop(`Successfully synced ${numberOfEnvVars} env ${vars} with the server`); + } + } else { + logger.log( + "Skipping syncing env vars. 
The environment variables in your project have changed, but the --skip-sync-env-vars flag was provided." + ); + } + } + + const version = deployment.version; + + const deploymentLink = cliLink( + "View deployment", + `${authorization.dashboardUrl}/projects/v3/${resolvedConfig.project}/deployments/${deployment.shortCode}` + ); + + const testLink = cliLink( + "Test tasks", + `${authorization.dashboardUrl}/projects/v3/${resolvedConfig.project}/test?environment=${ + options.env === "prod" ? "prod" : "stg" + }` + ); + + const $spinner = spinner(); + + if (isLinksSupported) { + $spinner.start(`Building worker version ${version} ${deploymentLink}`); + } else { + $spinner.start(`Building worker version ${version}`); + } + + const buildResult = await buildImage({ + selfHosted: local, + buildPlatform: options.platform, + noCache: options.noCache, + push: options.push, + registryHost: registry, + registry: registry, + deploymentId: deployment.id, + deploymentVersion: deployment.version, + imageTag: deployment.imageTag, + loadImage: options.load, + contentHash: deployment.contentHash, + externalBuildId: deployment.externalBuildData?.buildId, + externalBuildToken: deployment.externalBuildData?.buildToken, + externalBuildProjectId: deployment.externalBuildData?.projectId, + projectId: projectClient.id, + projectRef: resolvedConfig.project, + apiUrl: projectClient.client.apiURL, + apiKey: projectClient.client.accessToken!, + authAccessToken: authorization.auth.accessToken, + compilationPath: destination.path, + buildEnvVars: buildManifest.build.env, + network: options.network, + }); + + logger.debug("Build result", buildResult); + + const warnings = checkLogsForWarnings(buildResult.logs); + + if (!warnings.ok) { + await failDeploy( + projectClient.client, + deployment, + { name: "BuildError", message: warnings.summary }, + buildResult.logs, + $spinner, + warnings.warnings, + warnings.errors + ); + + throw new SkipLoggingError("Failed to build image"); + } + + if (!buildResult.ok) 
{ + await failDeploy( + projectClient.client, + deployment, + { name: "BuildError", message: buildResult.error }, + buildResult.logs, + $spinner, + warnings.warnings + ); + + throw new SkipLoggingError("Failed to build image"); + } + + // Index the deployment + // const runtime = new UnmanagedWorkerRuntime({ + // name: projectClient.name, + // config: resolvedConfig, + // args: { + // ...options, + // debugOtel: false, + // }, + // client: projectClient.client, + // dashboardUrl: authorization.dashboardUrl, + // }); + // await runtime.init(); + + // console.log("buildManifest", buildManifest); + + // await runtime.initializeWorker(buildManifest); + + const getDeploymentResponse = await projectClient.client.getDeployment(deployment.id); + + if (!getDeploymentResponse.success) { + await failDeploy( + projectClient.client, + deployment, + { name: "DeploymentError", message: getDeploymentResponse.error }, + buildResult.logs, + $spinner + ); + + throw new SkipLoggingError("Failed to get deployment with worker"); + } + + const deploymentWithWorker = getDeploymentResponse.data; + + if (!deploymentWithWorker.worker) { + await failDeploy( + projectClient.client, + deployment, + { name: "DeploymentError", message: "Failed to get deployment with worker" }, + buildResult.logs, + $spinner + ); + + throw new SkipLoggingError("Failed to get deployment with worker"); + } + + $spinner.stop(`Successfully built worker version ${version}`); + + const taskCount = deploymentWithWorker.worker?.tasks.length ?? 0; + + log.message(`Detected ${taskCount} task${taskCount === 1 ? "" : "s"}`); + + if (taskCount > 0) { + logger.table( + deploymentWithWorker.worker.tasks.map((task) => ({ + id: task.slug, + export: task.exportName, + path: task.filePath, + })) + ); + } + + outro( + `Version ${version} built and ready to deploy: ${buildResult.image} ${ + isLinksSupported ? 
`| ${deploymentLink} | ${testLink}` : "" + }` + ); +} + +export async function syncEnvVarsWithServer( + apiClient: CliApiClient, + projectRef: string, + environmentSlug: string, + envVars: Record +) { + const uploadResult = await apiClient.importEnvVars(projectRef, environmentSlug, { + variables: envVars, + override: true, + }); + + return uploadResult.success; +} + +async function failDeploy( + client: CliApiClient, + deployment: Deployment, + error: { name: string; message: string }, + logs: string, + $spinner: ReturnType, + warnings?: string[], + errors?: string[] +) { + $spinner.stop(`Failed to deploy project`); + + const doOutputLogs = async (prefix: string = "Error") => { + if (logs.trim() !== "") { + const logPath = await saveLogs(deployment.shortCode, logs); + + printWarnings(warnings); + printErrors(errors); + + checkLogsForErrors(logs); + + outro( + `${chalkError(`${prefix}:`)} ${ + error.message + }. Full build logs have been saved to ${logPath}` + ); + } else { + outro(`${chalkError(`${prefix}:`)} ${error.message}.`); + } + }; + + const exitCommand = (message: string) => { + throw new SkipLoggingError(message); + }; + + const deploymentResponse = await client.getDeployment(deployment.id); + + if (!deploymentResponse.success) { + logger.debug(`Failed to get deployment with worker: ${deploymentResponse.error}`); + } else { + const serverDeployment = deploymentResponse.data; + + switch (serverDeployment.status) { + case "PENDING": + case "DEPLOYING": + case "BUILDING": { + await doOutputLogs(); + + await client.failDeployment(deployment.id, { + error, + }); + + exitCommand("Failed to deploy project"); + + break; + } + case "CANCELED": { + await doOutputLogs("Canceled"); + + exitCommand("Failed to deploy project"); + + break; + } + case "FAILED": { + const errorData = serverDeployment.errorData + ? 
prepareDeploymentError(serverDeployment.errorData) + : undefined; + + if (errorData) { + prettyError(errorData.name, errorData.stack, errorData.stderr); + + if (logs.trim() !== "") { + const logPath = await saveLogs(deployment.shortCode, logs); + + outro(`Aborting deployment. Full build logs have been saved to ${logPath}`); + } else { + outro(`Aborting deployment`); + } + } else { + await doOutputLogs("Failed"); + } + + exitCommand("Failed to deploy project"); + + break; + } + case "DEPLOYED": { + await doOutputLogs("Deployed with errors"); + + exitCommand("Deployed with errors"); + + break; + } + case "TIMED_OUT": { + await doOutputLogs("TimedOut"); + + exitCommand("Timed out"); + + break; + } + } + } +} diff --git a/packages/cli-v3/src/commands/workers/index.ts b/packages/cli-v3/src/commands/workers/index.ts index 0cc3c7d84a..84ef7ba2ef 100644 --- a/packages/cli-v3/src/commands/workers/index.ts +++ b/packages/cli-v3/src/commands/workers/index.ts @@ -1,4 +1,5 @@ import { Command } from "commander"; +import { configureWorkersBuildCommand } from "./build.js"; import { configureWorkersListCommand } from "./list.js"; import { configureWorkersCreateCommand } from "./create.js"; import { configureWorkersRunCommand } from "./run.js"; @@ -6,6 +7,7 @@ import { configureWorkersRunCommand } from "./run.js"; export function configureWorkersCommand(program: Command) { const workers = program.command("workers").description("Subcommands for managing workers"); + configureWorkersBuildCommand(workers); configureWorkersListCommand(workers); configureWorkersCreateCommand(workers); configureWorkersRunCommand(workers); diff --git a/packages/cli-v3/src/deploy/buildImage.ts b/packages/cli-v3/src/deploy/buildImage.ts index 646f1d3cc2..deb44e9f3a 100644 --- a/packages/cli-v3/src/deploy/buildImage.ts +++ b/packages/cli-v3/src/deploy/buildImage.ts @@ -20,7 +20,7 @@ export interface BuildImageOptions { loadImage?: boolean; // Flattened properties from nested structures - registryHost: 
string; + registryHost?: string; authAccessToken: string; imageTag: string; deploymentId: string; @@ -94,6 +94,12 @@ export async function buildImage({ ); } + if (!registryHost) { + throw new Error( + "Failed to initialize deployment. The deployment does not have a registry host. To deploy this project, you must use the --self-hosted or --local flag to build and push the image yourself." + ); + } + return depotBuildImage({ registryHost, auth: authAccessToken, @@ -262,7 +268,7 @@ async function depotBuildImage(options: DepotBuildImageOptions): Promise>; + +async function indexDeployment({ + cliApiClient, + projectRef, + deploymentId, + buildManifest, +}: BootstrapResult) { + const stdout: string[] = []; + const stderr: string[] = []; + + try { + const $env = await cliApiClient.getEnvironmentVariables(projectRef); + + if (!$env.success) { + throw new Error(`Failed to fetch environment variables: ${$env.error}`); + } + + const workerManifest = await indexWorkerManifest({ + runtime: buildManifest.runtime, + indexWorkerPath: buildManifest.indexWorkerEntryPoint, + buildManifestPath: "./build.json", + nodeOptions: execOptionsForRuntime(buildManifest.runtime, buildManifest), + env: $env.data.variables, + otelHookExclude: buildManifest.otelImportHook?.exclude, + otelHookInclude: buildManifest.otelImportHook?.include, + handleStdout(data) { + stdout.push(data); + }, + handleStderr(data) { + if (!data.includes("DeprecationWarning")) { + stderr.push(data); + } + }, + }); + + console.log("Writing index.json", process.cwd()); + + await writeFile(join(process.cwd(), "index.json"), JSON.stringify(workerManifest, null, 2)); + + const sourceFiles = resolveSourceFiles(buildManifest.sources, workerManifest.tasks); + + const backgroundWorkerBody: CreateBackgroundWorkerRequestBody = { + localOnly: true, + metadata: { + contentHash: buildManifest.contentHash, + packageVersion: buildManifest.packageVersion, + cliPackageVersion: buildManifest.cliPackageVersion, + tasks: 
workerManifest.tasks, + sourceFiles, + }, + supportsLazyAttempts: true, + }; + + await cliApiClient.createDeploymentBackgroundWorker(deploymentId, backgroundWorkerBody); + } catch (error) { + const serialiedIndexError = serializeIndexingError(error, stderr.join("\n")); + + console.error("Failed to index deployment", serialiedIndexError); + + await cliApiClient.failDeployment(deploymentId, { error: serialiedIndexError }); + + process.exit(1); + } +} + +const results = await bootstrap(); + +await indexDeployment(results); diff --git a/packages/cli-v3/src/entryPoints/unmanaged-index-worker.ts b/packages/cli-v3/src/entryPoints/unmanaged-index-worker.ts new file mode 100644 index 0000000000..2ef18444eb --- /dev/null +++ b/packages/cli-v3/src/entryPoints/unmanaged-index-worker.ts @@ -0,0 +1,170 @@ +import { + BuildManifest, + type HandleErrorFunction, + indexerToWorkerMessages, + taskCatalog, + type TaskManifest, + TriggerConfig, +} from "@trigger.dev/core/v3"; +import { + StandardTaskCatalog, + TracingDiagnosticLogLevel, + TracingSDK, +} from "@trigger.dev/core/v3/workers"; +import { sendMessageInCatalog, ZodSchemaParsedError } from "@trigger.dev/core/v3/zodMessageHandler"; +import { readFile } from "node:fs/promises"; +import sourceMapSupport from "source-map-support"; +import { registerTasks } from "../indexing/registerTasks.js"; +import { env } from "std-env"; +import { normalizeImportPath } from "../utilities/normalizeImportPath.js"; + +sourceMapSupport.install({ + handleUncaughtExceptions: false, + environment: "node", + hookRequire: false, +}); + +process.on("uncaughtException", function (error, origin) { + if (error instanceof Error) { + process.send && + process.send({ + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { name: error.name, message: error.message, stack: error.stack }, + origin, + }, + version: "v1", + }); + } else { + process.send && + process.send({ + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { + name: "Error", + message: typeof error 
=== "string" ? error : JSON.stringify(error), + }, + origin, + }, + version: "v1", + }); + } +}); + +taskCatalog.setGlobalTaskCatalog(new StandardTaskCatalog()); + +async function importConfig( + configPath: string +): Promise<{ config: TriggerConfig; handleError?: HandleErrorFunction }> { + const configModule = await import(normalizeImportPath(configPath)); + + const config = configModule?.default ?? configModule?.config; + + return { + config, + handleError: configModule?.handleError, + }; +} + +async function loadBuildManifest() { + const manifestContents = await readFile(env.TRIGGER_BUILD_MANIFEST_PATH!, "utf-8"); + const raw = JSON.parse(manifestContents); + + return BuildManifest.parse(raw); +} + +async function bootstrap() { + const buildManifest = await loadBuildManifest(); + + const { config } = await importConfig(buildManifest.configPath); + + // This needs to run or the PrismaInstrumentation will throw an error + const tracingSDK = new TracingSDK({ + url: env.OTEL_EXPORTER_OTLP_ENDPOINT ?? "http://0.0.0.0:4318", + instrumentations: config.instrumentations ?? [], + diagLogLevel: (env.OTEL_LOG_LEVEL as TracingDiagnosticLogLevel) ?? 
"none", + forceFlushTimeoutMillis: 30_000, + }); + + const importErrors = await registerTasks(buildManifest); + + return { + tracingSDK, + config, + buildManifest, + importErrors, + }; +} + +const { buildManifest, importErrors, config } = await bootstrap(); + +let tasks = taskCatalog.listTaskManifests(); + +// If the config has retry defaults, we need to apply them to all tasks that don't have any retry settings +if (config.retries?.default) { + tasks = tasks.map((task) => { + if (!task.retry) { + return { + ...task, + retry: config.retries?.default, + } satisfies TaskManifest; + } + + return task; + }); +} + +// If the config has a maxDuration, we need to apply it to all tasks that don't have a maxDuration +if (typeof config.maxDuration === "number") { + tasks = tasks.map((task) => { + if (typeof task.maxDuration !== "number") { + return { + ...task, + maxDuration: config.maxDuration, + } satisfies TaskManifest; + } + + return task; + }); +} + +await sendMessageInCatalog( + indexerToWorkerMessages, + "INDEX_COMPLETE", + { + manifest: { + tasks, + configPath: buildManifest.configPath, + runtime: buildManifest.runtime, + workerEntryPoint: buildManifest.runWorkerEntryPoint, + controllerEntryPoint: buildManifest.runControllerEntryPoint, + loaderEntryPoint: buildManifest.loaderEntryPoint, + customConditions: buildManifest.customConditions, + }, + importErrors, + }, + async (msg) => { + process.send?.(msg); + } +).catch((err) => { + if (err instanceof ZodSchemaParsedError) { + return sendMessageInCatalog( + indexerToWorkerMessages, + "TASKS_FAILED_TO_PARSE", + { zodIssues: err.error.issues, tasks }, + async (msg) => { + process.send?.(msg); + } + ); + } else { + console.error("Failed to send TASKS_READY message", err); + } + + return; +}); + +await new Promise((resolve) => { + setTimeout(() => { + resolve(); + }, 10); +}); diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts new file mode 
100644 index 0000000000..6d3f4c0ec8 --- /dev/null +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts @@ -0,0 +1,172 @@ +import { logger } from "../utilities/logger.js"; +import { TaskRunProcess } from "../executions/taskRunProcess.js"; +import { env as stdEnv } from "std-env"; +import { z } from "zod"; +import { CLOUD_API_URL } from "../consts.js"; +import { randomUUID } from "crypto"; +import { readJSONFile } from "../utilities/fileSystem.js"; +import { WorkerManifest } from "@trigger.dev/core/v3"; +import { WorkerSession } from "@trigger.dev/worker"; + +const Env = z.object({ + TRIGGER_API_URL: z.string().default(CLOUD_API_URL), + TRIGGER_CONTENT_HASH: z.string(), + TRIGGER_WORKER_TOKEN: z.string(), + TRIGGER_WORKER_INSTANCE_NAME: z.string().default(randomUUID()), + TRIGGER_DEPLOYMENT_ID: z.string(), + TRIGGER_DEPLOYMENT_VERSION: z.string(), + NODE_ENV: z.string().default("production"), + NODE_EXTRA_CA_CERTS: z.string().optional(), + OTEL_EXPORTER_OTLP_ENDPOINT: z.string().default("http://0.0.0.0:3030/otel"), +}); + +const env = Env.parse(stdEnv); + +logger.loggerLevel = "debug"; +logger.debug("Creating unmanaged worker", { env }); + +class UnmanagedWorker { + private readonly session: WorkerSession; + private taskRunProcess?: TaskRunProcess; + + constructor(private workerManifest: WorkerManifest) { + this.session = new WorkerSession({ + workerToken: env.TRIGGER_WORKER_TOKEN, + apiUrl: env.TRIGGER_API_URL, + instanceName: env.TRIGGER_WORKER_INSTANCE_NAME, + deploymentId: env.TRIGGER_DEPLOYMENT_ID, + dequeueIntervalMs: 1000, + }); + + const traceContext = new Map>(); + + this.session.on("runQueueMessage", async ({ time, message }) => { + logger.debug("[UnmanagedWorker] Received runQueueMessage", { time, message }); + + traceContext.set(message.run.id, message.run.traceContext); + + this.session.emit("requestRunAttemptStart", { + time: new Date(), + run: { + id: message.run.id, + }, + snapshot: { + id: message.snapshot.id, + }, + }); + }); + + 
this.session.on("runAttemptStarted", async ({ time, run, snapshot, execution, envVars }) => { + const taskRunEnv = { + ...gatherProcessEnv(), + ...envVars, + }; + + this.taskRunProcess = new TaskRunProcess({ + workerManifest: this.workerManifest, + env: taskRunEnv, + serverWorker: { + id: "unmanaged", + contentHash: env.TRIGGER_CONTENT_HASH, + version: env.TRIGGER_DEPLOYMENT_VERSION, + }, + payload: { + execution, + // TODO: The run engine could return this when the run is started + traceContext: traceContext.get(run.id) ?? {}, + }, + messageId: run.id, + }); + + try { + await this.taskRunProcess.initialize(); + + logger.log("executing task run process", { + attemptId: execution.attempt.id, + runId: execution.run.id, + }); + + const completion = await this.taskRunProcess.execute(); + + logger.log("completed", completion); + + try { + await this.taskRunProcess.cleanup(true); + } catch (error) { + logger.error("Failed to cleanup task run process, submitting completion anyway", { + error, + }); + } + + this.session.emit("runAttemptCompleted", { + time: new Date(), + run: { + id: run.id, + }, + snapshot: { + id: snapshot.id, + }, + completion, + }); + } catch (error) { + logger.error("Failed to complete lazy attempt", { + error, + }); + + this.session.emit("runAttemptCompleted", { + time: new Date(), + run: { + id: run.id, + }, + snapshot: { + id: snapshot.id, + }, + completion: { + id: execution.run.id, + ok: false, + retry: undefined, + error: TaskRunProcess.parseExecuteError(error), + }, + }); + } + }); + + process.on("SIGTERM", async () => { + logger.debug("[UnmanagedWorker] Received SIGTERM, stopping worker"); + await this.stop(); + }); + } + + async start() { + logger.debug("[UnmanagedWorker] Starting up"); + await this.session.start(); + } + + async stop() { + logger.debug("[UnmanagedWorker] Shutting down"); + await this.session.stop(); + } +} + +const workerManifest = await loadWorkerManifest(); + +const prodWorker = new UnmanagedWorker(workerManifest); +await 
prodWorker.start(); + +function gatherProcessEnv(): Record { + const $env = { + NODE_ENV: env.NODE_ENV, + NODE_EXTRA_CA_CERTS: env.NODE_EXTRA_CA_CERTS, + OTEL_EXPORTER_OTLP_ENDPOINT: env.OTEL_EXPORTER_OTLP_ENDPOINT, + }; + + // Filter out undefined values + return Object.fromEntries( + Object.entries($env).filter(([key, value]) => value !== undefined) + ) as Record; +} + +async function loadWorkerManifest() { + const manifest = await readJSONFile("./index.json"); + return WorkerManifest.parse(manifest); +} diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts new file mode 100644 index 0000000000..1cec91e530 --- /dev/null +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts @@ -0,0 +1,468 @@ +import type { Tracer } from "@opentelemetry/api"; +import type { Logger } from "@opentelemetry/api-logs"; +import { + clock, + ExecutorToWorkerMessageCatalog, + type HandleErrorFunction, + logger, + LogLevel, + runMetadata, + runtime, + taskCatalog, + TaskRunErrorCodes, + TaskRunExecution, + timeout, + TriggerConfig, + WorkerManifest, + WorkerToExecutorMessageCatalog, +} from "@trigger.dev/core/v3"; +import { UnmanagedRuntimeManager } from "@trigger.dev/core/v3/unmanaged"; +import { TriggerTracer } from "@trigger.dev/core/v3/tracer"; +import { + ConsoleInterceptor, + DevUsageManager, + DurableClock, + getEnvVar, + getNumberEnvVar, + logLevels, + OtelTaskLogger, + ProdUsageManager, + StandardMetadataManager, + StandardTaskCatalog, + TaskExecutor, + TracingDiagnosticLogLevel, + TracingSDK, + usage, + UsageTimeoutManager, +} from "@trigger.dev/core/v3/workers"; +import { ZodIpcConnection } from "@trigger.dev/core/v3/zodIpc"; +import { readFile } from "node:fs/promises"; +import { setInterval, setTimeout } from "node:timers/promises"; +import sourceMapSupport from "source-map-support"; +import { env } from "std-env"; +import { normalizeImportPath } from "../utilities/normalizeImportPath.js"; +import 
{ VERSION } from "../version.js"; + +sourceMapSupport.install({ + handleUncaughtExceptions: false, + environment: "node", + hookRequire: false, +}); + +process.on("uncaughtException", function (error, origin) { + if (error instanceof Error) { + process.send && + process.send({ + type: "EVENT", + message: { + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { name: error.name, message: error.message, stack: error.stack }, + origin, + }, + version: "v1", + }, + }); + } else { + process.send && + process.send({ + type: "EVENT", + message: { + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { + name: "Error", + message: typeof error === "string" ? error : JSON.stringify(error), + }, + origin, + }, + version: "v1", + }, + }); + } +}); + +const usageIntervalMs = getEnvVar("USAGE_HEARTBEAT_INTERVAL_MS"); +const usageEventUrl = getEnvVar("USAGE_EVENT_URL"); +const triggerJWT = getEnvVar("TRIGGER_JWT"); +const heartbeatIntervalMs = getEnvVar("HEARTBEAT_INTERVAL_MS"); + +const devUsageManager = new DevUsageManager(); +const prodUsageManager = new ProdUsageManager(devUsageManager, { + heartbeatIntervalMs: usageIntervalMs ? parseInt(usageIntervalMs, 10) : undefined, + url: usageEventUrl, + jwt: triggerJWT, +}); + +usage.setGlobalUsageManager(prodUsageManager); +timeout.setGlobalManager(new UsageTimeoutManager(devUsageManager)); + +taskCatalog.setGlobalTaskCatalog(new StandardTaskCatalog()); +const durableClock = new DurableClock(); +clock.setGlobalClock(durableClock); +const runMetadataManager = new StandardMetadataManager(); +runMetadata.setGlobalManager(runMetadataManager); + +const triggerLogLevel = getEnvVar("TRIGGER_LOG_LEVEL"); + +async function importConfig( + configPath: string +): Promise<{ config: TriggerConfig; handleError?: HandleErrorFunction }> { + const configModule = await import(configPath); + + const config = configModule?.default ?? 
configModule?.config; + + return { + config, + handleError: configModule?.handleError, + }; +} + +async function loadWorkerManifest() { + const manifestContents = await readFile("./index.json", "utf-8"); + const raw = JSON.parse(manifestContents); + + return WorkerManifest.parse(raw); +} + +async function bootstrap() { + const workerManifest = await loadWorkerManifest(); + + const { config, handleError } = await importConfig( + normalizeImportPath(workerManifest.configPath) + ); + + const tracingSDK = new TracingSDK({ + url: env.OTEL_EXPORTER_OTLP_ENDPOINT ?? "http://0.0.0.0:4318", + instrumentations: config.instrumentations ?? [], + diagLogLevel: (env.OTEL_LOG_LEVEL as TracingDiagnosticLogLevel) ?? "none", + forceFlushTimeoutMillis: 30_000, + }); + + const otelTracer: Tracer = tracingSDK.getTracer("trigger-dev-worker", VERSION); + const otelLogger: Logger = tracingSDK.getLogger("trigger-dev-worker", VERSION); + + const tracer = new TriggerTracer({ tracer: otelTracer, logger: otelLogger }); + const consoleInterceptor = new ConsoleInterceptor( + otelLogger, + typeof config.enableConsoleLogging === "boolean" ? config.enableConsoleLogging : true + ); + + const configLogLevel = triggerLogLevel ?? config.logLevel ?? "info"; + + const otelTaskLogger = new OtelTaskLogger({ + logger: otelLogger, + tracer: tracer, + level: logLevels.includes(configLogLevel as any) ? 
(configLogLevel as LogLevel) : "info", + }); + + logger.setGlobalTaskLogger(otelTaskLogger); + + for (const task of workerManifest.tasks) { + taskCatalog.registerTaskFileMetadata(task.id, { + exportName: task.exportName, + filePath: task.filePath, + entryPoint: task.entryPoint, + }); + } + + return { + tracer, + tracingSDK, + consoleInterceptor, + config, + handleErrorFn: handleError, + workerManifest, + }; +} + +let _execution: TaskRunExecution | undefined; +let _isRunning = false; +let _tracingSDK: TracingSDK | undefined; + +const zodIpc = new ZodIpcConnection({ + listenSchema: WorkerToExecutorMessageCatalog, + emitSchema: ExecutorToWorkerMessageCatalog, + process, + handlers: { + EXECUTE_TASK_RUN: async ({ execution, traceContext, metadata }, sender) => { + console.log(`[${new Date().toISOString()}] Received EXECUTE_TASK_RUN`, execution); + + if (_isRunning) { + console.error("Worker is already running a task"); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.TASK_ALREADY_RUNNING, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + try { + const { tracer, tracingSDK, consoleInterceptor, config, handleErrorFn, workerManifest } = + await bootstrap(); + + _tracingSDK = tracingSDK; + + const taskManifest = workerManifest.tasks.find((t) => t.id === execution.task.id); + + if (!taskManifest) { + console.error(`Could not find task ${execution.task.id}`); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.COULD_NOT_FIND_TASK, + message: `Could not find task ${execution.task.id}. 
Make sure the task is exported and the ID is correct.`, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + try { + const beforeImport = performance.now(); + await import(normalizeImportPath(taskManifest.entryPoint)); + const durationMs = performance.now() - beforeImport; + + console.log( + `Imported task ${execution.task.id} [${taskManifest.entryPoint}] in ${durationMs}ms` + ); + } catch (err) { + console.error(`Failed to import task ${execution.task.id}`, err); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.COULD_NOT_IMPORT_TASK, + message: err instanceof Error ? err.message : String(err), + stackTrace: err instanceof Error ? err.stack : undefined, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + process.title = `trigger-dev-worker: ${execution.task.id} ${execution.run.id}`; + + // Import the task module + const task = taskCatalog.getTask(execution.task.id); + + if (!task) { + console.error(`Could not find task ${execution.task.id}`); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.COULD_NOT_FIND_EXECUTOR, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + const executor = new TaskExecutor(task, { + tracer, + tracingSDK, + consoleInterceptor, + config, + handleErrorFn, + }); + + try { + _execution = execution; + _isRunning = true; + + runMetadataManager.startPeriodicFlush( + getNumberEnvVar("TRIGGER_RUN_METADATA_FLUSH_INTERVAL", 1000) + ); + + const measurement = usage.start(); + + // This lives outside of the executor because this will eventually be moved to the controller level + const signal = execution.run.maxDuration + ? 
timeout.abortAfterTimeout(execution.run.maxDuration) + : undefined; + + signal?.addEventListener("abort", async (e) => { + if (_isRunning) { + _isRunning = false; + _execution = undefined; + + const usageSample = usage.stop(measurement); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.MAX_DURATION_EXCEEDED, + message: + signal.reason instanceof Error + ? signal.reason.message + : String(signal.reason), + }, + usage: { + durationMs: usageSample.cpuTime, + }, + }, + }); + } + }); + + const { result } = await executor.execute( + execution, + metadata, + traceContext, + measurement, + signal + ); + + const usageSample = usage.stop(measurement); + + if (_isRunning) { + return sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ...result, + usage: { + durationMs: usageSample.cpuTime, + }, + }, + }); + } + } finally { + _execution = undefined; + _isRunning = false; + } + } catch (err) { + console.error("Failed to execute task", err); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.CONFIGURED_INCORRECTLY, + }, + usage: { + durationMs: 0, + }, + }, + }); + } + }, + TASK_RUN_COMPLETED_NOTIFICATION: async () => { + await unmanagedWorkerRuntime.completeWaitpoints([]); + }, + WAIT_COMPLETED_NOTIFICATION: async () => { + await unmanagedWorkerRuntime.completeWaitpoints([]); + }, + FLUSH: async ({ timeoutInMs }, sender) => { + await flushAll(timeoutInMs); + }, + }, +}); + +async function flushAll(timeoutInMs: number = 10_000) { + const now = performance.now(); + + await Promise.all([ + flushUsage(timeoutInMs), + flushTracingSDK(timeoutInMs), + flushMetadata(timeoutInMs), + ]); + + const duration = performance.now() - now; + + console.log(`Flushed all in ${duration}ms`); +} + +async function flushUsage(timeoutInMs: number = 
10_000) { + const now = performance.now(); + + await Promise.race([prodUsageManager.flush(), setTimeout(timeoutInMs)]); + + const duration = performance.now() - now; + + console.log(`Flushed usage in ${duration}ms`); +} + +async function flushTracingSDK(timeoutInMs: number = 10_000) { + const now = performance.now(); + + await Promise.race([_tracingSDK?.flush(), setTimeout(timeoutInMs)]); + + const duration = performance.now() - now; + + console.log(`Flushed tracingSDK in ${duration}ms`); +} + +async function flushMetadata(timeoutInMs: number = 10_000) { + const now = performance.now(); + + await Promise.race([runMetadataManager.flush(), setTimeout(timeoutInMs)]); + + const duration = performance.now() - now; + + console.log(`Flushed runMetadata in ${duration}ms`); +} + +const unmanagedWorkerRuntime = new UnmanagedRuntimeManager(); + +runtime.setGlobalRuntimeManager(unmanagedWorkerRuntime); + +process.title = "trigger-unmanaged-worker"; + +const heartbeatInterval = parseInt(heartbeatIntervalMs ?? 
"30000", 10); + +for await (const _ of setInterval(heartbeatInterval)) { + if (_isRunning && _execution) { + try { + await zodIpc.send("TASK_HEARTBEAT", { id: _execution.attempt.id }); + } catch (err) { + console.error("Failed to send HEARTBEAT message", err); + } + } +} + +console.log(`[${new Date().toISOString()}] Executor started`); diff --git a/packages/core/package.json b/packages/core/package.json index a0265644cf..efd5673892 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -55,6 +55,7 @@ "./v3/utils/timers": "./src/v3/utils/timers.ts", "./v3/dev": "./src/v3/dev/index.ts", "./v3/prod": "./src/v3/prod/index.ts", + "./v3/unmanaged": "./src/v3/unmanaged/index.ts", "./v3/workers": "./src/v3/workers/index.ts", "./v3/schemas": "./src/v3/schemas/index.ts" }, @@ -160,6 +161,9 @@ "v3/prod": [ "dist/commonjs/v3/prod/index.d.ts" ], + "v3/unmanaged": [ + "dist/commonjs/v3/unmanaged/index.d.ts" + ], "v3/workers": [ "dist/commonjs/v3/workers/index.d.ts" ], @@ -602,6 +606,17 @@ "default": "./dist/commonjs/v3/prod/index.js" } }, + "./v3/unmanaged": { + "import": { + "@triggerdotdev/source": "./src/v3/unmanaged/index.ts", + "types": "./dist/esm/v3/unmanaged/index.d.ts", + "default": "./dist/esm/v3/unmanaged/index.js" + }, + "require": { + "types": "./dist/commonjs/v3/unmanaged/index.d.ts", + "default": "./dist/commonjs/v3/unmanaged/index.js" + } + }, "./v3/workers": { "import": { "@triggerdotdev/source": "./src/v3/workers/index.ts", diff --git a/packages/core/src/v3/runtime/unmanagedRuntimeManager.ts b/packages/core/src/v3/runtime/unmanagedRuntimeManager.ts new file mode 100644 index 0000000000..88b0350590 --- /dev/null +++ b/packages/core/src/v3/runtime/unmanagedRuntimeManager.ts @@ -0,0 +1,81 @@ +import { + BatchTaskRunExecutionResult, + TaskRunContext, + TaskRunExecutionResult, +} from "../schemas/index.js"; +import { RuntimeManager } from "./manager.js"; +import { unboundedTimeout } from "../utils/timers.js"; + +type Waitpoint = any; + +export 
class UnmanagedRuntimeManager implements RuntimeManager { + private readonly waitpoints: Map = new Map(); + + _taskWaits: Map void }> = new Map(); + + _batchWaits: Map< + string, + { resolve: (value: BatchTaskRunExecutionResult) => void; reject: (err?: any) => void } + > = new Map(); + + disable(): void { + // do nothing + } + + async waitForDuration(ms: number): Promise { + await unboundedTimeout(ms); + } + + async waitUntil(date: Date): Promise { + return this.waitForDuration(date.getTime() - Date.now()); + } + + async waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { + const promise = new Promise((resolve) => { + this._taskWaits.set(params.id, { resolve }); + }); + + return await promise; + } + + async waitForBatch(params: { + id: string; + runs: string[]; + ctx: TaskRunContext; + }): Promise { + if (!params.runs.length) { + return Promise.resolve({ id: params.id, items: [] }); + } + + const promise = Promise.all( + params.runs.map((runId) => { + return new Promise((resolve, reject) => { + this._taskWaits.set(runId, { resolve }); + }); + }) + ); + + const results = await promise; + + return { + id: params.id, + items: results, + }; + } + + async completeWaitpoints(waitpoints: Waitpoint[]): Promise { + await Promise.all(waitpoints.map((waitpoint) => this.completeWaitpoint(waitpoint))); + } + + private completeWaitpoint(waitpoint: Waitpoint): void { + const wait = this._taskWaits.get(waitpoint.id); + + if (!wait) { + return; + } + + wait.resolve(waitpoint.completion); + + this._taskWaits.delete(waitpoint.id); + } +} diff --git a/packages/core/src/v3/schemas/build.ts b/packages/core/src/v3/schemas/build.ts index de1a1b35f7..0b9431656d 100644 --- a/packages/core/src/v3/schemas/build.ts +++ b/packages/core/src/v3/schemas/build.ts @@ -9,7 +9,7 @@ export const BuildExternal = z.object({ export type BuildExternal = z.infer; -export const BuildTarget = z.enum(["dev", "deploy"]); +export const BuildTarget = z.enum(["dev", "deploy", "unmanaged"]); export 
type BuildTarget = z.infer; diff --git a/packages/core/src/v3/unmanaged/index.ts b/packages/core/src/v3/unmanaged/index.ts new file mode 100644 index 0000000000..57123d4bb5 --- /dev/null +++ b/packages/core/src/v3/unmanaged/index.ts @@ -0,0 +1 @@ +export { UnmanagedRuntimeManager } from "../runtime/unmanagedRuntimeManager.js"; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 541791a32e..a632844117 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1148,6 +1148,9 @@ importers: '@trigger.dev/core': specifier: workspace:3.1.2 version: link:../core + '@trigger.dev/worker': + specifier: workspace:3.1.2 + version: link:../worker c12: specifier: ^1.11.1 version: 1.11.1(magicast@0.3.4) From 65f471cc85bbd656877874641fa3c5ca74724760 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:16:49 +0000 Subject: [PATCH 209/485] bump worker version --- packages/worker/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/worker/package.json b/packages/worker/package.json index 41182733ca..bfe1b05747 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -1,6 +1,6 @@ { "name": "@trigger.dev/worker", - "version": "3.1.2", + "version": "3.2.0", "description": "trigger.dev worker", "license": "MIT", "publishConfig": { From f084df1a1709ade945f0719462e4578fad4cc326 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:31:04 +0000 Subject: [PATCH 210/485] update lockfile after worker bump --- pnpm-lock.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 361406c0a6..f72ed7e5c7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1152,7 +1152,7 @@ importers: specifier: workspace:3.2.0 version: link:../core '@trigger.dev/worker': - specifier: workspace:3.1.2 + specifier: workspace:3.2.0 version: link:../worker c12: specifier: ^1.11.1 From 
9003b1808266e4041a08b3f6f888ff4a99582b8e Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 21 Nov 2024 15:32:04 +0000 Subject: [PATCH 211/485] add v2 run cancellation --- apps/webapp/app/v3/runEngine.server.ts | 14 ++ .../app/v3/services/cancelTaskRun.server.ts | 201 +++--------------- .../app/v3/services/cancelTaskRunV1.server.ts | 186 ++++++++++++++++ 3 files changed, 233 insertions(+), 168 deletions(-) create mode 100644 apps/webapp/app/v3/services/cancelTaskRunV1.server.ts diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 443cd0f5a3..8bdeddd939 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -4,6 +4,7 @@ import { env } from "~/env.server"; import { tracer } from "./tracer.server"; import { singleton } from "~/utils/singleton"; import { eventRepository } from "./eventRepository.server"; +import { createJsonErrorObject } from "@trigger.dev/core/v3"; export const engine = singleton("RunEngine", createRunEngine); @@ -51,5 +52,18 @@ function createRunEngine() { }); }); + engine.eventBus.on("runCancelled", async ({ time, run }) => { + const inProgressEvents = await eventRepository.queryIncompleteEvents({ + runId: run.friendlyId, + }); + + await Promise.all( + inProgressEvents.map((event) => { + const error = createJsonErrorObject(run.error); + return eventRepository.cancelEvent(event, time, error.message); + }) + ); + }); + return engine; } diff --git a/apps/webapp/app/v3/services/cancelTaskRun.server.ts b/apps/webapp/app/v3/services/cancelTaskRun.server.ts index a0f37ab23b..32387994fc 100644 --- a/apps/webapp/app/v3/services/cancelTaskRun.server.ts +++ b/apps/webapp/app/v3/services/cancelTaskRun.server.ts @@ -1,27 +1,7 @@ -import { type Prisma, type TaskRun } from "@trigger.dev/database"; -import assertNever from "assert-never"; -import { logger } from "~/services/logger.server"; -import { eventRepository } from 
"../eventRepository.server"; -import { socketIo } from "../handleSocketIo.server"; -import { devPubSub } from "../marqs/devPubSub.server"; -import { CANCELLABLE_ATTEMPT_STATUSES, isCancellableRunStatus } from "../taskStatus"; +import { RunEngineVersion, type TaskRun } from "@trigger.dev/database"; import { BaseService } from "./baseService.server"; -import { CancelAttemptService } from "./cancelAttempt.server"; -import { CancelTaskAttemptDependenciesService } from "./cancelTaskAttemptDependencies.server"; -import { FinalizeTaskRunService } from "./finalizeTaskRun.server"; - -type ExtendedTaskRun = Prisma.TaskRunGetPayload<{ - include: { - runtimeEnvironment: true; - lockedToVersion: true; - }; -}>; - -type ExtendedTaskRunAttempt = Prisma.TaskRunAttemptGetPayload<{ - include: { - backgroundWorker: true; - }; -}>; +import { CancelTaskRunServiceV1 } from "./cancelTaskRunV1.server"; +import { engine } from "../runEngine.server"; export type CancelTaskRunServiceOptions = { reason?: string; @@ -29,158 +9,43 @@ export type CancelTaskRunServiceOptions = { cancelledAt?: Date; }; -export class CancelTaskRunService extends BaseService { - public async call(taskRun: TaskRun, options?: CancelTaskRunServiceOptions) { - const opts = { - reason: "Task run was cancelled by user", - cancelAttempts: true, - cancelledAt: new Date(), - ...options, - }; +type CancelTaskRunServiceResult = { + id: string; +}; - // Make sure the task run is in a cancellable state - if (!isCancellableRunStatus(taskRun.status)) { - logger.error("Task run is not in a cancellable state", { - runId: taskRun.id, - status: taskRun.status, - }); - return; +export class CancelTaskRunService extends BaseService { + public async call( + taskRun: TaskRun, + options?: CancelTaskRunServiceOptions + ): Promise { + if (taskRun.engine === RunEngineVersion.V1) { + return await this.callV1(taskRun, options); + } else { + return await this.callV2(taskRun, options); } + } - const finalizeService = new FinalizeTaskRunService(); 
- const cancelledTaskRun = await finalizeService.call({ - id: taskRun.id, - status: "CANCELED", - completedAt: opts.cancelledAt, - include: { - attempts: { - where: { - status: { - in: CANCELLABLE_ATTEMPT_STATUSES, - }, - }, - include: { - backgroundWorker: true, - dependencies: { - include: { - taskRun: true, - }, - }, - batchTaskRunItems: { - include: { - taskRun: true, - }, - }, - }, - }, - runtimeEnvironment: true, - lockedToVersion: true, - }, - attemptStatus: "CANCELED", - error: { - type: "STRING_ERROR", - raw: opts.reason, - }, - }); - - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: taskRun.friendlyId, - }); + private async callV1( + taskRun: TaskRun, + options?: CancelTaskRunServiceOptions + ): Promise { + const service = new CancelTaskRunServiceV1(this._prisma); + return await service.call(taskRun, options); + } - logger.debug("Cancelling in-progress events", { - inProgressEvents: inProgressEvents.map((event) => event.id), + private async callV2( + taskRun: TaskRun, + options?: CancelTaskRunServiceOptions + ): Promise { + const result = await engine.cancelRun({ + runId: taskRun.id, + completedAt: options?.cancelledAt, + reason: options?.reason, + tx: this._prisma, }); - await Promise.all( - inProgressEvents.map((event) => { - return eventRepository.cancelEvent(event, opts.cancelledAt, opts.reason); - }) - ); - - // Cancel any in progress attempts - if (opts.cancelAttempts) { - await this.#cancelPotentiallyRunningAttempts(cancelledTaskRun, cancelledTaskRun.attempts); - await this.#cancelRemainingRunWorkers(cancelledTaskRun); - } - return { - id: cancelledTaskRun.id, + id: result.run.id, }; } - - async #cancelPotentiallyRunningAttempts( - run: ExtendedTaskRun, - attempts: ExtendedTaskRunAttempt[] - ) { - for (const attempt of attempts) { - await CancelTaskAttemptDependenciesService.enqueue(attempt.id, this._prisma); - - if (run.runtimeEnvironment.type === "DEVELOPMENT") { - // Signal the task run attempt to stop - await 
devPubSub.publish( - `backgroundWorker:${attempt.backgroundWorkerId}:${attempt.id}`, - "CANCEL_ATTEMPT", - { - attemptId: attempt.friendlyId, - backgroundWorkerId: attempt.backgroundWorker.friendlyId, - taskRunId: run.friendlyId, - } - ); - } else { - switch (attempt.status) { - case "EXECUTING": { - // We need to send a cancel message to the coordinator - socketIo.coordinatorNamespace.emit("REQUEST_ATTEMPT_CANCELLATION", { - version: "v1", - attemptId: attempt.id, - attemptFriendlyId: attempt.friendlyId, - }); - - break; - } - case "PENDING": - case "PAUSED": { - logger.debug("Cancelling pending or paused attempt", { - attempt, - }); - - const service = new CancelAttemptService(); - - await service.call( - attempt.friendlyId, - run.id, - new Date(), - "Task run was cancelled by user" - ); - - break; - } - case "CANCELED": - case "COMPLETED": - case "FAILED": { - // Do nothing - break; - } - default: { - assertNever(attempt.status); - } - } - } - } - } - - async #cancelRemainingRunWorkers(run: ExtendedTaskRun) { - if (run.runtimeEnvironment.type === "DEVELOPMENT") { - // Nothing to do - return; - } - - // Broadcast cancel message to all coordinators - socketIo.coordinatorNamespace.emit("REQUEST_RUN_CANCELLATION", { - version: "v1", - runId: run.id, - // Give the attempts some time to exit gracefully. If the runs supports lazy attempts, it also supports exit delays. - delayInMs: run.lockedToVersion?.supportsLazyAttempts ? 
5_000 : undefined, - }); - } } diff --git a/apps/webapp/app/v3/services/cancelTaskRunV1.server.ts b/apps/webapp/app/v3/services/cancelTaskRunV1.server.ts new file mode 100644 index 0000000000..f25a0da86c --- /dev/null +++ b/apps/webapp/app/v3/services/cancelTaskRunV1.server.ts @@ -0,0 +1,186 @@ +import { type Prisma, type TaskRun } from "@trigger.dev/database"; +import assertNever from "assert-never"; +import { logger } from "~/services/logger.server"; +import { eventRepository } from "../eventRepository.server"; +import { socketIo } from "../handleSocketIo.server"; +import { devPubSub } from "../marqs/devPubSub.server"; +import { CANCELLABLE_ATTEMPT_STATUSES, isCancellableRunStatus } from "../taskStatus"; +import { BaseService } from "./baseService.server"; +import { CancelAttemptService } from "./cancelAttempt.server"; +import { CancelTaskAttemptDependenciesService } from "./cancelTaskAttemptDependencies.server"; +import { FinalizeTaskRunService } from "./finalizeTaskRun.server"; + +type ExtendedTaskRun = Prisma.TaskRunGetPayload<{ + include: { + runtimeEnvironment: true; + lockedToVersion: true; + }; +}>; + +type ExtendedTaskRunAttempt = Prisma.TaskRunAttemptGetPayload<{ + include: { + backgroundWorker: true; + }; +}>; + +export type CancelTaskRunServiceOptions = { + reason?: string; + cancelAttempts?: boolean; + cancelledAt?: Date; +}; + +export class CancelTaskRunServiceV1 extends BaseService { + public async call(taskRun: TaskRun, options?: CancelTaskRunServiceOptions) { + const opts = { + reason: "Task run was cancelled by user", + cancelAttempts: true, + cancelledAt: new Date(), + ...options, + }; + + // Make sure the task run is in a cancellable state + if (!isCancellableRunStatus(taskRun.status)) { + logger.error("Task run is not in a cancellable state", { + runId: taskRun.id, + status: taskRun.status, + }); + return; + } + + const finalizeService = new FinalizeTaskRunService(); + const cancelledTaskRun = await finalizeService.call({ + id: taskRun.id, + 
status: "CANCELED", + completedAt: opts.cancelledAt, + include: { + attempts: { + where: { + status: { + in: CANCELLABLE_ATTEMPT_STATUSES, + }, + }, + include: { + backgroundWorker: true, + dependencies: { + include: { + taskRun: true, + }, + }, + batchTaskRunItems: { + include: { + taskRun: true, + }, + }, + }, + }, + runtimeEnvironment: true, + lockedToVersion: true, + }, + attemptStatus: "CANCELED", + error: { + type: "STRING_ERROR", + raw: opts.reason, + }, + }); + + const inProgressEvents = await eventRepository.queryIncompleteEvents({ + runId: taskRun.friendlyId, + }); + + logger.debug("Cancelling in-progress events", { + inProgressEvents: inProgressEvents.map((event) => event.id), + }); + + await Promise.all( + inProgressEvents.map((event) => { + return eventRepository.cancelEvent(event, opts.cancelledAt, opts.reason); + }) + ); + + // Cancel any in progress attempts + if (opts.cancelAttempts) { + await this.#cancelPotentiallyRunningAttempts(cancelledTaskRun, cancelledTaskRun.attempts); + await this.#cancelRemainingRunWorkers(cancelledTaskRun); + } + + return { + id: cancelledTaskRun.id, + }; + } + + async #cancelPotentiallyRunningAttempts( + run: ExtendedTaskRun, + attempts: ExtendedTaskRunAttempt[] + ) { + for (const attempt of attempts) { + await CancelTaskAttemptDependenciesService.enqueue(attempt.id, this._prisma); + + if (run.runtimeEnvironment.type === "DEVELOPMENT") { + // Signal the task run attempt to stop + await devPubSub.publish( + `backgroundWorker:${attempt.backgroundWorkerId}:${attempt.id}`, + "CANCEL_ATTEMPT", + { + attemptId: attempt.friendlyId, + backgroundWorkerId: attempt.backgroundWorker.friendlyId, + taskRunId: run.friendlyId, + } + ); + } else { + switch (attempt.status) { + case "EXECUTING": { + // We need to send a cancel message to the coordinator + socketIo.coordinatorNamespace.emit("REQUEST_ATTEMPT_CANCELLATION", { + version: "v1", + attemptId: attempt.id, + attemptFriendlyId: attempt.friendlyId, + }); + + break; + } + case 
"PENDING": + case "PAUSED": { + logger.debug("Cancelling pending or paused attempt", { + attempt, + }); + + const service = new CancelAttemptService(); + + await service.call( + attempt.friendlyId, + run.id, + new Date(), + "Task run was cancelled by user" + ); + + break; + } + case "CANCELED": + case "COMPLETED": + case "FAILED": { + // Do nothing + break; + } + default: { + assertNever(attempt.status); + } + } + } + } + } + + async #cancelRemainingRunWorkers(run: ExtendedTaskRun) { + if (run.runtimeEnvironment.type === "DEVELOPMENT") { + // Nothing to do + return; + } + + // Broadcast cancel message to all coordinators + socketIo.coordinatorNamespace.emit("REQUEST_RUN_CANCELLATION", { + version: "v1", + runId: run.id, + // Give the attempts some time to exit gracefully. If the runs supports lazy attempts, it also supports exit delays. + delayInMs: run.lockedToVersion?.supportsLazyAttempts ? 5_000 : undefined, + }); + } +} From d11612e87589feb6b62ca7a922789ab92926f0a3 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 22 Nov 2024 14:57:44 +0000 Subject: [PATCH 212/485] surface master queues in admin view --- .../app/presenters/v3/SpanPresenter.server.ts | 4 ++++ .../route.tsx | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index f921d89e5b..db519bc8dd 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -84,6 +84,8 @@ export class SpanPresenter extends BasePresenter { }, }, engine: true, + masterQueue: true, + secondaryMasterQueue: true, //status + duration status: true, startedAt: true, @@ -314,6 +316,8 @@ export class SpanPresenter extends BasePresenter { metadata, maxDurationInSeconds: getMaxDuration(run.maxDurationInSeconds), engine: run.engine, + masterQueue: run.masterQueue, + secondaryMasterQueue: 
run.secondaryMasterQueue, }; } diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 78a84f6bd1..f6b5345284 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -49,6 +49,7 @@ import { TaskRunStatusCombo } from "~/components/runs/v3/TaskRunStatus"; import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; import { useSearchParams } from "~/hooks/useSearchParam"; +import { useHasAdminAccess } from "~/hooks/useUser"; import { redirectWithErrorMessage } from "~/models/message.server"; import { Span, SpanPresenter, SpanRun } from "~/presenters/v3/SpanPresenter.server"; import { logger } from "~/services/logger.server"; @@ -413,6 +414,7 @@ function RunBody({ }) { const organization = useOrganization(); const project = useProject(); + const isAdmin = useHasAdminAccess(); const { value, replace } = useSearchParams(); const tab = value("tab"); @@ -619,6 +621,18 @@ function RunBody({ Engine version {run.engine} + {isAdmin && ( + <> + + Primary master queue + {run.masterQueue} + + + Secondary master queue + {run.secondaryMasterQueue} + + + )} Test run From 99a33e8f305299053a64bdadd0958416cc4df922 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 24 Nov 2024 16:23:56 +0000 Subject: [PATCH 213/485] run engine compatibility flag and deploy scaffold --- packages/cli-v3/src/build/buildWorker.ts | 2 +- packages/cli-v3/src/build/bundle.ts | 5 + packages/cli-v3/src/build/packageModules.ts | 59 +++ packages/cli-v3/src/commands/deploy.ts | 3 +- 
packages/cli-v3/src/config.ts | 1 + .../entryPoints/managed-index-controller.ts | 117 +++++ .../src/entryPoints/managed-index-worker.ts | 170 +++++++ .../src/entryPoints/managed-run-controller.ts | 172 +++++++ .../src/entryPoints/managed-run-worker.ts | 468 ++++++++++++++++++ packages/core/src/v3/build/resolvedConfig.ts | 3 +- packages/core/src/v3/config.ts | 3 + .../src/v3/runtime/managedRuntimeManager.ts | 81 +++ packages/core/src/v3/schemas/build.ts | 2 +- 13 files changed, 1082 insertions(+), 4 deletions(-) create mode 100644 packages/cli-v3/src/entryPoints/managed-index-controller.ts create mode 100644 packages/cli-v3/src/entryPoints/managed-index-worker.ts create mode 100644 packages/cli-v3/src/entryPoints/managed-run-controller.ts create mode 100644 packages/cli-v3/src/entryPoints/managed-run-worker.ts create mode 100644 packages/core/src/v3/runtime/managedRuntimeManager.ts diff --git a/packages/cli-v3/src/build/buildWorker.ts b/packages/cli-v3/src/build/buildWorker.ts index c4fe0653d1..7c049ff966 100644 --- a/packages/cli-v3/src/build/buildWorker.ts +++ b/packages/cli-v3/src/build/buildWorker.ts @@ -78,7 +78,7 @@ export async function buildWorker(options: BuildWorkerOptions) { buildManifest = await notifyExtensionOnBuildComplete(buildContext, buildManifest); - if (options.target === "deploy" || options.target === "unmanaged") { + if (options.target !== "dev") { buildManifest = options.rewritePaths ? 
rewriteBuildManifestPaths(buildManifest, options.destination) : buildManifest; diff --git a/packages/cli-v3/src/build/bundle.ts b/packages/cli-v3/src/build/bundle.ts index b44939ebee..c125fb46e3 100644 --- a/packages/cli-v3/src/build/bundle.ts +++ b/packages/cli-v3/src/build/bundle.ts @@ -19,6 +19,7 @@ import { isRunWorkerForTarget, shims, telemetryEntryPoint, + managedEntryPoints, unmanagedEntryPoints, } from "./packageModules.js"; import { buildPlugins } from "./plugins.js"; @@ -242,6 +243,10 @@ async function getEntryPoints(target: BuildTarget, config: ResolvedConfig) { projectEntryPoints.push(...deployEntryPoints); break; } + case "managed": { + projectEntryPoints.push(...managedEntryPoints); + break; + } case "unmanaged": { projectEntryPoints.push(...unmanagedEntryPoints); break; diff --git a/packages/cli-v3/src/build/packageModules.ts b/packages/cli-v3/src/build/packageModules.ts index a05468065a..1224404c51 100644 --- a/packages/cli-v3/src/build/packageModules.ts +++ b/packages/cli-v3/src/build/packageModules.ts @@ -6,6 +6,11 @@ import { assertExhaustive } from "../utilities/assertExhaustive.js"; export const devRunWorker = join(sourceDir, "entryPoints", "dev-run-worker.js"); export const devIndexWorker = join(sourceDir, "entryPoints", "dev-index-worker.js"); +export const managedRunController = join(sourceDir, "entryPoints", "managed-run-controller.js"); +export const managedRunWorker = join(sourceDir, "entryPoints", "managed-run-worker.js"); +export const managedIndexController = join(sourceDir, "entryPoints", "managed-index-controller.js"); +export const managedIndexWorker = join(sourceDir, "entryPoints", "managed-index-worker.js"); + export const unmanagedRunController = join(sourceDir, "entryPoints", "unmanaged-run-controller.js"); export const unmanagedRunWorker = join(sourceDir, "entryPoints", "unmanaged-run-worker.js"); export const unmanagedIndexController = join( @@ -23,6 +28,12 @@ export const deployIndexWorker = join(sourceDir, "entryPoints", 
"deploy-index-wo export const telemetryEntryPoint = join(sourceDir, "entryPoints", "loader.js"); export const devEntryPoints = [devRunWorker, devIndexWorker]; +export const managedEntryPoints = [ + managedRunController, + managedRunWorker, + managedIndexController, + managedIndexWorker, +]; export const unmanagedEntryPoints = [ unmanagedRunController, unmanagedRunWorker, @@ -56,6 +67,38 @@ function isDevIndexWorker(entryPoint: string) { ); } +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function isManagedRunController(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/managed-run-controller.js") || + entryPoint.includes("src/entryPoints/managed-run-controller.ts") + ); +} + +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function isManagedRunWorker(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/managed-run-worker.js") || + entryPoint.includes("src/entryPoints/managed-run-worker.ts") + ); +} + +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function isManagedIndexController(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/managed-index-controller.js") || + entryPoint.includes("src/entryPoints/managed-index-controller.ts") + ); +} + +// IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) +function isManagedIndexWorker(entryPoint: string) { + return ( + entryPoint.includes("dist/esm/entryPoints/managed-index-worker.js") || + entryPoint.includes("src/entryPoints/managed-index-worker.ts") + ); +} + // IMPORTANT: this may look like it should not work on Windows, but it does (and changing to using path.join will break stuff) function isUnmanagedRunController(entryPoint: string) 
{ return ( @@ -133,6 +176,8 @@ export function isRunWorkerForTarget(entryPoint: string, target: BuildTarget) { return isDevRunWorker(entryPoint); case "deploy": return isDeployRunWorker(entryPoint); + case "managed": + return isManagedRunWorker(entryPoint); case "unmanaged": return isUnmanagedRunWorker(entryPoint); default: @@ -146,6 +191,8 @@ export function getRunWorkerForTarget(target: BuildTarget) { return devRunWorker; case "deploy": return deployRunWorker; + case "managed": + return managedRunWorker; case "unmanaged": return unmanagedRunWorker; default: @@ -159,6 +206,8 @@ export function isRunControllerForTarget(entryPoint: string, target: BuildTarget return false; case "deploy": return isDeployRunController(entryPoint); + case "managed": + return isManagedRunController(entryPoint); case "unmanaged": return isUnmanagedRunController(entryPoint); default: @@ -172,6 +221,8 @@ export function getRunControllerForTarget(target: BuildTarget) { return undefined; case "deploy": return deployRunController; + case "managed": + return managedRunController; case "unmanaged": return unmanagedRunController; default: @@ -185,6 +236,8 @@ export function isIndexWorkerForTarget(entryPoint: string, target: BuildTarget) return isDevIndexWorker(entryPoint); case "deploy": return isDeployIndexWorker(entryPoint); + case "managed": + return isManagedIndexWorker(entryPoint); case "unmanaged": return isUnmanagedIndexWorker(entryPoint); default: @@ -198,6 +251,8 @@ export function getIndexWorkerForTarget(target: BuildTarget) { return devIndexWorker; case "deploy": return deployIndexWorker; + case "managed": + return managedIndexWorker; case "unmanaged": return unmanagedIndexWorker; default: @@ -211,6 +266,8 @@ export function isIndexControllerForTarget(entryPoint: string, target: BuildTarg return false; case "deploy": return isDeployIndexController(entryPoint); + case "managed": + return isManagedIndexController(entryPoint); case "unmanaged": return 
isUnmanagedIndexController(entryPoint); default: @@ -224,6 +281,8 @@ export function getIndexControllerForTarget(target: BuildTarget) { return undefined; case "deploy": return deployIndexController; + case "managed": + return managedIndexController; case "unmanaged": return unmanagedIndexController; default: diff --git a/packages/cli-v3/src/commands/deploy.ts b/packages/cli-v3/src/commands/deploy.ts index 24424235b0..9188259d98 100644 --- a/packages/cli-v3/src/commands/deploy.ts +++ b/packages/cli-v3/src/commands/deploy.ts @@ -212,7 +212,7 @@ async function _deployCommand(dir: string, options: DeployCommandOptions) { const forcedExternals = await resolveAlwaysExternal(projectClient.client); const buildManifest = await buildWorker({ - target: "deploy", + target: resolvedConfig.compatibilityFlags.includes("run_engine_v2") ? "managed" : "deploy", environment: options.env, destination: destination.path, resolvedConfig, @@ -244,6 +244,7 @@ async function _deployCommand(dir: string, options: DeployCommandOptions) { selfHosted: options.selfHosted, registryHost: options.registry, namespace: options.namespace, + type: "MANAGED", }); if (!deploymentResponse.success) { diff --git a/packages/cli-v3/src/config.ts b/packages/cli-v3/src/config.ts index fc52cc3d9a..0786186854 100644 --- a/packages/cli-v3/src/config.ts +++ b/packages/cli-v3/src/config.ts @@ -178,6 +178,7 @@ async function resolveConfig( external: [], conditions: [], }, + compatibilityFlags: [], } ) as ResolvedConfig; // TODO: For some reason, without this, there is a weird type error complaining about tsconfigPath being string | nullish, which can't be assigned to string | undefined diff --git a/packages/cli-v3/src/entryPoints/managed-index-controller.ts b/packages/cli-v3/src/entryPoints/managed-index-controller.ts new file mode 100644 index 0000000000..5822acecd5 --- /dev/null +++ b/packages/cli-v3/src/entryPoints/managed-index-controller.ts @@ -0,0 +1,117 @@ +import { + BuildManifest, + 
CreateBackgroundWorkerRequestBody, + serializeIndexingError, +} from "@trigger.dev/core/v3"; +import { readFile, writeFile } from "node:fs/promises"; +import { join } from "node:path"; +import { env } from "std-env"; +import { CliApiClient } from "../apiClient.js"; +import { indexWorkerManifest } from "../indexing/indexWorkerManifest.js"; +import { resolveSourceFiles } from "../utilities/sourceFiles.js"; +import { execOptionsForRuntime } from "@trigger.dev/core/v3/build"; + +async function loadBuildManifest() { + const manifestContents = await readFile("./build.json", "utf-8"); + const raw = JSON.parse(manifestContents); + + return BuildManifest.parse(raw); +} + +async function bootstrap() { + const buildManifest = await loadBuildManifest(); + + if (typeof env.TRIGGER_API_URL !== "string") { + console.error("TRIGGER_API_URL is not set"); + process.exit(1); + } + + const cliApiClient = new CliApiClient(env.TRIGGER_API_URL, env.TRIGGER_SECRET_KEY); + + if (!env.TRIGGER_PROJECT_REF) { + console.error("TRIGGER_PROJECT_REF is not set"); + process.exit(1); + } + + if (!env.TRIGGER_DEPLOYMENT_ID) { + console.error("TRIGGER_DEPLOYMENT_ID is not set"); + process.exit(1); + } + + return { + buildManifest, + cliApiClient, + projectRef: env.TRIGGER_PROJECT_REF, + deploymentId: env.TRIGGER_DEPLOYMENT_ID, + }; +} + +type BootstrapResult = Awaited>; + +async function indexDeployment({ + cliApiClient, + projectRef, + deploymentId, + buildManifest, +}: BootstrapResult) { + const stdout: string[] = []; + const stderr: string[] = []; + + try { + const $env = await cliApiClient.getEnvironmentVariables(projectRef); + + if (!$env.success) { + throw new Error(`Failed to fetch environment variables: ${$env.error}`); + } + + const workerManifest = await indexWorkerManifest({ + runtime: buildManifest.runtime, + indexWorkerPath: buildManifest.indexWorkerEntryPoint, + buildManifestPath: "./build.json", + nodeOptions: execOptionsForRuntime(buildManifest.runtime, buildManifest), + env: 
$env.data.variables, + otelHookExclude: buildManifest.otelImportHook?.exclude, + otelHookInclude: buildManifest.otelImportHook?.include, + handleStdout(data) { + stdout.push(data); + }, + handleStderr(data) { + if (!data.includes("DeprecationWarning")) { + stderr.push(data); + } + }, + }); + + console.log("Writing index.json", process.cwd()); + + await writeFile(join(process.cwd(), "index.json"), JSON.stringify(workerManifest, null, 2)); + + const sourceFiles = resolveSourceFiles(buildManifest.sources, workerManifest.tasks); + + const backgroundWorkerBody: CreateBackgroundWorkerRequestBody = { + localOnly: true, + metadata: { + contentHash: buildManifest.contentHash, + packageVersion: buildManifest.packageVersion, + cliPackageVersion: buildManifest.cliPackageVersion, + tasks: workerManifest.tasks, + sourceFiles, + }, + supportsLazyAttempts: true, + }; + + await cliApiClient.createDeploymentBackgroundWorker(deploymentId, backgroundWorkerBody); + } catch (error) { + const serialiedIndexError = serializeIndexingError(error, stderr.join("\n")); + + console.error("Failed to index deployment", serialiedIndexError); + + await cliApiClient.failDeployment(deploymentId, { error: serialiedIndexError }); + + process.exit(1); + } +} + +const results = await bootstrap(); + +await indexDeployment(results); diff --git a/packages/cli-v3/src/entryPoints/managed-index-worker.ts b/packages/cli-v3/src/entryPoints/managed-index-worker.ts new file mode 100644 index 0000000000..2ef18444eb --- /dev/null +++ b/packages/cli-v3/src/entryPoints/managed-index-worker.ts @@ -0,0 +1,170 @@ +import { + BuildManifest, + type HandleErrorFunction, + indexerToWorkerMessages, + taskCatalog, + type TaskManifest, + TriggerConfig, +} from "@trigger.dev/core/v3"; +import { + StandardTaskCatalog, + TracingDiagnosticLogLevel, + TracingSDK, +} from "@trigger.dev/core/v3/workers"; +import { sendMessageInCatalog, ZodSchemaParsedError } from "@trigger.dev/core/v3/zodMessageHandler"; +import { readFile } from 
"node:fs/promises"; +import sourceMapSupport from "source-map-support"; +import { registerTasks } from "../indexing/registerTasks.js"; +import { env } from "std-env"; +import { normalizeImportPath } from "../utilities/normalizeImportPath.js"; + +sourceMapSupport.install({ + handleUncaughtExceptions: false, + environment: "node", + hookRequire: false, +}); + +process.on("uncaughtException", function (error, origin) { + if (error instanceof Error) { + process.send && + process.send({ + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { name: error.name, message: error.message, stack: error.stack }, + origin, + }, + version: "v1", + }); + } else { + process.send && + process.send({ + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { + name: "Error", + message: typeof error === "string" ? error : JSON.stringify(error), + }, + origin, + }, + version: "v1", + }); + } +}); + +taskCatalog.setGlobalTaskCatalog(new StandardTaskCatalog()); + +async function importConfig( + configPath: string +): Promise<{ config: TriggerConfig; handleError?: HandleErrorFunction }> { + const configModule = await import(normalizeImportPath(configPath)); + + const config = configModule?.default ?? configModule?.config; + + return { + config, + handleError: configModule?.handleError, + }; +} + +async function loadBuildManifest() { + const manifestContents = await readFile(env.TRIGGER_BUILD_MANIFEST_PATH!, "utf-8"); + const raw = JSON.parse(manifestContents); + + return BuildManifest.parse(raw); +} + +async function bootstrap() { + const buildManifest = await loadBuildManifest(); + + const { config } = await importConfig(buildManifest.configPath); + + // This needs to run or the PrismaInstrumentation will throw an error + const tracingSDK = new TracingSDK({ + url: env.OTEL_EXPORTER_OTLP_ENDPOINT ?? "http://0.0.0.0:4318", + instrumentations: config.instrumentations ?? [], + diagLogLevel: (env.OTEL_LOG_LEVEL as TracingDiagnosticLogLevel) ?? 
"none", + forceFlushTimeoutMillis: 30_000, + }); + + const importErrors = await registerTasks(buildManifest); + + return { + tracingSDK, + config, + buildManifest, + importErrors, + }; +} + +const { buildManifest, importErrors, config } = await bootstrap(); + +let tasks = taskCatalog.listTaskManifests(); + +// If the config has retry defaults, we need to apply them to all tasks that don't have any retry settings +if (config.retries?.default) { + tasks = tasks.map((task) => { + if (!task.retry) { + return { + ...task, + retry: config.retries?.default, + } satisfies TaskManifest; + } + + return task; + }); +} + +// If the config has a maxDuration, we need to apply it to all tasks that don't have a maxDuration +if (typeof config.maxDuration === "number") { + tasks = tasks.map((task) => { + if (typeof task.maxDuration !== "number") { + return { + ...task, + maxDuration: config.maxDuration, + } satisfies TaskManifest; + } + + return task; + }); +} + +await sendMessageInCatalog( + indexerToWorkerMessages, + "INDEX_COMPLETE", + { + manifest: { + tasks, + configPath: buildManifest.configPath, + runtime: buildManifest.runtime, + workerEntryPoint: buildManifest.runWorkerEntryPoint, + controllerEntryPoint: buildManifest.runControllerEntryPoint, + loaderEntryPoint: buildManifest.loaderEntryPoint, + customConditions: buildManifest.customConditions, + }, + importErrors, + }, + async (msg) => { + process.send?.(msg); + } +).catch((err) => { + if (err instanceof ZodSchemaParsedError) { + return sendMessageInCatalog( + indexerToWorkerMessages, + "TASKS_FAILED_TO_PARSE", + { zodIssues: err.error.issues, tasks }, + async (msg) => { + process.send?.(msg); + } + ); + } else { + console.error("Failed to send TASKS_READY message", err); + } + + return; +}); + +await new Promise((resolve) => { + setTimeout(() => { + resolve(); + }, 10); +}); diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts new file mode 
100644 index 0000000000..1f7d637846 --- /dev/null +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -0,0 +1,172 @@ +import { logger } from "../utilities/logger.js"; +import { TaskRunProcess } from "../executions/taskRunProcess.js"; +import { env as stdEnv } from "std-env"; +import { z } from "zod"; +import { CLOUD_API_URL } from "../consts.js"; +import { randomUUID } from "crypto"; +import { readJSONFile } from "../utilities/fileSystem.js"; +import { WorkerManifest } from "@trigger.dev/core/v3"; +import { WorkerSession } from "@trigger.dev/worker"; + +const Env = z.object({ + TRIGGER_API_URL: z.string().default(CLOUD_API_URL), + TRIGGER_CONTENT_HASH: z.string(), + TRIGGER_WORKER_TOKEN: z.string(), + TRIGGER_WORKER_INSTANCE_NAME: z.string().default(randomUUID()), + TRIGGER_DEPLOYMENT_ID: z.string(), + TRIGGER_DEPLOYMENT_VERSION: z.string(), + NODE_ENV: z.string().default("production"), + NODE_EXTRA_CA_CERTS: z.string().optional(), + OTEL_EXPORTER_OTLP_ENDPOINT: z.string().default("http://0.0.0.0:3030/otel"), +}); + +const env = Env.parse(stdEnv); + +logger.loggerLevel = "debug"; +logger.debug("Creating unmanaged worker", { env }); + +class ManagedWorker { + private readonly session: WorkerSession; + private taskRunProcess?: TaskRunProcess; + + constructor(private workerManifest: WorkerManifest) { + this.session = new WorkerSession({ + workerToken: env.TRIGGER_WORKER_TOKEN, + apiUrl: env.TRIGGER_API_URL, + instanceName: env.TRIGGER_WORKER_INSTANCE_NAME, + deploymentId: env.TRIGGER_DEPLOYMENT_ID, + dequeueIntervalMs: 1000, + }); + + const traceContext = new Map>(); + + this.session.on("runQueueMessage", async ({ time, message }) => { + logger.debug("[UnmanagedWorker] Received runQueueMessage", { time, message }); + + traceContext.set(message.run.id, message.run.traceContext); + + this.session.emit("requestRunAttemptStart", { + time: new Date(), + run: { + id: message.run.id, + }, + snapshot: { + id: message.snapshot.id, + }, + }); + }); + + 
this.session.on("runAttemptStarted", async ({ time, run, snapshot, execution, envVars }) => { + const taskRunEnv = { + ...gatherProcessEnv(), + ...envVars, + }; + + this.taskRunProcess = new TaskRunProcess({ + workerManifest: this.workerManifest, + env: taskRunEnv, + serverWorker: { + id: "unmanaged", + contentHash: env.TRIGGER_CONTENT_HASH, + version: env.TRIGGER_DEPLOYMENT_VERSION, + }, + payload: { + execution, + // TODO: The run engine could return this when the run is started + traceContext: traceContext.get(run.id) ?? {}, + }, + messageId: run.id, + }); + + try { + await this.taskRunProcess.initialize(); + + logger.log("executing task run process", { + attemptId: execution.attempt.id, + runId: execution.run.id, + }); + + const completion = await this.taskRunProcess.execute(); + + logger.log("completed", completion); + + try { + await this.taskRunProcess.cleanup(true); + } catch (error) { + logger.error("Failed to cleanup task run process, submitting completion anyway", { + error, + }); + } + + this.session.emit("runAttemptCompleted", { + time: new Date(), + run: { + id: run.id, + }, + snapshot: { + id: snapshot.id, + }, + completion, + }); + } catch (error) { + logger.error("Failed to complete lazy attempt", { + error, + }); + + this.session.emit("runAttemptCompleted", { + time: new Date(), + run: { + id: run.id, + }, + snapshot: { + id: snapshot.id, + }, + completion: { + id: execution.run.id, + ok: false, + retry: undefined, + error: TaskRunProcess.parseExecuteError(error), + }, + }); + } + }); + + process.on("SIGTERM", async () => { + logger.debug("[UnmanagedWorker] Received SIGTERM, stopping worker"); + await this.stop(); + }); + } + + async start() { + logger.debug("[UnmanagedWorker] Starting up"); + await this.session.start(); + } + + async stop() { + logger.debug("[UnmanagedWorker] Shutting down"); + await this.session.stop(); + } +} + +const workerManifest = await loadWorkerManifest(); + +const prodWorker = new ManagedWorker(workerManifest); +await 
prodWorker.start(); + +function gatherProcessEnv(): Record { + const $env = { + NODE_ENV: env.NODE_ENV, + NODE_EXTRA_CA_CERTS: env.NODE_EXTRA_CA_CERTS, + OTEL_EXPORTER_OTLP_ENDPOINT: env.OTEL_EXPORTER_OTLP_ENDPOINT, + }; + + // Filter out undefined values + return Object.fromEntries( + Object.entries($env).filter(([key, value]) => value !== undefined) + ) as Record; +} + +async function loadWorkerManifest() { + const manifest = await readJSONFile("./index.json"); + return WorkerManifest.parse(manifest); +} diff --git a/packages/cli-v3/src/entryPoints/managed-run-worker.ts b/packages/cli-v3/src/entryPoints/managed-run-worker.ts new file mode 100644 index 0000000000..69c22bc5cc --- /dev/null +++ b/packages/cli-v3/src/entryPoints/managed-run-worker.ts @@ -0,0 +1,468 @@ +import type { Tracer } from "@opentelemetry/api"; +import type { Logger } from "@opentelemetry/api-logs"; +import { + clock, + ExecutorToWorkerMessageCatalog, + type HandleErrorFunction, + logger, + LogLevel, + runMetadata, + runtime, + taskCatalog, + TaskRunErrorCodes, + TaskRunExecution, + timeout, + TriggerConfig, + WorkerManifest, + WorkerToExecutorMessageCatalog, +} from "@trigger.dev/core/v3"; +import { UnmanagedRuntimeManager } from "@trigger.dev/core/v3/unmanaged"; +import { TriggerTracer } from "@trigger.dev/core/v3/tracer"; +import { + ConsoleInterceptor, + DevUsageManager, + DurableClock, + getEnvVar, + getNumberEnvVar, + logLevels, + OtelTaskLogger, + ProdUsageManager, + StandardMetadataManager, + StandardTaskCatalog, + TaskExecutor, + TracingDiagnosticLogLevel, + TracingSDK, + usage, + UsageTimeoutManager, +} from "@trigger.dev/core/v3/workers"; +import { ZodIpcConnection } from "@trigger.dev/core/v3/zodIpc"; +import { readFile } from "node:fs/promises"; +import { setInterval, setTimeout } from "node:timers/promises"; +import sourceMapSupport from "source-map-support"; +import { env } from "std-env"; +import { normalizeImportPath } from "../utilities/normalizeImportPath.js"; +import { 
VERSION } from "../version.js"; + +sourceMapSupport.install({ + handleUncaughtExceptions: false, + environment: "node", + hookRequire: false, +}); + +process.on("uncaughtException", function (error, origin) { + if (error instanceof Error) { + process.send && + process.send({ + type: "EVENT", + message: { + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { name: error.name, message: error.message, stack: error.stack }, + origin, + }, + version: "v1", + }, + }); + } else { + process.send && + process.send({ + type: "EVENT", + message: { + type: "UNCAUGHT_EXCEPTION", + payload: { + error: { + name: "Error", + message: typeof error === "string" ? error : JSON.stringify(error), + }, + origin, + }, + version: "v1", + }, + }); + } +}); + +const usageIntervalMs = getEnvVar("USAGE_HEARTBEAT_INTERVAL_MS"); +const usageEventUrl = getEnvVar("USAGE_EVENT_URL"); +const triggerJWT = getEnvVar("TRIGGER_JWT"); +const heartbeatIntervalMs = getEnvVar("HEARTBEAT_INTERVAL_MS"); + +const devUsageManager = new DevUsageManager(); +const prodUsageManager = new ProdUsageManager(devUsageManager, { + heartbeatIntervalMs: usageIntervalMs ? parseInt(usageIntervalMs, 10) : undefined, + url: usageEventUrl, + jwt: triggerJWT, +}); + +usage.setGlobalUsageManager(prodUsageManager); +timeout.setGlobalManager(new UsageTimeoutManager(devUsageManager)); + +taskCatalog.setGlobalTaskCatalog(new StandardTaskCatalog()); +const durableClock = new DurableClock(); +clock.setGlobalClock(durableClock); +const runMetadataManager = new StandardMetadataManager(); +runMetadata.setGlobalManager(runMetadataManager); + +const triggerLogLevel = getEnvVar("TRIGGER_LOG_LEVEL"); + +async function importConfig( + configPath: string +): Promise<{ config: TriggerConfig; handleError?: HandleErrorFunction }> { + const configModule = await import(configPath); + + const config = configModule?.default ?? 
configModule?.config; + + return { + config, + handleError: configModule?.handleError, + }; +} + +async function loadWorkerManifest() { + const manifestContents = await readFile("./index.json", "utf-8"); + const raw = JSON.parse(manifestContents); + + return WorkerManifest.parse(raw); +} + +async function bootstrap() { + const workerManifest = await loadWorkerManifest(); + + const { config, handleError } = await importConfig( + normalizeImportPath(workerManifest.configPath) + ); + + const tracingSDK = new TracingSDK({ + url: env.OTEL_EXPORTER_OTLP_ENDPOINT ?? "http://0.0.0.0:4318", + instrumentations: config.instrumentations ?? [], + diagLogLevel: (env.OTEL_LOG_LEVEL as TracingDiagnosticLogLevel) ?? "none", + forceFlushTimeoutMillis: 30_000, + }); + + const otelTracer: Tracer = tracingSDK.getTracer("trigger-dev-worker", VERSION); + const otelLogger: Logger = tracingSDK.getLogger("trigger-dev-worker", VERSION); + + const tracer = new TriggerTracer({ tracer: otelTracer, logger: otelLogger }); + const consoleInterceptor = new ConsoleInterceptor( + otelLogger, + typeof config.enableConsoleLogging === "boolean" ? config.enableConsoleLogging : true + ); + + const configLogLevel = triggerLogLevel ?? config.logLevel ?? "info"; + + const otelTaskLogger = new OtelTaskLogger({ + logger: otelLogger, + tracer: tracer, + level: logLevels.includes(configLogLevel as any) ? 
(configLogLevel as LogLevel) : "info", + }); + + logger.setGlobalTaskLogger(otelTaskLogger); + + for (const task of workerManifest.tasks) { + taskCatalog.registerTaskFileMetadata(task.id, { + exportName: task.exportName, + filePath: task.filePath, + entryPoint: task.entryPoint, + }); + } + + return { + tracer, + tracingSDK, + consoleInterceptor, + config, + handleErrorFn: handleError, + workerManifest, + }; +} + +let _execution: TaskRunExecution | undefined; +let _isRunning = false; +let _tracingSDK: TracingSDK | undefined; + +const zodIpc = new ZodIpcConnection({ + listenSchema: WorkerToExecutorMessageCatalog, + emitSchema: ExecutorToWorkerMessageCatalog, + process, + handlers: { + EXECUTE_TASK_RUN: async ({ execution, traceContext, metadata }, sender) => { + console.log(`[${new Date().toISOString()}] Received EXECUTE_TASK_RUN`, execution); + + if (_isRunning) { + console.error("Worker is already running a task"); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.TASK_ALREADY_RUNNING, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + try { + const { tracer, tracingSDK, consoleInterceptor, config, handleErrorFn, workerManifest } = + await bootstrap(); + + _tracingSDK = tracingSDK; + + const taskManifest = workerManifest.tasks.find((t) => t.id === execution.task.id); + + if (!taskManifest) { + console.error(`Could not find task ${execution.task.id}`); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.COULD_NOT_FIND_TASK, + message: `Could not find task ${execution.task.id}. 
Make sure the task is exported and the ID is correct.`, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + try { + const beforeImport = performance.now(); + await import(normalizeImportPath(taskManifest.entryPoint)); + const durationMs = performance.now() - beforeImport; + + console.log( + `Imported task ${execution.task.id} [${taskManifest.entryPoint}] in ${durationMs}ms` + ); + } catch (err) { + console.error(`Failed to import task ${execution.task.id}`, err); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.COULD_NOT_IMPORT_TASK, + message: err instanceof Error ? err.message : String(err), + stackTrace: err instanceof Error ? err.stack : undefined, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + process.title = `trigger-dev-worker: ${execution.task.id} ${execution.run.id}`; + + // Import the task module + const task = taskCatalog.getTask(execution.task.id); + + if (!task) { + console.error(`Could not find task ${execution.task.id}`); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.COULD_NOT_FIND_EXECUTOR, + }, + usage: { + durationMs: 0, + }, + }, + }); + + return; + } + + const executor = new TaskExecutor(task, { + tracer, + tracingSDK, + consoleInterceptor, + config, + handleErrorFn, + }); + + try { + _execution = execution; + _isRunning = true; + + runMetadataManager.startPeriodicFlush( + getNumberEnvVar("TRIGGER_RUN_METADATA_FLUSH_INTERVAL", 1000) + ); + + const measurement = usage.start(); + + // This lives outside of the executor because this will eventually be moved to the controller level + const signal = execution.run.maxDuration + ? 
timeout.abortAfterTimeout(execution.run.maxDuration) + : undefined; + + signal?.addEventListener("abort", async (e) => { + if (_isRunning) { + _isRunning = false; + _execution = undefined; + + const usageSample = usage.stop(measurement); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.MAX_DURATION_EXCEEDED, + message: + signal.reason instanceof Error + ? signal.reason.message + : String(signal.reason), + }, + usage: { + durationMs: usageSample.cpuTime, + }, + }, + }); + } + }); + + const { result } = await executor.execute( + execution, + metadata, + traceContext, + measurement, + signal + ); + + const usageSample = usage.stop(measurement); + + if (_isRunning) { + return sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ...result, + usage: { + durationMs: usageSample.cpuTime, + }, + }, + }); + } + } finally { + _execution = undefined; + _isRunning = false; + } + } catch (err) { + console.error("Failed to execute task", err); + + await sender.send("TASK_RUN_COMPLETED", { + execution, + result: { + ok: false, + id: execution.run.id, + error: { + type: "INTERNAL_ERROR", + code: TaskRunErrorCodes.CONFIGURED_INCORRECTLY, + }, + usage: { + durationMs: 0, + }, + }, + }); + } + }, + TASK_RUN_COMPLETED_NOTIFICATION: async () => { + await managedWorkerRuntime.completeWaitpoints([]); + }, + WAIT_COMPLETED_NOTIFICATION: async () => { + await managedWorkerRuntime.completeWaitpoints([]); + }, + FLUSH: async ({ timeoutInMs }, sender) => { + await flushAll(timeoutInMs); + }, + }, +}); + +async function flushAll(timeoutInMs: number = 10_000) { + const now = performance.now(); + + await Promise.all([ + flushUsage(timeoutInMs), + flushTracingSDK(timeoutInMs), + flushMetadata(timeoutInMs), + ]); + + const duration = performance.now() - now; + + console.log(`Flushed all in ${duration}ms`); +} + +async function flushUsage(timeoutInMs: number = 10_000) { 
+ const now = performance.now(); + + await Promise.race([prodUsageManager.flush(), setTimeout(timeoutInMs)]); + + const duration = performance.now() - now; + + console.log(`Flushed usage in ${duration}ms`); +} + +async function flushTracingSDK(timeoutInMs: number = 10_000) { + const now = performance.now(); + + await Promise.race([_tracingSDK?.flush(), setTimeout(timeoutInMs)]); + + const duration = performance.now() - now; + + console.log(`Flushed tracingSDK in ${duration}ms`); +} + +async function flushMetadata(timeoutInMs: number = 10_000) { + const now = performance.now(); + + await Promise.race([runMetadataManager.flush(), setTimeout(timeoutInMs)]); + + const duration = performance.now() - now; + + console.log(`Flushed runMetadata in ${duration}ms`); +} + +const managedWorkerRuntime = new UnmanagedRuntimeManager(); + +runtime.setGlobalRuntimeManager(managedWorkerRuntime); + +process.title = "trigger-managed-worker"; + +const heartbeatInterval = parseInt(heartbeatIntervalMs ?? "30000", 10); + +for await (const _ of setInterval(heartbeatInterval)) { + if (_isRunning && _execution) { + try { + await zodIpc.send("TASK_HEARTBEAT", { id: _execution.attempt.id }); + } catch (err) { + console.error("Failed to send HEARTBEAT message", err); + } + } +} + +console.log(`[${new Date().toISOString()}] Executor started`); diff --git a/packages/core/src/v3/build/resolvedConfig.ts b/packages/core/src/v3/build/resolvedConfig.ts index 674a7fce14..b2536fdff0 100644 --- a/packages/core/src/v3/build/resolvedConfig.ts +++ b/packages/core/src/v3/build/resolvedConfig.ts @@ -1,6 +1,6 @@ import { type Defu } from "defu"; import type { Prettify } from "ts-essentials"; -import { TriggerConfig } from "../config.js"; +import { CompatibilityFlag, TriggerConfig } from "../config.js"; import { BuildRuntime } from "../schemas/build.js"; import { ResolveEnvironmentVariablesFunction } from "../types/index.js"; @@ -16,6 +16,7 @@ export type ResolvedConfig = Prettify< build: { jsx: { factory: 
string; fragment: string; automatic: true }; } & Omit, "jsx">; + compatibilityFlags: CompatibilityFlag[]; }, ] > & { diff --git a/packages/core/src/v3/config.ts b/packages/core/src/v3/config.ts index 42ca53c616..30afdaf91a 100644 --- a/packages/core/src/v3/config.ts +++ b/packages/core/src/v3/config.ts @@ -10,6 +10,8 @@ import type { } from "./types/index.js"; import type { BuildRuntime, RetryOptions } from "./index.js"; +export type CompatibilityFlag = "run_engine_v2"; + export type TriggerConfig = { /** * @default "node" @@ -23,6 +25,7 @@ export type TriggerConfig = { enabledInDev?: boolean; default?: RetryOptions; }; + compatibilityFlags?: Array; /** * The default machine preset to use for your deployed trigger.dev tasks. You can override this on a per-task basis. * @default "small-1x" diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts new file mode 100644 index 0000000000..88b0350590 --- /dev/null +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -0,0 +1,81 @@ +import { + BatchTaskRunExecutionResult, + TaskRunContext, + TaskRunExecutionResult, +} from "../schemas/index.js"; +import { RuntimeManager } from "./manager.js"; +import { unboundedTimeout } from "../utils/timers.js"; + +type Waitpoint = any; + +export class UnmanagedRuntimeManager implements RuntimeManager { + private readonly waitpoints: Map = new Map(); + + _taskWaits: Map void }> = new Map(); + + _batchWaits: Map< + string, + { resolve: (value: BatchTaskRunExecutionResult) => void; reject: (err?: any) => void } + > = new Map(); + + disable(): void { + // do nothing + } + + async waitForDuration(ms: number): Promise { + await unboundedTimeout(ms); + } + + async waitUntil(date: Date): Promise { + return this.waitForDuration(date.getTime() - Date.now()); + } + + async waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { + const promise = new Promise((resolve) => { + this._taskWaits.set(params.id, { 
resolve }); + }); + + return await promise; + } + + async waitForBatch(params: { + id: string; + runs: string[]; + ctx: TaskRunContext; + }): Promise { + if (!params.runs.length) { + return Promise.resolve({ id: params.id, items: [] }); + } + + const promise = Promise.all( + params.runs.map((runId) => { + return new Promise((resolve, reject) => { + this._taskWaits.set(runId, { resolve }); + }); + }) + ); + + const results = await promise; + + return { + id: params.id, + items: results, + }; + } + + async completeWaitpoints(waitpoints: Waitpoint[]): Promise { + await Promise.all(waitpoints.map((waitpoint) => this.completeWaitpoint(waitpoint))); + } + + private completeWaitpoint(waitpoint: Waitpoint): void { + const wait = this._taskWaits.get(waitpoint.id); + + if (!wait) { + return; + } + + wait.resolve(waitpoint.completion); + + this._taskWaits.delete(waitpoint.id); + } +} diff --git a/packages/core/src/v3/schemas/build.ts b/packages/core/src/v3/schemas/build.ts index 0b9431656d..d8a8d8545b 100644 --- a/packages/core/src/v3/schemas/build.ts +++ b/packages/core/src/v3/schemas/build.ts @@ -9,7 +9,7 @@ export const BuildExternal = z.object({ export type BuildExternal = z.infer; -export const BuildTarget = z.enum(["dev", "deploy", "unmanaged"]); +export const BuildTarget = z.enum(["dev", "deploy", "managed", "unmanaged"]); export type BuildTarget = z.infer; From ac7076b0c45e5667144c2124f134b26df164e50c Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 24 Nov 2024 16:24:26 +0000 Subject: [PATCH 214/485] checkpoint event indexes --- internal-packages/database/prisma/schema.prisma | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 00eb62d5ef..56c53e8a7e 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2601,6 +2601,9 @@ model CheckpointRestoreEvent { 
createdAt DateTime @default(now()) updatedAt DateTime @updatedAt + + @@index([runId]) + @@index([checkpointId]) } enum CheckpointRestoreEventType { From 66edfe28771f8fd76e29e7e5a9f1d85164d57548 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 24 Nov 2024 18:24:52 +0000 Subject: [PATCH 215/485] shared -> managed worker --- .../route.tsx | 6 +++++- .../v3/services/initializeDeployment.server.ts | 4 ++-- .../app/v3/services/rollbackDeployment.server.ts | 2 +- .../services/worker/workerGroupService.server.ts | 4 ++-- .../worker/workerGroupTokenService.server.ts | 16 ++++++++-------- apps/webapp/test/workerGroup.test.ts | 4 ++-- internal-packages/database/prisma/schema.prisma | 4 ++-- packages/core/src/v3/schemas/api.ts | 2 +- 8 files changed, 23 insertions(+), 19 deletions(-) diff --git a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx index ea717a975c..b35d1c1ac7 100644 --- a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx +++ b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.deployments/route.tsx @@ -6,6 +6,7 @@ import { } from "@heroicons/react/20/solid"; import { Outlet, useLocation, useParams } from "@remix-run/react"; import { LoaderFunctionArgs } from "@remix-run/server-runtime"; +import { WorkerInstanceGroupType } from "@trigger.dev/database"; import { typedjson, useTypedLoaderData } from "remix-typedjson"; import { z } from "zod"; import { UserAvatar } from "~/components/UserProfilePhoto"; @@ -285,7 +286,10 @@ function DeploymentActionsCell({ const project = useProject(); const canRollback = - deployment.type === "SHARED" && !deployment.isCurrent && deployment.isDeployed; + deployment.type === WorkerInstanceGroupType.MANAGED && + !deployment.isCurrent && + deployment.isDeployed; + const 
canRetryIndexing = deployment.isLatest && deploymentIndexingIsRetryable(deployment); if (!canRollback && !canRetryIndexing) { diff --git a/apps/webapp/app/v3/services/initializeDeployment.server.ts b/apps/webapp/app/v3/services/initializeDeployment.server.ts index da0e1c68ca..d8b621dad8 100644 --- a/apps/webapp/app/v3/services/initializeDeployment.server.ts +++ b/apps/webapp/app/v3/services/initializeDeployment.server.ts @@ -66,9 +66,9 @@ export class InitializeDeploymentService extends BaseService { const unmanagedImageTag = unmanagedImageParts.join("/"); - const defaultType = WorkerInstanceGroupType.SHARED; + const defaultType = WorkerInstanceGroupType.MANAGED; const deploymentType = payload.type ?? defaultType; - const isShared = deploymentType === WorkerInstanceGroupType.SHARED; + const isShared = deploymentType === WorkerInstanceGroupType.MANAGED; logger.debug("Creating deployment", { environmentId: environment.id, diff --git a/apps/webapp/app/v3/services/rollbackDeployment.server.ts b/apps/webapp/app/v3/services/rollbackDeployment.server.ts index a7c1ffb86e..b5ee41f1b6 100644 --- a/apps/webapp/app/v3/services/rollbackDeployment.server.ts +++ b/apps/webapp/app/v3/services/rollbackDeployment.server.ts @@ -11,7 +11,7 @@ export class RollbackDeploymentService extends BaseService { return; } - if (deployment.type !== WorkerInstanceGroupType.SHARED) { + if (deployment.type !== WorkerInstanceGroupType.MANAGED) { logger.error("Can only roll back shared deployments", { id: deployment.id, type: deployment.type, diff --git a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts index 28a2bb139d..f8fde5b04c 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts @@ -30,7 +30,7 @@ export class WorkerGroupService extends WithRunEngine { data: { projectId, organizationId, - type: projectId ? 
WorkerInstanceGroupType.UNMANAGED : WorkerInstanceGroupType.SHARED, + type: projectId ? WorkerInstanceGroupType.UNMANAGED : WorkerInstanceGroupType.MANAGED, masterQueue: this.generateMasterQueueName({ projectId, name }), tokenId: token.id, description, @@ -92,7 +92,7 @@ export class WorkerGroupService extends WithRunEngine { where: { OR: [ { - type: WorkerInstanceGroupType.SHARED, + type: WorkerInstanceGroupType.MANAGED, }, { projectId, diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 866766ca56..a2290e255d 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -153,7 +153,7 @@ export class WorkerGroupTokenService extends WithRunEngine { return; } - if (workerGroup.type === WorkerInstanceGroupType.SHARED) { + if (workerGroup.type === WorkerInstanceGroupType.MANAGED) { const managedWorkerSecret = request.headers.get(HEADER_NAME.WORKER_MANAGED_SECRET); if (!managedWorkerSecret) { @@ -199,11 +199,11 @@ export class WorkerGroupTokenService extends WithRunEngine { return; } - if (workerGroup.type === WorkerInstanceGroupType.SHARED) { + if (workerGroup.type === WorkerInstanceGroupType.MANAGED) { return new AuthenticatedWorkerInstance({ prisma: this._prisma, engine: this._engine, - type: WorkerInstanceGroupType.SHARED, + type: WorkerInstanceGroupType.MANAGED, workerGroupId: workerGroup.id, workerInstanceId: workerInstance.id, masterQueue: workerGroup.masterQueue, @@ -278,7 +278,7 @@ export class WorkerGroupTokenService extends WithRunEngine { return workerInstance; } - if (workerGroup.type === WorkerInstanceGroupType.SHARED) { + if (workerGroup.type === WorkerInstanceGroupType.MANAGED) { if (deploymentId) { logger.warn( "[WorkerGroupTokenService] Shared worker group instances should not authenticate with a deployment ID", @@ -480,7 +480,7 @@ export class 
AuthenticatedWorkerInstance extends WithRunEngine { } async dequeue(maxRunCount = 10): Promise { - if (this.type === WorkerInstanceGroupType.SHARED) { + if (this.type === WorkerInstanceGroupType.MANAGED) { return await this._engine.dequeueFromMasterQueue({ consumerId: this.workerInstanceId, masterQueue: this.masterQueue, @@ -582,9 +582,9 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { } toJSON(): WorkerGroupTokenAuthenticationResponse { - if (this.type === WorkerInstanceGroupType.SHARED) { + if (this.type === WorkerInstanceGroupType.MANAGED) { return { - type: WorkerInstanceGroupType.SHARED, + type: WorkerInstanceGroupType.MANAGED, workerGroupId: this.workerGroupId, workerInstanceId: this.workerInstanceId, masterQueue: this.masterQueue, @@ -630,7 +630,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { export type WorkerGroupTokenAuthenticationResponse = | { - type: typeof WorkerInstanceGroupType.SHARED; + type: typeof WorkerInstanceGroupType.MANAGED; workerGroupId: string; workerInstanceId: string; masterQueue: string; diff --git a/apps/webapp/test/workerGroup.test.ts b/apps/webapp/test/workerGroup.test.ts index 5aad0ad538..828dfe39a4 100644 --- a/apps/webapp/test/workerGroup.test.ts +++ b/apps/webapp/test/workerGroup.test.ts @@ -26,7 +26,7 @@ describe("worker", () => { describe("auth", { concurrent: true, timeout: 10000 }, () => { containerTest("should fail", async ({ prisma }) => { const { workerGroup, token } = await setupWorkerGroup({ prisma }); - expect(workerGroup.type).toBe(WorkerInstanceGroupType.SHARED); + expect(workerGroup.type).toBe(WorkerInstanceGroupType.MANAGED); const missingToken = new Request("https://example.com", { headers: { @@ -63,7 +63,7 @@ describe("worker", () => { containerTest("should succeed", async ({ prisma }) => { const { workerGroup, token } = await setupWorkerGroup({ prisma }); - expect(workerGroup.type).toBe(WorkerInstanceGroupType.SHARED); + 
expect(workerGroup.type).toBe(WorkerInstanceGroupType.MANAGED); const request = new Request("https://example.com", { headers: { diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 56c53e8a7e..6feb2c294b 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2129,7 +2129,7 @@ model WorkerInstance { } enum WorkerInstanceGroupType { - SHARED + MANAGED @map("SHARED") UNMANAGED } @@ -2624,7 +2624,7 @@ model WorkerDeployment { externalBuildData Json? status WorkerDeploymentStatus @default(PENDING) - type WorkerInstanceGroupType @default(SHARED) + type WorkerInstanceGroupType @default(MANAGED) project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) projectId String diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index ccaabc5378..47e372f18a 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -213,7 +213,7 @@ export const InitializeDeploymentRequestBody = z.object({ registryHost: z.string().optional(), selfHosted: z.boolean().optional(), namespace: z.string().optional(), - type: z.enum(["SHARED", "UNMANAGED"]).optional(), + type: z.enum(["MANAGED", "UNMANAGED"]).optional(), }); export type InitializeDeploymentRequestBody = z.infer; From f25a9716c57308262ea98dc50893412e9110c38a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 24 Nov 2024 18:52:05 +0000 Subject: [PATCH 216/485] add snapshot creation debug task events --- .../route.tsx | 5 +- apps/webapp/app/v3/eventRepository.server.ts | 19 +++- apps/webapp/app/v3/runEngine.server.ts | 94 +++++++++++++++---- .../database/prisma/schema.prisma | 1 + .../run-engine/src/engine/index.ts | 12 +++ 5 files changed, 108 insertions(+), 23 deletions(-) diff --git 
a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx index 6b975a1a14..baa4f3055e 100644 --- a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx +++ b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx @@ -65,7 +65,7 @@ import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; import { useReplaceSearchParams } from "~/hooks/useReplaceSearchParams"; import { Shortcut, useShortcutKeys } from "~/hooks/useShortcutKeys"; -import { useUser } from "~/hooks/useUser"; +import { useHasAdminAccess, useUser } from "~/hooks/useUser"; import { Run, RunPresenter } from "~/presenters/v3/RunPresenter.server"; import { requireUserId } from "~/services/session.server"; import { cn } from "~/utils/cn"; @@ -262,6 +262,7 @@ export default function Page() { function TraceView({ run, trace, maximumLiveReloadingSetting, resizable }: LoaderData) { const organization = useOrganization(); const project = useProject(); + const isAdmin = useHasAdminAccess(); const { searchParams, replaceSearchParam } = useReplaceSearchParams(); const selectedSpanId = searchParams.get("span") ?? 
undefined; @@ -302,7 +303,7 @@ function TraceView({ run, trace, maximumLiveReloadingSetting, resizable }: Loade !event.data.isDebug)} parentRunFriendlyId={parentRunFriendlyId} onSelectedIdChanged={(selectedSpan) => { //instantly close the panel if no span is selected diff --git a/apps/webapp/app/v3/eventRepository.server.ts b/apps/webapp/app/v3/eventRepository.server.ts index c9c2e8e231..b4fc5fb0e8 100644 --- a/apps/webapp/app/v3/eventRepository.server.ts +++ b/apps/webapp/app/v3/eventRepository.server.ts @@ -56,6 +56,7 @@ export type TraceAttributes = Partial< | "attemptId" | "isError" | "isCancelled" + | "isDebug" | "runId" | "runIsTest" | "output" @@ -119,6 +120,7 @@ export type QueriedEvent = Prisma.TaskEventGetPayload<{ isError: true; isPartial: true; isCancelled: true; + isDebug: true; level: true; events: true; environmentType: true; @@ -164,6 +166,7 @@ export type SpanSummary = { isError: boolean; isPartial: boolean; isCancelled: boolean; + isDebug: boolean; level: NonNullable; environmentType: CreatableEventEnvironmentType; }; @@ -397,6 +400,7 @@ export class EventRepository { isError: true, isPartial: true, isCancelled: true, + isDebug: true, level: true, events: true, environmentType: true, @@ -460,6 +464,7 @@ export class EventRepository { isError: event.isError, isPartial: ancestorCancelled ? false : event.isPartial, isCancelled: event.isCancelled === true ? 
true : event.isPartial && ancestorCancelled, + isDebug: event.isDebug, startTime: getDateFromNanoseconds(event.startTime), level: event.level, events: event.events, @@ -505,6 +510,7 @@ export class EventRepository { isError: true, isPartial: true, isCancelled: true, + isDebug: true, level: true, events: true, environmentType: true, @@ -744,11 +750,13 @@ export class EventRepository { }); } - public async recordEvent(message: string, options: TraceEventOptions) { + public async recordEvent(message: string, options: TraceEventOptions & { duration?: number }) { const propagatedContext = extractContextFromCarrier(options.context ?? {}); const startTime = options.startTime ?? getNowInNanoseconds(); - const duration = options.endTime ? calculateDurationFromStart(startTime, options.endTime) : 100; + const duration = + options.duration ?? + (options.endTime ? calculateDurationFromStart(startTime, options.endTime) : 100); const traceId = propagatedContext?.traceparent?.traceId ?? this.generateTraceId(); const parentId = propagatedContext?.traceparent?.spanId; @@ -772,8 +780,10 @@ export class EventRepository { ...options.attributes.metadata, }; + const isDebug = options.attributes.isDebug; + const style = { - [SemanticInternalAttributes.STYLE_ICON]: "play", + [SemanticInternalAttributes.STYLE_ICON]: isDebug ? "warn" : "play", }; if (!options.attributes.runId) { @@ -788,11 +798,12 @@ export class EventRepository { message: message, serviceName: "api server", serviceNamespace: "trigger.dev", - level: "TRACE", + level: isDebug ? 
"WARN" : "TRACE", kind: options.kind, status: "OK", startTime, isPartial: false, + isDebug, duration, // convert to nanoseconds environmentId: options.environment.id, environmentType: options.environment.type, diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 8bdeddd939..bf277f7288 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -5,6 +5,7 @@ import { tracer } from "./tracer.server"; import { singleton } from "~/utils/singleton"; import { eventRepository } from "./eventRepository.server"; import { createJsonErrorObject } from "@trigger.dev/core/v3"; +import { logger } from "~/services/logger.server"; export const engine = singleton("RunEngine", createRunEngine); @@ -42,27 +43,86 @@ function createRunEngine() { }); engine.eventBus.on("runSucceeded", async ({ time, run }) => { - await eventRepository.completeEvent(run.spanId, { - endTime: time, - attributes: { - isError: false, - output: run.output, - outputType: run.outputType, - }, - }); + try { + await eventRepository.completeEvent(run.spanId, { + endTime: time, + attributes: { + isError: false, + output: run.output, + outputType: run.outputType, + }, + }); + } catch (error) { + logger.error("[runSucceeded] Failed to complete event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + }); + } }); engine.eventBus.on("runCancelled", async ({ time, run }) => { - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: run.friendlyId, - }); + try { + const inProgressEvents = await eventRepository.queryIncompleteEvents({ + runId: run.friendlyId, + }); + + await Promise.all( + inProgressEvents.map((event) => { + const error = createJsonErrorObject(run.error); + return eventRepository.cancelEvent(event, time, error.message); + }) + ); + } catch (error) { + logger.error("[runCancelled] Failed to cancel event", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + }); + } + }); + + engine.eventBus.on("executionSnapshotCreated", async ({ time, run, snapshot }) => { + try { + const foundRun = await prisma.taskRun.findUnique({ + where: { + id: run.id, + }, + include: { + runtimeEnvironment: { + include: { + project: true, + organization: true, + }, + }, + }, + }); + + if (!foundRun) { + logger.error("Failed to find run", { runId: run.id }); + return; + } - await Promise.all( - inProgressEvents.map((event) => { - const error = createJsonErrorObject(run.error); - return eventRepository.cancelEvent(event, time, error.message); - }) - ); + await eventRepository.recordEvent( + `[ExecutionSnapshot] ${snapshot.executionStatus} - ${snapshot.description}`, + { + environment: foundRun.runtimeEnvironment, + taskSlug: foundRun.taskIdentifier, + context: foundRun.traceContext as Record, + attributes: { + runId: foundRun.friendlyId, + isDebug: true, + properties: { + snapshot, + }, + }, + duration: 0, + } + ); + } catch (error) { + logger.error("[executionSnapshotCreated] Failed to record event", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + }); + } }); return engine; diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 6feb2c294b..4234c05b1c 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2311,6 +2311,7 @@ model TaskEvent { isError Boolean @default(false) isPartial Boolean @default(false) isCancelled Boolean @default(false) + isDebug Boolean @default(false) serviceName String serviceNamespace String diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 1d23e49ce4..42557840aa 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1973,6 +1973,18 @@ export class RunEngine { const newSnapshot = await getLatestExecutionSnapshot(prisma, runId); await this.runQueue.acknowledgeMessage(run.project.organizationId, runId); + // We need to manually emit this as we created the final snapshot as part of the task run update + this.eventBus.emit("executionSnapshotCreated", { + time: newSnapshot.createdAt, + run: { + id: newSnapshot.runId, + }, + snapshot: { + ...newSnapshot, + completedWaitpointIds: newSnapshot.completedWaitpoints.map((wp) => wp.id), + }, + }); + if (!run.associatedWaitpoint) { throw new ServiceValidationError("No associated waitpoint found", 400); } From 26a72dd2f65a1d3700ae8d83e2ac68c9c26437d8 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 24 Nov 2024 19:09:30 +0000 Subject: [PATCH 217/485] add worker instance metadata --- .../app/routes/api.v1.worker-actions.connect.ts | 16 +++++++++++----- .../worker/workerGroupTokenService.server.ts | 11 +++++++++++ internal-packages/database/prisma/schema.prisma | 4 +++- packages/worker/src/client/http.ts | 6 +++++- packages/worker/src/schemas.ts | 5 +++++ packages/worker/src/workerSession.ts | 8 +++++++- 6 
files changed, 42 insertions(+), 8 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts index 0500fc032c..40972d78d1 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts @@ -1,10 +1,16 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiConnectResponseBody } from "@trigger.dev/worker/schemas"; -import { createLoaderWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; +import { + WorkerApiConnectRequestBody, + WorkerApiConnectResponseBody, +} from "@trigger.dev/worker/schemas"; +import { createActionWorkerApiRoute } from "~/services/routeBuiilders/apiBuilder.server"; -export const loader = createLoaderWorkerApiRoute( - {}, - async ({ authenticatedWorker }): Promise> => { +export const action = createActionWorkerApiRoute( + { + body: WorkerApiConnectRequestBody, + }, + async ({ authenticatedWorker, body }): Promise> => { + await authenticatedWorker.connect(body.metadata); return json({ ok: true }); } ); diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index a2290e255d..faa7e9fda8 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -479,6 +479,17 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { this.backgroundWorkerId = opts.backgroundWorkerId; } + async connect(metadata: Record): Promise { + await this._prisma.workerInstance.update({ + where: { + id: this.workerInstanceId, + }, + data: { + metadata, + }, + }); + } + async dequeue(maxRunCount = 10): Promise { if (this.type === WorkerInstanceGroupType.MANAGED) { return await this._engine.dequeueFromMasterQueue({ diff --git a/internal-packages/database/prisma/schema.prisma 
b/internal-packages/database/prisma/schema.prisma index 4234c05b1c..81cb3b52b5 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2102,6 +2102,8 @@ model WorkerInstance { /// If unmanged, it will be prefixed with the deployment ID e.g. "deploy-123-worker-1" resourceIdentifier String + metadata Json? + workerGroup WorkerInstanceGroup @relation(fields: [workerGroupId], references: [id]) workerGroupId String @@ -2129,7 +2131,7 @@ model WorkerInstance { } enum WorkerInstanceGroupType { - MANAGED @map("SHARED") + MANAGED @map("SHARED") UNMANAGED } diff --git a/packages/worker/src/client/http.ts b/packages/worker/src/client/http.ts index 74bd6f39af..931f500c41 100644 --- a/packages/worker/src/client/http.ts +++ b/packages/worker/src/client/http.ts @@ -1,6 +1,7 @@ import { z } from "zod"; import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; import { + WorkerApiConnectRequestBody, WorkerApiConnectResponseBody, WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody, @@ -39,14 +40,17 @@ export class WorkerHttpClient { } } - async connect() { + async connect(body: WorkerApiConnectRequestBody) { return wrapZodFetch( WorkerApiConnectResponseBody, `${this.apiURL}/api/v1/worker-actions/connect`, { + method: "POST", headers: { ...this.defaultHeaders, + "Content-Type": "application/json", }, + body: JSON.stringify(body), } ); } diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/schemas.ts index cf1d601b26..96e7efaf38 100644 --- a/packages/worker/src/schemas.ts +++ b/packages/worker/src/schemas.ts @@ -24,6 +24,11 @@ export const WorkerApiHeartbeatResponseBody = z.object({ }); export type WorkerApiHeartbeatResponseBody = z.infer; +export const WorkerApiConnectRequestBody = z.object({ + metadata: z.record(z.any()), +}); +export type WorkerApiConnectRequestBody = z.infer; + export const WorkerApiConnectResponseBody = z.object({ ok: z.literal(true), }); diff --git 
a/packages/worker/src/workerSession.ts b/packages/worker/src/workerSession.ts index 8ffd33b820..c8fe04cfc9 100644 --- a/packages/worker/src/workerSession.ts +++ b/packages/worker/src/workerSession.ts @@ -6,6 +6,7 @@ import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./s import { RunQueueConsumer } from "./queueConsumer.js"; import { WorkerEventArgs, WorkerEvents } from "./events.js"; import EventEmitter from "events"; +import { VERSION } from "./version.js"; type WorkerSessionOptions = WorkerClientCommonOptions & { heartbeatIntervalSeconds?: number; @@ -111,7 +112,12 @@ export class WorkerSession extends EventEmitter { } async start() { - const connect = await this.httpClient.connect(); + const connect = await this.httpClient.connect({ + metadata: { + workerVersion: VERSION, + }, + }); + if (!connect.success) { console.error("[WorkerSession] Failed to connect via HTTP client", { error: connect.error }); throw new Error("[WorkerSession] Failed to connect via HTTP client"); From ffcd387bfb4c26f804948bac8903f30a5fedacc2 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 24 Nov 2024 19:10:18 +0000 Subject: [PATCH 218/485] remove random package.json --- packages/trigger-sdk/src/package.json | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 packages/trigger-sdk/src/package.json diff --git a/packages/trigger-sdk/src/package.json b/packages/trigger-sdk/src/package.json deleted file mode 100644 index 5bbefffbab..0000000000 --- a/packages/trigger-sdk/src/package.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "type": "commonjs" -} From 91879969747d28c6aed7f2dee9f151ecbeb8d8fb Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 25 Nov 2024 18:04:04 +0000 Subject: [PATCH 219/485] Revert "checkpoint event indexes" This reverts commit ac7076b0c45e5667144c2124f134b26df164e50c. 
--- internal-packages/database/prisma/schema.prisma | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 81cb3b52b5..efe734f44d 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2604,9 +2604,6 @@ model CheckpointRestoreEvent { createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - - @@index([runId]) - @@index([checkpointId]) } enum CheckpointRestoreEventType { From da74e88b679bb13a1f746aed68997f179e15aa65 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 27 Nov 2024 15:17:19 +0000 Subject: [PATCH 220/485] managed worker wip --- .../services/initializeDeployment.server.ts | 18 +- .../database/prisma/schema.prisma | 10 +- .../run-engine/src/engine/index.ts | 1 + packages/cli-v3/src/commands/deploy.ts | 6 +- .../src/entryPoints/managed-run-controller.ts | 263 +++++++++++------- .../entryPoints/unmanaged-run-controller.ts | 23 +- packages/core/src/v3/apps/http.ts | 28 ++ packages/core/src/v3/schemas/api.ts | 2 +- packages/core/src/v3/schemas/common.ts | 6 +- packages/worker/src/client/http.ts | 20 +- packages/worker/src/client/util.ts | 19 +- packages/worker/src/client/websocket.ts | 4 +- packages/worker/src/consts.ts | 4 + packages/worker/src/index.ts | 2 + packages/worker/src/schemas.ts | 28 +- packages/worker/src/util.ts | 13 + packages/worker/src/workerSession.ts | 5 +- packages/worker/src/workload/http.ts | 126 +++++++++ packages/worker/src/workload/types.ts | 4 + packages/worker/src/workload/util.ts | 11 + packages/worker/src/workload/websocket.ts | 52 ++++ 21 files changed, 485 insertions(+), 160 deletions(-) create mode 100644 packages/worker/src/util.ts create mode 100644 packages/worker/src/workload/http.ts create mode 100644 packages/worker/src/workload/types.ts create mode 100644 packages/worker/src/workload/util.ts create mode 
100644 packages/worker/src/workload/websocket.ts diff --git a/apps/webapp/app/v3/services/initializeDeployment.server.ts b/apps/webapp/app/v3/services/initializeDeployment.server.ts index d8b621dad8..fa6247c580 100644 --- a/apps/webapp/app/v3/services/initializeDeployment.server.ts +++ b/apps/webapp/app/v3/services/initializeDeployment.server.ts @@ -7,7 +7,7 @@ import { calculateNextBuildVersion } from "../utils/calculateNextBuildVersion"; import { BaseService } from "./baseService.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; import { env } from "~/env.server"; -import { WorkerInstanceGroupType } from "@trigger.dev/database"; +import { WorkerDeploymentType } from "@trigger.dev/database"; import { logger } from "~/services/logger.server"; const nanoid = customAlphabet("1234567890abcdefghijklmnopqrstuvwxyz", 8); @@ -66,18 +66,16 @@ export class InitializeDeploymentService extends BaseService { const unmanagedImageTag = unmanagedImageParts.join("/"); - const defaultType = WorkerInstanceGroupType.MANAGED; - const deploymentType = payload.type ?? defaultType; - const isShared = deploymentType === WorkerInstanceGroupType.MANAGED; + const isManaged = payload.type === WorkerDeploymentType.MANAGED; logger.debug("Creating deployment", { environmentId: environment.id, projectId: environment.projectId, version: nextVersion, triggeredById: triggeredBy?.id, - type: deploymentType, - imageTag: isShared ? sharedImageTag : unmanagedImageTag, - imageReference: isShared ? undefined : unmanagedImageTag, + type: payload.type, + imageTag: isManaged ? sharedImageTag : unmanagedImageTag, + imageReference: isManaged ? undefined : unmanagedImageTag, }); const deployment = await this._prisma.workerDeployment.create({ @@ -91,8 +89,8 @@ export class InitializeDeploymentService extends BaseService { projectId: environment.projectId, externalBuildData, triggeredById: triggeredBy?.id, - type: deploymentType, - imageReference: isShared ? 
undefined : unmanagedImageTag, + type: payload.type, + imageReference: isManaged ? undefined : unmanagedImageTag, }, }); @@ -105,7 +103,7 @@ export class InitializeDeploymentService extends BaseService { return { deployment, - imageTag: isShared ? sharedImageTag : unmanagedImageTag, + imageTag: isManaged ? sharedImageTag : unmanagedImageTag, }; }); } diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index efe734f44d..8daf8773ab 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2611,6 +2611,12 @@ enum CheckpointRestoreEventType { RESTORE } +enum WorkerDeploymentType { + MANAGED + UNMANAGED + V1 +} + model WorkerDeployment { id String @id @default(cuid()) @@ -2623,8 +2629,8 @@ model WorkerDeployment { externalBuildData Json? - status WorkerDeploymentStatus @default(PENDING) - type WorkerInstanceGroupType @default(MANAGED) + status WorkerDeploymentStatus @default(PENDING) + type WorkerDeploymentType @default(V1) project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) projectId String diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 42557840aa..3e919a9930 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1041,6 +1041,7 @@ export class RunEngine { costInCents: run.costInCents, /** @deprecated */ baseCostInCents: run.baseCostInCents, + traceContext: run.traceContext as Record, }, queue: { id: queue.friendlyId, diff --git a/packages/cli-v3/src/commands/deploy.ts b/packages/cli-v3/src/commands/deploy.ts index 9188259d98..91caa8f523 100644 --- a/packages/cli-v3/src/commands/deploy.ts +++ b/packages/cli-v3/src/commands/deploy.ts @@ -211,8 +211,10 @@ async function _deployCommand(dir: string, options: DeployCommandOptions) { const forcedExternals = await 
resolveAlwaysExternal(projectClient.client); + const isRunEngineV2 = resolvedConfig.compatibilityFlags.includes("run_engine_v2"); + const buildManifest = await buildWorker({ - target: resolvedConfig.compatibilityFlags.includes("run_engine_v2") ? "managed" : "deploy", + target: isRunEngineV2 ? "managed" : "deploy", environment: options.env, destination: destination.path, resolvedConfig, @@ -244,7 +246,7 @@ async function _deployCommand(dir: string, options: DeployCommandOptions) { selfHosted: options.selfHosted, registryHost: options.registry, namespace: options.namespace, - type: "MANAGED", + type: isRunEngineV2 ? "MANAGED" : "V1", }); if (!deploymentResponse.success) { diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 1f7d637846..849aee3fc9 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -5,16 +5,21 @@ import { z } from "zod"; import { CLOUD_API_URL } from "../consts.js"; import { randomUUID } from "crypto"; import { readJSONFile } from "../utilities/fileSystem.js"; -import { WorkerManifest } from "@trigger.dev/core/v3"; -import { WorkerSession } from "@trigger.dev/worker"; +import { HeartbeatService, WorkerManifest } from "@trigger.dev/core/v3"; +import { WorkloadHttpClient } from "@trigger.dev/worker"; const Env = z.object({ - TRIGGER_API_URL: z.string().default(CLOUD_API_URL), + TRIGGER_API_URL: z.string().url().default(CLOUD_API_URL), TRIGGER_CONTENT_HASH: z.string(), - TRIGGER_WORKER_TOKEN: z.string(), - TRIGGER_WORKER_INSTANCE_NAME: z.string().default(randomUUID()), + TRIGGER_WORKER_API_URL: z.string().url(), + TRIGGER_WORKLOAD_CONTROLLER_ID: z.string().default(randomUUID()), TRIGGER_DEPLOYMENT_ID: z.string(), TRIGGER_DEPLOYMENT_VERSION: z.string(), + TRIGGER_ENV_ID: z.string(), + // This is only useful for cold starts + TRIGGER_RUN_ID: z.string().optional(), + // This is only useful 
for cold starts + TRIGGER_SNAPSHOT_ID: z.string().optional(), NODE_ENV: z.string().default("production"), NODE_EXTRA_CA_CERTS: z.string().optional(), OTEL_EXPORTER_OTLP_ENDPOINT: z.string().default("http://0.0.0.0:3030/otel"), @@ -23,134 +28,194 @@ const Env = z.object({ const env = Env.parse(stdEnv); logger.loggerLevel = "debug"; -logger.debug("Creating unmanaged worker", { env }); -class ManagedWorker { - private readonly session: WorkerSession; +type ManagedRunControllerOptions = { + workerManifest: WorkerManifest; + heartbeatIntervalSeconds?: number; +}; + +class ManagedRunController { private taskRunProcess?: TaskRunProcess; - constructor(private workerManifest: WorkerManifest) { - this.session = new WorkerSession({ - workerToken: env.TRIGGER_WORKER_TOKEN, - apiUrl: env.TRIGGER_API_URL, - instanceName: env.TRIGGER_WORKER_INSTANCE_NAME, + private workerManifest: WorkerManifest; + private readonly httpClient: WorkloadHttpClient; + private readonly heartbeatService: HeartbeatService; + private readonly heartbeatIntervalSeconds: number; + + private runId?: string; + private snapshotId?: string; + + constructor(opts: ManagedRunControllerOptions) { + logger.debug("[ManagedRunController] Creating controller", { env }); + + this.workerManifest = opts.workerManifest; + // TODO: This should be dynamic and set by (or at least overridden by) the managed worker / platform + this.heartbeatIntervalSeconds = opts.heartbeatIntervalSeconds || 30; + + this.runId = env.TRIGGER_RUN_ID; + this.snapshotId = env.TRIGGER_SNAPSHOT_ID; + + this.httpClient = new WorkloadHttpClient({ + workerApiUrl: env.TRIGGER_WORKER_API_URL, deploymentId: env.TRIGGER_DEPLOYMENT_ID, - dequeueIntervalMs: 1000, }); - const traceContext = new Map>(); + this.heartbeatService = new HeartbeatService({ + heartbeat: async () => { + if (!this.runId || !this.snapshotId) { + logger.debug("[ManagedRunController] Skipping heartbeat, no run ID or snapshot ID"); + return; + } - this.session.on("runQueueMessage", async 
({ time, message }) => { - logger.debug("[UnmanagedWorker] Received runQueueMessage", { time, message }); + console.debug("[ManagedRunController] Sending heartbeat"); - traceContext.set(message.run.id, message.run.traceContext); + const response = await this.httpClient.heartbeat(this.runId, this.snapshotId, { + cpu: 0, + memory: 0, + }); - this.session.emit("requestRunAttemptStart", { - time: new Date(), - run: { - id: message.run.id, - }, - snapshot: { - id: message.snapshot.id, - }, - }); + if (!response.success) { + console.error("[ManagedRunController] Heartbeat failed", { error: response.error }); + } + }, + intervalMs: this.heartbeatIntervalSeconds * 1000, + leadingEdge: false, + onError: async (error) => { + console.error("[ManagedRunController] Failed to send heartbeat", { error }); + }, }); - this.session.on("runAttemptStarted", async ({ time, run, snapshot, execution, envVars }) => { - const taskRunEnv = { - ...gatherProcessEnv(), - ...envVars, - }; - - this.taskRunProcess = new TaskRunProcess({ - workerManifest: this.workerManifest, - env: taskRunEnv, - serverWorker: { - id: "unmanaged", - contentHash: env.TRIGGER_CONTENT_HASH, - version: env.TRIGGER_DEPLOYMENT_VERSION, - }, - payload: { - execution, - // TODO: The run engine could return this when the run is started - traceContext: traceContext.get(run.id) ?? 
{}, - }, - messageId: run.id, + process.on("SIGTERM", async () => { + logger.debug("[ManagedRunController] Received SIGTERM, stopping worker"); + await this.stop(); + }); + } + + async start() { + logger.debug("[ManagedRunController] Starting up"); + + // TODO: remove this after testing + setTimeout(() => { + // exit after 5 minutes + console.error("[ManagedRunController] Exiting after 5 minutes"); + process.exit(1); + }, 60 * 5000); + + this.heartbeatService.start(); + + if (!this.runId || !this.snapshotId) { + logger.debug("[ManagedRunController] Missing run ID or snapshot ID", { + runId: this.runId, + snapshotId: this.snapshotId, }); + process.exit(1); + } - try { - await this.taskRunProcess.initialize(); + const start = await this.httpClient.startRunAttempt(this.runId, this.snapshotId); - logger.log("executing task run process", { - attemptId: execution.attempt.id, - runId: execution.run.id, - }); + if (!start.success) { + console.error("[ManagedRunController] Failed to start run", { error: start.error }); + process.exit(1); + } + + logger.debug("[ManagedRunController] Started run", { + runId: start.data.run.id, + snapshot: start.data.snapshot.id, + }); - const completion = await this.taskRunProcess.execute(); + const { run, snapshot, execution, envVars } = start.data; + + const taskRunEnv = { + ...gatherProcessEnv(), + ...envVars, + }; + + this.taskRunProcess = new TaskRunProcess({ + workerManifest: this.workerManifest, + env: taskRunEnv, + serverWorker: { + id: "unmanaged", + contentHash: env.TRIGGER_CONTENT_HASH, + version: env.TRIGGER_DEPLOYMENT_VERSION, + }, + payload: { + execution, + traceContext: execution.run.traceContext ?? 
{}, + }, + messageId: run.id, + }); - logger.log("completed", completion); + try { + await this.taskRunProcess.initialize(); - try { - await this.taskRunProcess.cleanup(true); - } catch (error) { - logger.error("Failed to cleanup task run process, submitting completion anyway", { - error, - }); - } + logger.log("executing task run process", { + attemptId: execution.attempt.id, + runId: execution.run.id, + }); - this.session.emit("runAttemptCompleted", { - time: new Date(), - run: { - id: run.id, - }, - snapshot: { - id: snapshot.id, - }, - completion, - }); + const completion = await this.taskRunProcess.execute(); + + logger.log("Completed run", completion); + + try { + await this.taskRunProcess.cleanup(true); } catch (error) { - logger.error("Failed to complete lazy attempt", { + console.error("Failed to cleanup task run process, submitting completion anyway", { error, }); + } + + const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { + completion, + }); - this.session.emit("runAttemptCompleted", { - time: new Date(), - run: { - id: run.id, - }, - snapshot: { - id: snapshot.id, - }, - completion: { - id: execution.run.id, - ok: false, - retry: undefined, - error: TaskRunProcess.parseExecuteError(error), - }, + if (!completionResult.success) { + console.error("Failed to submit completion", { + error: completionResult.error, }); + process.exit(1); } - }); - process.on("SIGTERM", async () => { - logger.debug("[UnmanagedWorker] Received SIGTERM, stopping worker"); - await this.stop(); - }); - } + logger.log("Completion submitted", completionResult.data.result); + } catch (error) { + console.error("Error while executing attempt", { + error, + }); - async start() { - logger.debug("[UnmanagedWorker] Starting up"); - await this.session.start(); + const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { + completion: { + id: execution.run.id, + ok: false, + retry: undefined, + error: 
TaskRunProcess.parseExecuteError(error), + }, + }); + + if (!completionResult.success) { + console.error("Failed to submit completion after error", { + error: completionResult.error, + }); + process.exit(1); + } + + logger.log("completed run", completionResult.data.result); + } } async stop() { - logger.debug("[UnmanagedWorker] Shutting down"); - await this.session.stop(); + logger.debug("[ManagedRunController] Shutting down"); + + if (this.taskRunProcess) { + await this.taskRunProcess.cleanup(true); + } + + this.heartbeatService.stop(); } } const workerManifest = await loadWorkerManifest(); -const prodWorker = new ManagedWorker(workerManifest); +const prodWorker = new ManagedRunController({ workerManifest }); await prodWorker.start(); function gatherProcessEnv(): Record { diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts index 6d3f4c0ec8..ab35ef7638 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts @@ -25,7 +25,7 @@ const env = Env.parse(stdEnv); logger.loggerLevel = "debug"; logger.debug("Creating unmanaged worker", { env }); -class UnmanagedWorker { +class UnmanagedRunController { private readonly session: WorkerSession; private taskRunProcess?: TaskRunProcess; @@ -38,12 +38,8 @@ class UnmanagedWorker { dequeueIntervalMs: 1000, }); - const traceContext = new Map>(); - this.session.on("runQueueMessage", async ({ time, message }) => { - logger.debug("[UnmanagedWorker] Received runQueueMessage", { time, message }); - - traceContext.set(message.run.id, message.run.traceContext); + logger.debug("[UnmanagedRunController] Received runQueueMessage", { time, message }); this.session.emit("requestRunAttemptStart", { time: new Date(), @@ -72,8 +68,7 @@ class UnmanagedWorker { }, payload: { execution, - // TODO: The run engine could return this when the run is started - traceContext: 
traceContext.get(run.id) ?? {}, + traceContext: execution.run.traceContext ?? {}, }, messageId: run.id, }); @@ -93,7 +88,7 @@ class UnmanagedWorker { try { await this.taskRunProcess.cleanup(true); } catch (error) { - logger.error("Failed to cleanup task run process, submitting completion anyway", { + console.error("Failed to cleanup task run process, submitting completion anyway", { error, }); } @@ -109,7 +104,7 @@ class UnmanagedWorker { completion, }); } catch (error) { - logger.error("Failed to complete lazy attempt", { + console.error("Failed to complete lazy attempt", { error, }); @@ -132,25 +127,25 @@ class UnmanagedWorker { }); process.on("SIGTERM", async () => { - logger.debug("[UnmanagedWorker] Received SIGTERM, stopping worker"); + logger.debug("[UnmanagedRunController] Received SIGTERM, stopping worker"); await this.stop(); }); } async start() { - logger.debug("[UnmanagedWorker] Starting up"); + logger.debug("[UnmanagedRunController] Starting up"); await this.session.start(); } async stop() { - logger.debug("[UnmanagedWorker] Shutting down"); + logger.debug("[UnmanagedRunController] Shutting down"); await this.session.stop(); } } const workerManifest = await loadWorkerManifest(); -const prodWorker = new UnmanagedWorker(workerManifest); +const prodWorker = new UnmanagedRunController(workerManifest); await prodWorker.start(); function gatherProcessEnv(): Record { diff --git a/packages/core/src/v3/apps/http.ts b/packages/core/src/v3/apps/http.ts index 70de522c00..970b3473f2 100644 --- a/packages/core/src/v3/apps/http.ts +++ b/packages/core/src/v3/apps/http.ts @@ -14,6 +14,34 @@ export const getTextBody = (req: IncomingMessage) => }); }); +export async function getJsonBody(req: IncomingMessage): Promise { + return new Promise((resolve, reject) => { + let body = ""; + + req.on("data", (chunk) => { + body += chunk.toString(); + }); + + req.on("end", () => { + console.log("got body", body); + resolve(safeJsonParse(body)); + }); + }); +} + +function 
safeJsonParse(text: string) { + if (!text) { + return null; + } + + try { + return JSON.parse(text); + } catch (error) { + console.error("Failed to parse JSON", { error, text }); + return null; + } +} + export class HttpReply { constructor(private response: Parameters[1]) {} diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 47e372f18a..0e1afb2d36 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -213,7 +213,7 @@ export const InitializeDeploymentRequestBody = z.object({ registryHost: z.string().optional(), selfHosted: z.boolean().optional(), namespace: z.string().optional(), - type: z.enum(["MANAGED", "UNMANAGED"]).optional(), + type: z.enum(["MANAGED", "UNMANAGED", "V1"]).optional(), }); export type InitializeDeploymentRequestBody = z.infer; diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 814b6e2dca..971d8d7f86 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -225,7 +225,11 @@ export const TaskRunExecutionBatch = z.object({ export const TaskRunExecution = z.object({ task: TaskRunExecutionTask, attempt: TaskRunExecutionAttempt, - run: TaskRun, + run: TaskRun.and( + z.object({ + traceContext: z.record(z.unknown()).optional(), + }) + ), queue: TaskRunExecutionQueue, environment: TaskRunExecutionEnvironment, organization: TaskRunExecutionOrganization, diff --git a/packages/worker/src/client/http.ts b/packages/worker/src/client/http.ts index 931f500c41..b6e3da5564 100644 --- a/packages/worker/src/client/http.ts +++ b/packages/worker/src/client/http.ts @@ -11,23 +11,23 @@ import { WorkerApiRunAttemptStartResponseBody, } from "../schemas.js"; import { WorkerClientCommonOptions } from "./types.js"; -import { getDefaultHeaders } from "./util.js"; +import { getDefaultWorkerHeaders } from "./util.js"; type WorkerHttpClientOptions = WorkerClientCommonOptions; export class WorkerHttpClient { 
- private readonly apiURL: string; + private readonly apiUrl: string; private readonly workerToken: string; private readonly instanceName: string; private readonly defaultHeaders: Record; constructor(opts: WorkerHttpClientOptions) { - this.apiURL = opts.apiUrl.replace(/\/$/, ""); + this.apiUrl = opts.apiUrl.replace(/\/$/, ""); this.workerToken = opts.workerToken; this.instanceName = opts.instanceName; - this.defaultHeaders = getDefaultHeaders(opts); + this.defaultHeaders = getDefaultWorkerHeaders(opts); - if (!this.apiURL) { + if (!this.apiUrl) { throw new Error("apiURL is required and needs to be a non-empty string"); } @@ -43,7 +43,7 @@ export class WorkerHttpClient { async connect(body: WorkerApiConnectRequestBody) { return wrapZodFetch( WorkerApiConnectResponseBody, - `${this.apiURL}/api/v1/worker-actions/connect`, + `${this.apiUrl}/api/v1/worker-actions/connect`, { method: "POST", headers: { @@ -58,7 +58,7 @@ export class WorkerHttpClient { async heartbeat(body: WorkerApiHeartbeatRequestBody) { return wrapZodFetch( WorkerApiHeartbeatResponseBody, - `${this.apiURL}/api/v1/worker-actions/heartbeat`, + `${this.apiUrl}/api/v1/worker-actions/heartbeat`, { method: "POST", headers: { @@ -73,7 +73,7 @@ export class WorkerHttpClient { async dequeue() { return wrapZodFetch( WorkerApiDequeueResponseBody, - `${this.apiURL}/api/v1/worker-actions/dequeue`, + `${this.apiUrl}/api/v1/worker-actions/dequeue`, { headers: { ...this.defaultHeaders, @@ -85,7 +85,7 @@ export class WorkerHttpClient { async startRun(runId: string, snapshotId: string) { return wrapZodFetch( WorkerApiRunAttemptStartResponseBody, - `${this.apiURL}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, + `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, { method: "POST", headers: { @@ -102,7 +102,7 @@ export class WorkerHttpClient { ) { return wrapZodFetch( WorkerApiRunAttemptCompleteResponseBody, - 
`${this.apiURL}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, + `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, { method: "POST", headers: { diff --git a/packages/worker/src/client/util.ts b/packages/worker/src/client/util.ts index ac82d1dc56..953d71766d 100644 --- a/packages/worker/src/client/util.ts +++ b/packages/worker/src/client/util.ts @@ -1,21 +1,10 @@ import { HEADER_NAME } from "../consts.js"; +import { createHeaders } from "../util.js"; import { WorkerClientCommonOptions } from "./types.js"; -/** Will ignore headers with falsey values */ -function createHeaders(headersInit: Record) { - const headers = new Headers(); - - for (const [key, value] of Object.entries(headersInit)) { - if (!value) { - continue; - } - headers.set(key, value); - } - - return Object.fromEntries(headers.entries()); -} - -export function getDefaultHeaders(options: WorkerClientCommonOptions): Record { +export function getDefaultWorkerHeaders( + options: WorkerClientCommonOptions +): Record { return createHeaders({ Authorization: `Bearer ${options.workerToken}`, [HEADER_NAME.WORKER_INSTANCE_NAME]: options.instanceName, diff --git a/packages/worker/src/client/websocket.ts b/packages/worker/src/client/websocket.ts index f87eda3f81..4df6633e02 100644 --- a/packages/worker/src/client/websocket.ts +++ b/packages/worker/src/client/websocket.ts @@ -1,7 +1,7 @@ import { ZodSocketConnection } from "@trigger.dev/core/v3/zodSocket"; import { PlatformToWorkerMessages, WorkerToPlatformMessages } from "../messages.js"; import { WorkerClientCommonOptions } from "./types.js"; -import { getDefaultHeaders } from "./util.js"; +import { getDefaultWorkerHeaders } from "./util.js"; type WorkerWebsocketClientOptions = WorkerClientCommonOptions; @@ -13,7 +13,7 @@ export class WorkerWebsocketClient { >; constructor(private opts: WorkerWebsocketClientOptions) { - this.defaultHeaders = getDefaultHeaders(opts); + this.defaultHeaders 
= getDefaultWorkerHeaders(opts); } start() { diff --git a/packages/worker/src/consts.ts b/packages/worker/src/consts.ts index 8aac362e44..7f1c475301 100644 --- a/packages/worker/src/consts.ts +++ b/packages/worker/src/consts.ts @@ -3,3 +3,7 @@ export const HEADER_NAME = { WORKER_DEPLOYMENT_ID: "x-trigger-worker-deployment-id", WORKER_MANAGED_SECRET: "x-trigger-worker-managed-secret", }; + +export const WORKLOAD_HEADER_NAME = { + WORKLOAD_DEPLOYMENT_ID: "x-trigger-workload-deployment-id", +}; diff --git a/packages/worker/src/index.ts b/packages/worker/src/index.ts index 6eb70be782..d4845c3ca2 100644 --- a/packages/worker/src/index.ts +++ b/packages/worker/src/index.ts @@ -3,3 +3,5 @@ export * from "./consts.js"; export * from "./client/http.js"; export * from "./workerSession.js"; export * from "./events.js"; +export * from "./workload/http.js"; +export * from "./schemas.js"; diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/schemas.ts index 96e7efaf38..b071d9f62d 100644 --- a/packages/worker/src/schemas.ts +++ b/packages/worker/src/schemas.ts @@ -6,6 +6,7 @@ import { TaskRunExecutionResult, } from "@trigger.dev/core/v3"; +// Worker export const WorkerApiHeartbeatRequestBody = z.object({ cpu: z.object({ used: z.number(), @@ -37,7 +38,6 @@ export type WorkerApiConnectResponseBody = z.infer; -// Attempt start export const WorkerApiRunAttemptStartResponseBody = StartRunAttemptResult.and( z.object({ envVars: z.record(z.string()), @@ -47,7 +47,6 @@ export type WorkerApiRunAttemptStartResponseBody = z.infer< typeof WorkerApiRunAttemptStartResponseBody >; -// Attempt completion export const WorkerApiRunAttemptCompleteRequestBody = z.object({ completion: TaskRunExecutionResult, }); @@ -61,3 +60,28 @@ export const WorkerApiRunAttemptCompleteResponseBody = z.object({ export type WorkerApiRunAttemptCompleteResponseBody = z.infer< typeof WorkerApiRunAttemptCompleteResponseBody >; + +// Workload +export const WorkloadHeartbeatRequestBody = z.object({ + cpu: 
z.number(), + memory: z.number(), +}); +export type WorkloadHeartbeatRequestBody = z.infer; + +export const WorkloadHeartbeatResponseBody = WorkerApiHeartbeatResponseBody; +export type WorkloadHeartbeatResponseBody = z.infer; + +export const WorkloadRunAttemptCompleteRequestBody = WorkerApiRunAttemptCompleteRequestBody; +export type WorkloadRunAttemptCompleteRequestBody = z.infer< + typeof WorkloadRunAttemptCompleteRequestBody +>; + +export const WorkloadRunAttemptCompleteResponseBody = WorkerApiRunAttemptCompleteResponseBody; +export type WorkloadRunAttemptCompleteResponseBody = z.infer< + typeof WorkloadRunAttemptCompleteResponseBody +>; + +export const WorkloadRunAttemptStartResponseBody = WorkerApiRunAttemptStartResponseBody; +export type WorkloadRunAttemptStartResponseBody = z.infer< + typeof WorkloadRunAttemptStartResponseBody +>; diff --git a/packages/worker/src/util.ts b/packages/worker/src/util.ts new file mode 100644 index 0000000000..c957231bdf --- /dev/null +++ b/packages/worker/src/util.ts @@ -0,0 +1,13 @@ +/** Will ignore headers with falsey values */ +export function createHeaders(headersInit: Record) { + const headers = new Headers(); + + for (const [key, value] of Object.entries(headersInit)) { + if (!value) { + continue; + } + headers.set(key, value); + } + + return Object.fromEntries(headers.entries()); +} diff --git a/packages/worker/src/workerSession.ts b/packages/worker/src/workerSession.ts index c8fe04cfc9..c247a0542c 100644 --- a/packages/worker/src/workerSession.ts +++ b/packages/worker/src/workerSession.ts @@ -14,13 +14,14 @@ type WorkerSessionOptions = WorkerClientCommonOptions & { }; export class WorkerSession extends EventEmitter { - private readonly httpClient: WorkerHttpClient; + public readonly httpClient: WorkerHttpClient; + private readonly websocketClient: WorkerWebsocketClient; private readonly queueConsumer: RunQueueConsumer; private readonly heartbeatService: HeartbeatService; private readonly heartbeatIntervalSeconds: number; 
- constructor(private opts: WorkerSessionOptions) { + constructor(opts: WorkerSessionOptions) { super(); this.httpClient = new WorkerHttpClient(opts); diff --git a/packages/worker/src/workload/http.ts b/packages/worker/src/workload/http.ts new file mode 100644 index 0000000000..4cffb78187 --- /dev/null +++ b/packages/worker/src/workload/http.ts @@ -0,0 +1,126 @@ +import { z } from "zod"; +import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; +import { + WorkloadHeartbeatRequestBody, + WorkloadHeartbeatResponseBody, + WorkloadRunAttemptCompleteRequestBody, + WorkloadRunAttemptCompleteResponseBody, + WorkloadRunAttemptStartResponseBody, +} from "../schemas.js"; +import { WorkloadClientCommonOptions } from "./types.js"; +import { getDefaultWorkloadHeaders } from "./util.js"; + +type WorkloadHttpClientOptions = WorkloadClientCommonOptions; + +export class WorkloadHttpClient { + private readonly apiUrl: string; + private readonly deploymentId: string; + private readonly defaultHeaders: Record; + + constructor(opts: WorkloadHttpClientOptions) { + this.apiUrl = opts.workerApiUrl.replace(/\/$/, ""); + this.defaultHeaders = getDefaultWorkloadHeaders(opts); + this.deploymentId = opts.deploymentId; + + if (!this.apiUrl) { + throw new Error("apiURL is required and needs to be a non-empty string"); + } + + if (!this.deploymentId) { + throw new Error("deploymentId is required and needs to be a non-empty string"); + } + } + + async heartbeat(runId: string, snapshotId: string, body: WorkloadHeartbeatRequestBody) { + return wrapZodFetch( + WorkloadHeartbeatResponseBody, + `${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/${snapshotId}/heartbeat`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + "Content-Type": "application/json", + }, + body: JSON.stringify(body), + } + ); + } + + async startRunAttempt(runId: string, snapshotId: string) { + return wrapZodFetch( + WorkloadRunAttemptStartResponseBody, + 
`${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + }, + } + ); + } + + async completeRunAttempt( + runId: string, + snapshotId: string, + body: WorkloadRunAttemptCompleteRequestBody + ) { + return wrapZodFetch( + WorkloadRunAttemptCompleteResponseBody, + `${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + }, + body: JSON.stringify(body), + } + ); + } +} + +type ApiResult = + | { success: true; data: TSuccessResult } + | { + success: false; + error: string; + }; + +async function wrapZodFetch( + schema: T, + url: string, + requestInit?: RequestInit +): Promise>> { + try { + const response = await zodfetch(schema, url, requestInit, { + retry: { + minTimeoutInMs: 500, + maxTimeoutInMs: 5000, + maxAttempts: 5, + factor: 2, + randomize: false, + }, + }); + + return { + success: true, + data: response, + }; + } catch (error) { + if (error instanceof ApiError) { + return { + success: false, + error: error.message, + }; + } else if (error instanceof Error) { + return { + success: false, + error: error.message, + }; + } else { + return { + success: false, + error: String(error), + }; + } + } +} diff --git a/packages/worker/src/workload/types.ts b/packages/worker/src/workload/types.ts new file mode 100644 index 0000000000..6930550e0b --- /dev/null +++ b/packages/worker/src/workload/types.ts @@ -0,0 +1,4 @@ +export type WorkloadClientCommonOptions = { + workerApiUrl: string; + deploymentId: string; +}; diff --git a/packages/worker/src/workload/util.ts b/packages/worker/src/workload/util.ts new file mode 100644 index 0000000000..3457a0fbcb --- /dev/null +++ b/packages/worker/src/workload/util.ts @@ -0,0 +1,11 @@ +import { WORKLOAD_HEADER_NAME } from "../consts.js"; +import { createHeaders } from "../util.js"; +import { WorkloadClientCommonOptions } from 
"./types.js"; + +export function getDefaultWorkloadHeaders( + options: WorkloadClientCommonOptions +): Record { + return createHeaders({ + [WORKLOAD_HEADER_NAME.WORKLOAD_DEPLOYMENT_ID]: options.deploymentId, + }); +} diff --git a/packages/worker/src/workload/websocket.ts b/packages/worker/src/workload/websocket.ts new file mode 100644 index 0000000000..12bf1ac736 --- /dev/null +++ b/packages/worker/src/workload/websocket.ts @@ -0,0 +1,52 @@ +import { ZodSocketConnection } from "@trigger.dev/core/v3/zodSocket"; +import { PlatformToWorkerMessages, WorkerToPlatformMessages } from "../messages.js"; +import { WorkloadClientCommonOptions } from "./types.js"; +import { getDefaultWorkloadHeaders } from "./util.js"; + +type WorkerWebsocketClientOptions = WorkloadClientCommonOptions; + +export class WorkerWebsocketClient { + private readonly defaultHeaders: Record; + private platformSocket?: ZodSocketConnection< + typeof WorkerToPlatformMessages, + typeof PlatformToWorkerMessages + >; + + constructor(private opts: WorkerWebsocketClientOptions) { + this.defaultHeaders = getDefaultWorkloadHeaders(opts); + } + + start() { + const websocketPort = this.getPort(this.opts.workerApiUrl); + this.platformSocket = new ZodSocketConnection({ + namespace: "worker", + host: this.getHost(this.opts.workerApiUrl), + port: websocketPort, + secure: websocketPort === 443, + extraHeaders: this.defaultHeaders, + clientMessages: WorkerToPlatformMessages, + serverMessages: PlatformToWorkerMessages, + handlers: {}, + }); + } + + stop() { + this.platformSocket?.close(); + } + + private getHost(apiUrl: string): string { + const url = new URL(apiUrl); + return url.hostname; + } + + private getPort(apiUrl: string): number { + const url = new URL(apiUrl); + const port = Number(url.port); + + if (!isNaN(port) && port !== 0) { + return port; + } + + return url.protocol === "https" ? 
443 : 80; + } +} From 768fc9beebcebeddc7a69b72ee44167cc321d19c Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 27 Nov 2024 15:30:16 +0000 Subject: [PATCH 221/485] deprecate cli logger warn and error methods --- packages/cli-v3/src/utilities/logger.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/cli-v3/src/utilities/logger.ts b/packages/cli-v3/src/utilities/logger.ts index 640ceaa486..64a4b3fc13 100644 --- a/packages/cli-v3/src/utilities/logger.ts +++ b/packages/cli-v3/src/utilities/logger.ts @@ -58,7 +58,9 @@ export class Logger { }; info = (...args: unknown[]) => this.doLog("info", args); log = (...args: unknown[]) => this.doLog("log", args); + /** @deprecated **ONLY USE THIS IN THE CLI** - It will hang the process when used in deployed code (!) */ warn = (...args: unknown[]) => this.doLog("warn", args); + /** @deprecated **ONLY USE THIS IN THE CLI** - It will hang the process when used in deployed code (!) */ error = (...args: unknown[]) => this.doLog("error", args); table(data: TableRow[], level?: Exclude) { const keys: Keys[] = data.length === 0 ? [] : (Object.keys(data[0]!) 
as Keys[]); From d95a7506c2366cb842168b8befe3e11faac3e792 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 11:42:16 +0000 Subject: [PATCH 222/485] add run heartbeat route --- ....$runId.snapshots.$snapshotId.heartbeat.ts | 26 +++++++++++++++++++ .../worker/workerGroupTokenService.server.ts | 1 - 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts new file mode 100644 index 0000000000..125a351c6a --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts @@ -0,0 +1,26 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { WorkloadHeartbeatResponseBody } from "@trigger.dev/worker/schemas"; +import { z } from "zod"; +import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; + +export const action = createActionWorkerApiRoute( + { + params: z.object({ + runId: z.string(), + snapshotId: z.string(), + }), + }, + async ({ + authenticatedWorker, + params, + }): Promise> => { + const { runId, snapshotId } = params; + + await authenticatedWorker.heartbeatRun({ + runId, + snapshotId, + }); + + return json({ ok: true }); + } +); diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index faa7e9fda8..b9926ef268 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -15,7 +15,6 @@ import { CompleteRunAttemptResult, StartRunAttemptResult, ExecutionResult, - ProdTaskRunExecutionPayload, MachinePreset, } from 
"@trigger.dev/core/v3"; import { env } from "~/env.server"; From a06e270f85c32f1ca86398944fc0b04bb54a4150 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 11:43:39 +0000 Subject: [PATCH 223/485] update run engine zod version --- internal-packages/run-engine/package.json | 2 +- pnpm-lock.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index d0a220bf2a..3b1d373c9b 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -15,7 +15,7 @@ "nanoid": "^3.3.4", "redlock": "5.0.0-beta.2", "typescript": "^5.5.4", - "zod": "3.22.3" + "zod": "3.23.8" }, "devDependencies": { "@internal/testcontainers": "workspace:*", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 756a49a3cf..62227e1db0 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -986,8 +986,8 @@ importers: specifier: ^5.5.4 version: 5.5.4 zod: - specifier: 3.22.3 - version: 3.22.3 + specifier: 3.23.8 + version: 3.23.8 devDependencies: '@internal/testcontainers': specifier: workspace:* From 78a3d350dc17a9c77d8eecf80f649f24bbd19131 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 11:59:54 +0000 Subject: [PATCH 224/485] add workload heartbeats --- packages/trigger-sdk/package.json | 2 +- packages/worker/src/client/http.ts | 29 ++++++++++++++++++++++------ packages/worker/src/schemas.ts | 16 +++++++++++---- packages/worker/src/workerSession.ts | 6 +++--- 4 files changed, 39 insertions(+), 14 deletions(-) diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index 72cf800c82..ef8099c5d9 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -109,4 +109,4 @@ "main": "./dist/commonjs/index.js", "types": "./dist/commonjs/index.d.ts", "module": "./dist/esm/index.js" -} \ No newline at end of 
file +} diff --git a/packages/worker/src/client/http.ts b/packages/worker/src/client/http.ts index b6e3da5564..eb18d99507 100644 --- a/packages/worker/src/client/http.ts +++ b/packages/worker/src/client/http.ts @@ -9,6 +9,8 @@ import { WorkerApiRunAttemptCompleteRequestBody, WorkerApiRunAttemptCompleteResponseBody, WorkerApiRunAttemptStartResponseBody, + WorkerApiRunHeartbeatRequestBody, + WorkerApiRunHeartbeatResponseBody, } from "../schemas.js"; import { WorkerClientCommonOptions } from "./types.js"; import { getDefaultWorkerHeaders } from "./util.js"; @@ -55,7 +57,19 @@ export class WorkerHttpClient { ); } - async heartbeat(body: WorkerApiHeartbeatRequestBody) { + async dequeue() { + return wrapZodFetch( + WorkerApiDequeueResponseBody, + `${this.apiUrl}/api/v1/worker-actions/dequeue`, + { + headers: { + ...this.defaultHeaders, + }, + } + ); + } + + async heartbeatWorker(body: WorkerApiHeartbeatRequestBody) { return wrapZodFetch( WorkerApiHeartbeatResponseBody, `${this.apiUrl}/api/v1/worker-actions/heartbeat`, @@ -70,19 +84,22 @@ export class WorkerHttpClient { ); } - async dequeue() { + async heartbeatRun(runId: string, snapshotId: string, body: WorkerApiRunHeartbeatRequestBody) { return wrapZodFetch( - WorkerApiDequeueResponseBody, - `${this.apiUrl}/api/v1/worker-actions/dequeue`, + WorkerApiRunHeartbeatResponseBody, + `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, { + method: "POST", headers: { ...this.defaultHeaders, + "Content-Type": "application/json", }, + body: JSON.stringify(body), } ); } - async startRun(runId: string, snapshotId: string) { + async startRunAttempt(runId: string, snapshotId: string) { return wrapZodFetch( WorkerApiRunAttemptStartResponseBody, `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, @@ -95,7 +112,7 @@ export class WorkerHttpClient { ); } - async completeRun( + async completeRunAttempt( runId: string, snapshotId: string, body: 
WorkerApiRunAttemptCompleteRequestBody diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/schemas.ts index b071d9f62d..a49a5359f0 100644 --- a/packages/worker/src/schemas.ts +++ b/packages/worker/src/schemas.ts @@ -38,6 +38,17 @@ export type WorkerApiConnectResponseBody = z.infer; +export const WorkerApiRunHeartbeatRequestBody = z.object({ + cpu: z.number(), + memory: z.number(), +}); +export type WorkerApiRunHeartbeatRequestBody = z.infer; + +export const WorkerApiRunHeartbeatResponseBody = z.object({ + ok: z.literal(true), +}); +export type WorkerApiRunHeartbeatResponseBody = z.infer; + export const WorkerApiRunAttemptStartResponseBody = StartRunAttemptResult.and( z.object({ envVars: z.record(z.string()), @@ -62,10 +73,7 @@ export type WorkerApiRunAttemptCompleteResponseBody = z.infer< >; // Workload -export const WorkloadHeartbeatRequestBody = z.object({ - cpu: z.number(), - memory: z.number(), -}); +export const WorkloadHeartbeatRequestBody = WorkerApiRunHeartbeatRequestBody; export type WorkloadHeartbeatRequestBody = z.infer; export const WorkloadHeartbeatResponseBody = WorkerApiHeartbeatResponseBody; diff --git a/packages/worker/src/workerSession.ts b/packages/worker/src/workerSession.ts index c247a0542c..bff1431bdd 100644 --- a/packages/worker/src/workerSession.ts +++ b/packages/worker/src/workerSession.ts @@ -39,7 +39,7 @@ export class WorkerSession extends EventEmitter { console.debug("[WorkerSession] Sending heartbeat"); const body = this.getHeartbeatBody(); - const response = await this.httpClient.heartbeat(body); + const response = await this.httpClient.heartbeatWorker(body); if (!response.success) { console.error("[WorkerSession] Heartbeat failed", { error: response.error }); @@ -74,7 +74,7 @@ export class WorkerSession extends EventEmitter { ): Promise { console.log("[WorkerSession] onRequestRunAttemptStart", { time, run, snapshot }); - const start = await this.httpClient.startRun(run.id, snapshot.id); + const start = await 
this.httpClient.startRunAttempt(run.id, snapshot.id); if (!start.success) { console.error("[WorkerSession] Failed to start run", { error: start.error }); @@ -97,7 +97,7 @@ export class WorkerSession extends EventEmitter { ): Promise { console.log("[WorkerSession] onRunAttemptCompleted", { time, run, snapshot, completion }); - const complete = await this.httpClient.completeRun(run.id, snapshot.id, { + const complete = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { completion: completion, }); From 5da5a612b4f50b6b910479327935915734e37aa4 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 12:19:00 +0000 Subject: [PATCH 225/485] unify run heartbeat methods --- packages/cli-v3/src/entryPoints/managed-run-controller.ts | 2 +- packages/worker/src/workload/http.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 849aee3fc9..1e263417be 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -69,7 +69,7 @@ class ManagedRunController { console.debug("[ManagedRunController] Sending heartbeat"); - const response = await this.httpClient.heartbeat(this.runId, this.snapshotId, { + const response = await this.httpClient.heartbeatRun(this.runId, this.snapshotId, { cpu: 0, memory: 0, }); diff --git a/packages/worker/src/workload/http.ts b/packages/worker/src/workload/http.ts index 4cffb78187..c643b91e30 100644 --- a/packages/worker/src/workload/http.ts +++ b/packages/worker/src/workload/http.ts @@ -31,7 +31,7 @@ export class WorkloadHttpClient { } } - async heartbeat(runId: string, snapshotId: string, body: WorkloadHeartbeatRequestBody) { + async heartbeatRun(runId: string, snapshotId: string, body: WorkloadHeartbeatRequestBody) { return wrapZodFetch( WorkloadHeartbeatResponseBody, 
`${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/${snapshotId}/heartbeat`, From bb824df2c85a32ed3a9ab55d6b1a8ae06ddccad1 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 12:41:25 +0000 Subject: [PATCH 226/485] restore trigger task service router --- .../app/v3/services/triggerTask.server.ts | 701 ++---------------- 1 file changed, 57 insertions(+), 644 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index 47b3e39a2b..9102143c20 100644 --- a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -1,30 +1,9 @@ -import { - IOPacket, - QueueOptions, - SemanticInternalAttributes, - TriggerTaskRequestBody, - packetRequiresOffloading, -} from "@trigger.dev/core/v3"; -import { env } from "~/env.server"; +import { TriggerTaskRequestBody } from "@trigger.dev/core/v3"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; -import { autoIncrementCounter } from "~/services/autoIncrementCounter.server"; -import { workerQueue } from "~/services/worker.server"; -import { marqs, sanitizeQueueName } from "~/v3/marqs/index.server"; -import { eventRepository } from "../eventRepository.server"; -import { generateFriendlyId } from "../friendlyIdentifiers"; -import { uploadToObjectStore } from "../r2.server"; -import { startActiveSpan } from "../tracer.server"; -import { getEntitlement } from "~/services/platform.v3.server"; -import { BaseService, ServiceValidationError } from "./baseService.server"; -import { logger } from "~/services/logger.server"; -import { isFinalAttemptStatus, isFinalRunStatus } from "../taskStatus"; -import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; -import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; -import { handleMetadataPacket } from "~/utils/packets"; -import { 
parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; -import { ExpireEnqueuedRunService } from "./expireEnqueuedRun.server"; -import { guardQueueSizeLimitsForEnv } from "../queueSizeLimits.server"; -import { clampMaxDuration } from "../utils/maxDuration"; +import { WithRunEngine } from "./baseService.server"; +import { RunEngineVersion, RuntimeEnvironmentType } from "@trigger.dev/database"; +import { TriggerTaskServiceV1 } from "./triggerTaskV1.server"; +import { TriggerTaskServiceV2 } from "./triggerTaskV2.server"; export type TriggerTaskServiceOptions = { idempotencyKey?: string; @@ -42,7 +21,7 @@ export class OutOfEntitlementError extends Error { } } -export class TriggerTaskService extends BaseService { +export class TriggerTaskService extends WithRunEngine { public async call( taskId: string, environment: AuthenticatedEnvironment, @@ -52,638 +31,72 @@ export class TriggerTaskService extends BaseService { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); - const idempotencyKey = options.idempotencyKey ?? body.options?.idempotencyKey; - const delayUntil = await parseDelay(body.options?.delay); - - const ttl = - typeof body.options?.ttl === "number" - ? stringifyDuration(body.options?.ttl) - : body.options?.ttl ?? (environment.type === "DEVELOPMENT" ? "10m" : undefined); - - const existingRun = idempotencyKey - ? 
await this._prisma.taskRun.findUnique({ - where: { - runtimeEnvironmentId_taskIdentifier_idempotencyKey: { - runtimeEnvironmentId: environment.id, - idempotencyKey, - taskIdentifier: taskId, - }, - }, - }) - : undefined; - - if (existingRun) { - span.setAttribute("runId", existingRun.friendlyId); - - return existingRun; - } - - if (environment.type !== "DEVELOPMENT") { - const result = await getEntitlement(environment.organizationId); - if (result && result.hasAccess === false) { - throw new OutOfEntitlementError(); - } + //todo we need to determine the version using the BackgroundWorker + //- triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. + //- No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + // const workerWithTasks = workerId + // ? await getWorkerDeploymentFromWorker(prisma, workerId) + // : run.runtimeEnvironment.type === "DEVELOPMENT" + // ? await getMostRecentWorker(prisma, run.runtimeEnvironmentId) + // : await getWorkerFromCurrentlyPromotedDeployment(prisma, run.runtimeEnvironmentId); + + if (environment.project.engine === RunEngineVersion.V1) { + return await this.callV1(taskId, environment, body, options); } - const queueSizeGuard = await guardQueueSizeLimitsForEnv(environment, marqs); - - logger.debug("Queue size guard result", { - queueSizeGuard, - environment: { - id: environment.id, - type: environment.type, - organization: environment.organization, - project: environment.project, - }, - }); - - if (!queueSizeGuard.isWithinLimits) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the queue size limit for this environment has been reached. 
The maximum size is ${queueSizeGuard.maximumSize}` - ); - } - - if ( - body.options?.tags && - typeof body.options.tags !== "string" && - body.options.tags.length > MAX_TAGS_PER_RUN - ) { - throw new ServiceValidationError( - `Runs can only have ${MAX_TAGS_PER_RUN} tags, you're trying to set ${body.options.tags.length}.` - ); + if (environment.type === RuntimeEnvironmentType.DEVELOPMENT) { + return await this.callV1(taskId, environment, body, options); } - const runFriendlyId = generateFriendlyId("run"); - - const payloadPacket = await this.#handlePayloadPacket( - body.payload, - body.options?.payloadType ?? "application/json", - runFriendlyId, - environment - ); - - const metadataPacket = body.options?.metadata - ? handleMetadataPacket( - body.options?.metadata, - body.options?.metadataType ?? "application/json" - ) - : undefined; - - const dependentAttempt = body.options?.dependentAttempt - ? await this._prisma.taskRunAttempt.findUnique({ - where: { friendlyId: body.options.dependentAttempt }, - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }) - : undefined; - - if ( - dependentAttempt && - (isFinalAttemptStatus(dependentAttempt.status) || - isFinalRunStatus(dependentAttempt.taskRun.status)) - ) { - logger.debug("Dependent attempt or run is in a terminal state", { - dependentAttempt: dependentAttempt, - }); - - if (isFinalAttemptStatus(dependentAttempt.status)) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentAttempt.status}` - ); - } else { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent run has a status of ${dependentAttempt.taskRun.status}` - ); - } - } - - const parentAttempt = body.options?.parentAttempt - ? 
await this._prisma.taskRunAttempt.findUnique({ - where: { friendlyId: body.options.parentAttempt }, - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }) - : undefined; - - const dependentBatchRun = body.options?.dependentBatch - ? await this._prisma.batchTaskRun.findUnique({ - where: { friendlyId: body.options.dependentBatch }, - include: { - dependentTaskAttempt: { - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - depth: true, - }, - }, - }, - }, - }, - }) - : undefined; - - if ( - dependentBatchRun && - dependentBatchRun.dependentTaskAttempt && - (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status) || - isFinalRunStatus(dependentBatchRun.dependentTaskAttempt.taskRun.status)) - ) { - logger.debug("Dependent batch run task attempt or run has been canceled", { - dependentBatchRunId: dependentBatchRun.id, - status: dependentBatchRun.status, - attempt: dependentBatchRun.dependentTaskAttempt, - }); - - if (isFinalAttemptStatus(dependentBatchRun.dependentTaskAttempt.status)) { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent attempt has a status of ${dependentBatchRun.dependentTaskAttempt.status}` - ); - } else { - throw new ServiceValidationError( - `Cannot trigger ${taskId} as the parent run has a status of ${dependentBatchRun.dependentTaskAttempt.taskRun.status}` - ); - } - } - - const parentBatchRun = body.options?.parentBatch - ? 
await this._prisma.batchTaskRun.findUnique({ - where: { friendlyId: body.options.parentBatch }, - include: { - dependentTaskAttempt: { - include: { - taskRun: { - select: { - id: true, - status: true, - taskIdentifier: true, - rootTaskRunId: true, - }, - }, - }, - }, - }, - }) - : undefined; - - return await eventRepository.traceEvent( - taskId, - { - context: options.traceContext, - spanParentAsLink: options.spanParentAsLink, - parentAsLinkType: options.parentAsLinkType, - kind: "SERVER", - environment, - taskSlug: taskId, - attributes: { - properties: { - [SemanticInternalAttributes.SHOW_ACTIONS]: true, - }, - style: { - icon: options.customIcon ?? "task", - }, - runIsTest: body.options?.test ?? false, - batchId: options.batchId, - idempotencyKey, - }, - incomplete: true, - immediate: true, - }, - async (event, traceContext, traceparent) => { - const run = await autoIncrementCounter.incrementInTransaction( - `v3-run:${environment.id}:${taskId}`, - async (num, tx) => { - const lockedToBackgroundWorker = body.options?.lockToVersion - ? await tx.backgroundWorker.findUnique({ - where: { - projectId_runtimeEnvironmentId_version: { - projectId: environment.projectId, - runtimeEnvironmentId: environment.id, - version: body.options?.lockToVersion, - }, - }, - }) - : undefined; + //todo Additional checks + /* + - If the `triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. + - Add an `engine` column to `Project` in the database. - let queueName = sanitizeQueueName( - await this.#getQueueName(taskId, environment, body.options?.queue?.name) - ); + Add `engine` to the trigger.config file. It would default to "V1" for now, but you can set it to V2. - // Check that the queuename is not an empty string - if (!queueName) { - queueName = sanitizeQueueName(`task/${taskId}`); - } + You run `npx trigger.dev@latest deploy` with config v2. + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. 
- event.setAttribute("queueName", queueName); - span.setAttribute("queueName", queueName); + You run `npx trigger.dev@latest dev` with config v2 + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. - //upsert tags - let tagIds: string[] = []; - const bodyTags = - typeof body.options?.tags === "string" ? [body.options.tags] : body.options?.tags; - if (bodyTags && bodyTags.length > 0) { - for (const tag of bodyTags) { - const tagRecord = await createTag({ - tag, - projectId: environment.projectId, - }); - if (tagRecord) { - tagIds.push(tagRecord.id); - } - } - } + When triggering + - triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. + - No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + */ - const depth = dependentAttempt - ? dependentAttempt.taskRun.depth + 1 - : parentAttempt - ? parentAttempt.taskRun.depth + 1 - : dependentBatchRun?.dependentTaskAttempt - ? dependentBatchRun.dependentTaskAttempt.taskRun.depth + 1 - : 0; - - const taskRun = await tx.taskRun.create({ - data: { - status: delayUntil ? "DELAYED" : "PENDING", - number: num, - friendlyId: runFriendlyId, - runtimeEnvironmentId: environment.id, - projectId: environment.projectId, - idempotencyKey, - taskIdentifier: taskId, - payload: payloadPacket.data ?? "", - payloadType: payloadPacket.dataType, - context: body.context, - traceContext: traceContext, - traceId: event.traceId, - spanId: event.spanId, - parentSpanId: - options.parentAsLinkType === "replay" ? undefined : traceparent?.spanId, - lockedToVersionId: lockedToBackgroundWorker?.id, - concurrencyKey: body.options?.concurrencyKey, - queue: queueName, - isTest: body.options?.test ?? false, - delayUntil, - queuedAt: delayUntil ? undefined : new Date(), - maxAttempts: body.options?.maxAttempts, - ttl, - tags: - tagIds.length === 0 - ? undefined - : { - connect: tagIds.map((id) => ({ id })), - }, - parentTaskRunId: - dependentAttempt?.taskRun.id ?? 
- parentAttempt?.taskRun.id ?? - dependentBatchRun?.dependentTaskAttempt?.taskRun.id, - parentTaskRunAttemptId: - dependentAttempt?.id ?? - parentAttempt?.id ?? - dependentBatchRun?.dependentTaskAttempt?.id, - rootTaskRunId: - dependentAttempt?.taskRun.rootTaskRunId ?? - dependentAttempt?.taskRun.id ?? - parentAttempt?.taskRun.rootTaskRunId ?? - parentAttempt?.taskRun.id ?? - dependentBatchRun?.dependentTaskAttempt?.taskRun.rootTaskRunId ?? - dependentBatchRun?.dependentTaskAttempt?.taskRun.id, - batchId: dependentBatchRun?.id ?? parentBatchRun?.id, - resumeParentOnCompletion: !!(dependentAttempt ?? dependentBatchRun), - depth, - metadata: metadataPacket?.data, - metadataType: metadataPacket?.dataType, - seedMetadata: metadataPacket?.data, - seedMetadataType: metadataPacket?.dataType, - maxDurationInSeconds: body.options?.maxDuration - ? clampMaxDuration(body.options.maxDuration) - : undefined, - runTags: bodyTags, - }, - }); - - event.setAttribute("runId", taskRun.friendlyId); - span.setAttribute("runId", taskRun.friendlyId); - - if (dependentAttempt) { - await tx.taskRunDependency.create({ - data: { - taskRunId: taskRun.id, - dependentAttemptId: dependentAttempt.id, - }, - }); - } else if (dependentBatchRun) { - await tx.taskRunDependency.create({ - data: { - taskRunId: taskRun.id, - dependentBatchRunId: dependentBatchRun.id, - }, - }); - } - - if (body.options?.queue) { - const concurrencyLimit = - typeof body.options.queue.concurrencyLimit === "number" - ? Math.max(0, body.options.queue.concurrencyLimit) - : undefined; - - let taskQueue = await tx.taskQueue.findFirst({ - where: { - runtimeEnvironmentId: environment.id, - name: queueName, - }, - }); - - const existingConcurrencyLimit = - typeof taskQueue?.concurrencyLimit === "number" - ? 
taskQueue.concurrencyLimit - : undefined; - - if (taskQueue) { - if (existingConcurrencyLimit !== concurrencyLimit) { - taskQueue = await tx.taskQueue.update({ - where: { - id: taskQueue.id, - }, - data: { - concurrencyLimit: - typeof concurrencyLimit === "number" ? concurrencyLimit : null, - rateLimit: body.options.queue.rateLimit, - }, - }); - - if (typeof taskQueue.concurrencyLimit === "number") { - await marqs?.updateQueueConcurrencyLimits( - environment, - taskQueue.name, - taskQueue.concurrencyLimit - ); - } else { - await marqs?.removeQueueConcurrencyLimits(environment, taskQueue.name); - } - } - } else { - const queueId = generateFriendlyId("queue"); - - taskQueue = await tx.taskQueue.create({ - data: { - friendlyId: queueId, - name: queueName, - concurrencyLimit, - runtimeEnvironmentId: environment.id, - projectId: environment.projectId, - rateLimit: body.options.queue.rateLimit, - type: "NAMED", - }, - }); - - if (typeof taskQueue.concurrencyLimit === "number") { - await marqs?.updateQueueConcurrencyLimits( - environment, - taskQueue.name, - taskQueue.concurrencyLimit - ); - } - } - } - - if (taskRun.delayUntil) { - await workerQueue.enqueue( - "v3.enqueueDelayedRun", - { runId: taskRun.id }, - { tx, runAt: delayUntil, jobKey: `v3.enqueueDelayedRun.${taskRun.id}` } - ); - } - - if (!taskRun.delayUntil && taskRun.ttl) { - const expireAt = parseNaturalLanguageDuration(taskRun.ttl); - - if (expireAt) { - await ExpireEnqueuedRunService.enqueue(taskRun.id, expireAt, tx); - } - } - - return taskRun; - }, - async (_, tx) => { - const counter = await tx.taskRunNumberCounter.findUnique({ - where: { - taskIdentifier_environmentId: { - taskIdentifier: taskId, - environmentId: environment.id, - }, - }, - select: { lastNumber: true }, - }); - - return counter?.lastNumber; - }, - this._prisma - ); - - //release the concurrency for the env and org, if part of a (batch)triggerAndWait - if (dependentAttempt) { - const isSameTask = dependentAttempt.taskRun.taskIdentifier 
=== taskId; - await marqs?.releaseConcurrency(dependentAttempt.taskRun.id, isSameTask); - } - if (dependentBatchRun?.dependentTaskAttempt) { - const isSameTask = - dependentBatchRun.dependentTaskAttempt.taskRun.taskIdentifier === taskId; - await marqs?.releaseConcurrency( - dependentBatchRun.dependentTaskAttempt.taskRun.id, - isSameTask - ); - } - - if (!run) { - return; - } - - // We need to enqueue the task run into the appropriate queue. This is done after the tx completes to prevent a race condition where the task run hasn't been created yet by the time we dequeue. - if (run.status === "PENDING") { - await marqs?.enqueueMessage( - environment, - run.queue, - run.id, - { - type: "EXECUTE", - taskIdentifier: taskId, - projectId: environment.projectId, - environmentId: environment.id, - environmentType: environment.type, - }, - body.options?.concurrencyKey - ); - } - - return run; - } - ); + return await this.callV2(taskId, environment, body, options); }); } - async #getQueueName(taskId: string, environment: AuthenticatedEnvironment, queueName?: string) { - if (queueName) { - return queueName; - } - - const defaultQueueName = `task/${taskId}`; - - const worker = await findCurrentWorkerFromEnvironment(environment); - - if (!worker) { - logger.debug("Failed to get queue name: No worker found", { - taskId, - environmentId: environment.id, - }); - - return defaultQueueName; - } - - const task = await this._prisma.backgroundWorkerTask.findUnique({ - where: { - workerId_slug: { - workerId: worker.id, - slug: taskId, - }, - }, - }); - - if (!task) { - console.log("Failed to get queue name: No task found", { - taskId, - environmentId: environment.id, - }); - - return defaultQueueName; - } - - const queueConfig = QueueOptions.optional().nullable().safeParse(task.queueConfig); - - if (!queueConfig.success) { - console.log("Failed to get queue name: Invalid queue config", { - taskId, - environmentId: environment.id, - queueConfig: task.queueConfig, - }); - - return 
defaultQueueName; - } - - return queueConfig.data?.name ?? defaultQueueName; + private async callV1( + taskId: string, + environment: AuthenticatedEnvironment, + body: TriggerTaskRequestBody, + options: TriggerTaskServiceOptions = {} + ) { + const service = new TriggerTaskServiceV1(this._prisma); + return await service.call(taskId, environment, body, options); } - async #handlePayloadPacket( - payload: any, - payloadType: string, - pathPrefix: string, - environment: AuthenticatedEnvironment + private async callV2( + taskId: string, + environment: AuthenticatedEnvironment, + body: TriggerTaskRequestBody, + options: TriggerTaskServiceOptions = {} ) { - return await startActiveSpan("handlePayloadPacket()", async (span) => { - const packet = this.#createPayloadPacket(payload, payloadType); - - if (!packet.data) { - return packet; - } - - const { needsOffloading, size } = packetRequiresOffloading( - packet, - env.TASK_PAYLOAD_OFFLOAD_THRESHOLD - ); - - if (!needsOffloading) { - return packet; - } - - const filename = `${pathPrefix}/payload.json`; - - await uploadToObjectStore(filename, packet.data, packet.dataType, environment); - - return { - data: filename, - dataType: "application/store", - }; + const service = new TriggerTaskServiceV2({ + prisma: this._prisma, + engine: this._engine, + }); + return await service.call({ + taskId, + environment, + body, + options, }); } - - #createPayloadPacket(payload: any, payloadType: string): IOPacket { - if (payloadType === "application/json") { - return { data: JSON.stringify(payload), dataType: "application/json" }; - } - - if (typeof payload === "string") { - return { data: payload, dataType: payloadType }; - } - - return { dataType: payloadType }; - } -} - -export async function parseDelay(value?: string | Date): Promise { - if (!value) { - return; - } - - if (value instanceof Date) { - return value; - } - - try { - const date = new Date(value); - - // Check if the date is valid - if (isNaN(date.getTime())) { - return 
parseNaturalLanguageDuration(value); - } - - if (date.getTime() <= Date.now()) { - return; - } - - return date; - } catch (error) { - return parseNaturalLanguageDuration(value); - } -} - -function stringifyDuration(seconds: number): string | undefined { - if (seconds <= 0) { - return; - } - - const units = { - w: Math.floor(seconds / 604800), - d: Math.floor((seconds % 604800) / 86400), - h: Math.floor((seconds % 86400) / 3600), - m: Math.floor((seconds % 3600) / 60), - s: Math.floor(seconds % 60), - }; - - // Filter the units having non-zero values and join them - const result: string = Object.entries(units) - .filter(([unit, val]) => val != 0) - .map(([unit, val]) => `${val}${unit}`) - .join(""); - - return result; } From 0a605b63b7e5c832b603988f4d0ee9101d2c2408 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 12:42:46 +0000 Subject: [PATCH 227/485] remove debug log --- packages/core/src/v3/apps/http.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/core/src/v3/apps/http.ts b/packages/core/src/v3/apps/http.ts index 970b3473f2..03d852630a 100644 --- a/packages/core/src/v3/apps/http.ts +++ b/packages/core/src/v3/apps/http.ts @@ -23,7 +23,6 @@ export async function getJsonBody(req: IncomingMessage): Promise { }); req.on("end", () => { - console.log("got body", body); resolve(safeJsonParse(body)); }); }); From 68a7e33c37e4122d54c768250567a94eda0da8b2 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 13:32:58 +0000 Subject: [PATCH 228/485] complete failed run events, fix success output --- apps/webapp/app/v3/runEngine.server.ts | 41 +++++++++++++++++-- .../app/v3/services/completeAttempt.server.ts | 2 +- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index bf277f7288..0297b1eed6 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ 
b/apps/webapp/app/v3/runEngine.server.ts @@ -3,9 +3,11 @@ import { prisma } from "~/db.server"; import { env } from "~/env.server"; import { tracer } from "./tracer.server"; import { singleton } from "~/utils/singleton"; -import { eventRepository } from "./eventRepository.server"; -import { createJsonErrorObject } from "@trigger.dev/core/v3"; +import { createExceptionPropertiesFromError, eventRepository } from "./eventRepository.server"; +import { createJsonErrorObject, sanitizeError } from "@trigger.dev/core/v3"; import { logger } from "~/services/logger.server"; +import { safeJsonParse } from "~/utils/json"; +import type { Attributes } from "@opentelemetry/api"; export const engine = singleton("RunEngine", createRunEngine); @@ -48,7 +50,12 @@ function createRunEngine() { endTime: time, attributes: { isError: false, - output: run.output, + output: + run.outputType === "application/store" || run.outputType === "text/plain" + ? run.output + : run.output + ? (safeJsonParse(run.output) as Attributes) + : undefined, outputType: run.outputType, }, }); @@ -60,6 +67,34 @@ function createRunEngine() { } }); + engine.eventBus.on("runFailed", async ({ time, run }) => { + try { + const sanitizedError = sanitizeError(run.error); + const exception = createExceptionPropertiesFromError(sanitizedError); + + await eventRepository.completeEvent(run.spanId, { + endTime: time, + attributes: { + isError: true, + }, + events: [ + { + name: "exception", + time: time, + properties: { + exception, + }, + }, + ], + }); + } catch (error) { + logger.error("[runFailed] Failed to complete event", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + }); + } + }); + engine.eventBus.on("runCancelled", async ({ time, run }) => { try { const inProgressEvents = await eventRepository.queryIncompleteEvents({ diff --git a/apps/webapp/app/v3/services/completeAttempt.server.ts b/apps/webapp/app/v3/services/completeAttempt.server.ts index b044a4d291..56adaaa5fd 100644 --- a/apps/webapp/app/v3/services/completeAttempt.server.ts +++ b/apps/webapp/app/v3/services/completeAttempt.server.ts @@ -12,7 +12,7 @@ import { shouldRetryError, taskRunErrorEnhancer, } from "@trigger.dev/core/v3"; -import { $transaction, PrismaClientOrTransaction } from "~/db.server"; +import { PrismaClientOrTransaction } from "~/db.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; import { safeJsonParse } from "~/utils/json"; From 3058cddbb02634c110c8455e7937c9efcf6b34a1 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 Nov 2024 14:21:53 +0000 Subject: [PATCH 229/485] child event completion on run failure, better logging --- apps/webapp/app/v3/eventRepository.server.ts | 8 +- apps/webapp/app/v3/runEngine.server.ts | 103 +++++++++++++++++- .../run-engine/src/engine/eventBus.ts | 5 - 3 files changed, 106 insertions(+), 10 deletions(-) diff --git a/apps/webapp/app/v3/eventRepository.server.ts b/apps/webapp/app/v3/eventRepository.server.ts index b4fc5fb0e8..5cf7b5313d 100644 --- a/apps/webapp/app/v3/eventRepository.server.ts +++ b/apps/webapp/app/v3/eventRepository.server.ts @@ -243,7 +243,7 @@ export class EventRepository { eventId: event.id, }); - await this.insert({ + const completedEvent = { ...omit(event, "id"), isPartial: false, isError: options?.attributes.isError ?? 
false, @@ -263,7 +263,11 @@ export class EventRepository { : "application/json", payload: event.payload as Attributes, payloadType: event.payloadType, - }); + } satisfies CreatableEvent; + + await this.insert(completedEvent); + + return completedEvent; } async cancelEvent(event: TaskEventRecord, cancelledAt: Date, reason: string) { diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 0297b1eed6..3d22aba801 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -46,7 +46,7 @@ function createRunEngine() { engine.eventBus.on("runSucceeded", async ({ time, run }) => { try { - await eventRepository.completeEvent(run.spanId, { + const completedEvent = await eventRepository.completeEvent(run.spanId, { endTime: time, attributes: { isError: false, @@ -59,10 +59,19 @@ function createRunEngine() { outputType: run.outputType, }, }); + + if (!completedEvent) { + logger.error("[runFailed] Failed to complete event for unknown reason", { + runId: run.id, + spanId: run.spanId, + }); + return; + } } catch (error) { logger.error("[runSucceeded] Failed to complete event", { error: error instanceof Error ? 
error.message : error, runId: run.id, + spanId: run.spanId, }); } }); @@ -72,7 +81,7 @@ function createRunEngine() { const sanitizedError = sanitizeError(run.error); const exception = createExceptionPropertiesFromError(sanitizedError); - await eventRepository.completeEvent(run.spanId, { + const completedEvent = await eventRepository.completeEvent(run.spanId, { endTime: time, attributes: { isError: true, @@ -80,17 +89,104 @@ function createRunEngine() { events: [ { name: "exception", - time: time, + time, properties: { exception, }, }, ], }); + + if (!completedEvent) { + logger.error("[runFailed] Failed to complete event for unknown reason", { + runId: run.id, + spanId: run.spanId, + }); + return; + } + + const inProgressEvents = await eventRepository.queryIncompleteEvents({ + runId: completedEvent?.runId, + }); + + await Promise.all( + inProgressEvents.map((event) => { + try { + const completedEvent = eventRepository.completeEvent(event.spanId, { + endTime: time, + attributes: { + isError: true, + }, + events: [ + { + name: "exception", + time, + properties: { + exception, + }, + }, + ], + }); + + if (!completedEvent) { + logger.error("[runFailed] Failed to complete in-progress event for unknown reason", { + runId: run.id, + spanId: run.spanId, + eventId: event.id, + }); + return; + } + } catch (error) { + logger.error("[runFailed] Failed to complete in-progress event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + spanId: run.spanId, + eventId: event.id, + }); + } + }) + ); } catch (error) { logger.error("[runFailed] Failed to complete event", { error: error instanceof Error ? 
error.message : error, runId: run.id, + spanId: run.spanId, + }); + } + }); + + engine.eventBus.on("runExpired", async ({ time, run }) => { + try { + const completedEvent = await eventRepository.completeEvent(run.spanId, { + endTime: time, + attributes: { + isError: true, + }, + events: [ + { + name: "exception", + time, + properties: { + exception: { + message: `Run expired because the TTL (${run.ttl}) was reached`, + }, + }, + }, + ], + }); + + if (!completedEvent) { + logger.error("[runFailed] Failed to complete event for unknown reason", { + runId: run.id, + spanId: run.spanId, + }); + return; + } + } catch (error) { + logger.error("[runExpired] Failed to complete event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + spanId: run.spanId, }); } }); @@ -111,6 +207,7 @@ function createRunEngine() { logger.error("[runCancelled] Failed to cancel event", { error: error instanceof Error ? error.message : error, runId: run.id, + spanId: run.spanId, }); } }); diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index d9f4db0f3f..8c90f11366 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -17,7 +17,6 @@ export type EventBusEvents = { }; }, ]; - //todo eventRepository runExpired: [ { time: Date; @@ -28,7 +27,6 @@ export type EventBusEvents = { }; }, ]; - //todo eventRepository runSucceeded: [ { time: Date; @@ -40,7 +38,6 @@ export type EventBusEvents = { }; }, ]; - //todo eventRepository runFailed: [ { time: Date; @@ -67,7 +64,6 @@ export type EventBusEvents = { retryAt: Date; }, ]; - //todo eventRepository runCancelled: [ { time: Date; @@ -87,7 +83,6 @@ export type EventBusEvents = { }; }, ]; - //todo advanced logging executionSnapshotCreated: [ { time: Date; From 3070a9d2214702b1507d95f975ca641f637f6ad2 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 28 
Nov 2024 14:24:16 +0000 Subject: [PATCH 230/485] fix http client run heartbeat route --- packages/worker/src/client/http.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/worker/src/client/http.ts b/packages/worker/src/client/http.ts index eb18d99507..b80c86c4b3 100644 --- a/packages/worker/src/client/http.ts +++ b/packages/worker/src/client/http.ts @@ -87,7 +87,7 @@ export class WorkerHttpClient { async heartbeatRun(runId: string, snapshotId: string, body: WorkerApiRunHeartbeatRequestBody) { return wrapZodFetch( WorkerApiRunHeartbeatResponseBody, - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, + `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/heartbeat`, { method: "POST", headers: { From a73a3eb3a68e967a431625c419bc48d71d2ef626 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 11:44:32 +0000 Subject: [PATCH 231/485] update lockfile --- pnpm-lock.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 1ec2042d2a..185eb66720 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1152,7 +1152,7 @@ importers: specifier: workspace:3.3.1 version: link:../core '@trigger.dev/worker': - specifier: workspace:3.2.2 + specifier: workspace:3.3.1 version: link:../worker c12: specifier: ^1.11.1 From 515641efd7959216f3318f64c1ed41d8c613f7f0 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 12:08:12 +0000 Subject: [PATCH 232/485] remove rate limit from run engine --- internal-packages/run-engine/src/engine/index.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 3e919a9930..fdcb3d0daf 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -355,7 
+355,6 @@ export class RunEngine { }, data: { concurrencyLimit, - rateLimit: queue.rateLimit, }, }); } else { @@ -366,7 +365,6 @@ export class RunEngine { concurrencyLimit, runtimeEnvironmentId: environment.id, projectId: environment.project.id, - rateLimit: queue.rateLimit, type: "NAMED", }, }); From 3e26ea4942fa9d5282abaa693923e0392d0b745e Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 13:47:32 +0000 Subject: [PATCH 233/485] Always display errors before other code blocks in the run overview tab --- .../route.tsx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 7b9f912bdf..0a91da4a0a 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -787,12 +787,13 @@ function RunBody({ + {run.error && } + {run.payload !== undefined && ( )} - {run.error !== undefined ? ( - - ) : run.output !== undefined ? ( + + {run.error === undefined && run.output !== undefined ? 
( ) : null} From ff20023fe6d3a12cde584ca6fdd57af6a4ebe319 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 13:49:32 +0000 Subject: [PATCH 234/485] add retry delay spans --- apps/webapp/app/v3/runEngine.server.ts | 32 +++++++++++++++++++ .../run-engine/src/engine/eventBus.ts | 2 +- .../run-engine/src/engine/index.ts | 1 + 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 3d22aba801..4c10821db2 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -212,6 +212,38 @@ function createRunEngine() { } }); + engine.eventBus.on("runRetryScheduled", async ({ time, run, environment, retryAt }) => { + try { + await eventRepository.recordEvent(`Retry #${run.attemptNumber} delay`, { + taskSlug: run.taskIdentifier, + environment, + attributes: { + // TODO: We'll need the execution data for this + // metadata: this.#generateMetadataAttributesForNextAttempt(execution), + properties: { + retryAt: retryAt.toISOString(), + }, + runId: run.friendlyId, + style: { + icon: "schedule-attempt", + }, + // TODO: This doesn't exist, decide if we need it + // queueId: run.queueId, + queueName: run.queue, + }, + context: run.traceContext as Record, + spanIdSeed: `retry-${run.attemptNumber + 1}`, + endTime: retryAt, + }); + } catch (error) { + logger.error("[runRetryScheduled] Failed to record retry event", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + spanId: run.spanId, + }); + } + }); + engine.eventBus.on("executionSnapshotCreated", async ({ time, run, snapshot }) => { try { const foundRun = await prisma.taskRun.findUnique({ diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 8c90f11366..dba7cb1548 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -49,12 +49,12 @@ export type EventBusEvents = { }; }, ]; - //todo eventRepository runRetryScheduled: [ { time: Date; run: { id: string; + friendlyId: string; attemptNumber: number; queue: string; traceContext: Record; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index fdcb3d0daf..257447387e 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -2104,6 +2104,7 @@ export class RunEngine { time: failedAt, run: { id: run.id, + friendlyId: run.friendlyId, attemptNumber: nextAttemptNumber, queue: run.queue, taskIdentifier: run.taskIdentifier, From a1b545b638922bed8eac3d0ab5b714d949ab4c7a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 13:51:25 +0000 Subject: [PATCH 235/485] resurrect uncaught exception handler --- packages/cli-v3/src/entryPoints/managed-run-worker.ts | 1 + packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts | 1 + packages/cli-v3/src/executions/taskRunProcess.ts | 3 +++ 3 files changed, 5 insertions(+) diff --git a/packages/cli-v3/src/entryPoints/managed-run-worker.ts b/packages/cli-v3/src/entryPoints/managed-run-worker.ts index 894b7930c9..76489906c5 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-worker.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-worker.ts @@ -53,6 +53,7 @@ sourceMapSupport.install({ }); process.on("uncaughtException", function (error, 
origin) { + console.error("Uncaught exception", { error, origin }); if (error instanceof Error) { process.send && process.send({ diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts index 7b15629fd0..104d4187a9 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts @@ -53,6 +53,7 @@ sourceMapSupport.install({ }); process.on("uncaughtException", function (error, origin) { + console.error("Uncaught exception", { error, origin }); if (error instanceof Error) { process.send && process.send({ diff --git a/packages/cli-v3/src/executions/taskRunProcess.ts b/packages/cli-v3/src/executions/taskRunProcess.ts index 22e3c9f6d5..33ce18f779 100644 --- a/packages/cli-v3/src/executions/taskRunProcess.ts +++ b/packages/cli-v3/src/executions/taskRunProcess.ts @@ -186,6 +186,9 @@ export class TaskRunProcess { WAIT_FOR_DURATION: async (message) => { this.onWaitForDuration.post(message); }, + UNCAUGHT_EXCEPTION: async (message) => { + logger.debug(`[${this.runId}] uncaught exception in task run process`, { ...message }); + }, }, }); From bed6677a84725199d058478b3468fb7dae78117b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 13:51:54 +0000 Subject: [PATCH 236/485] run completion handling and retries --- .../worker/workerGroupTokenService.server.ts | 32 ++-- .../src/entryPoints/managed-run-controller.ts | 161 ++++++++++++------ packages/core/src/v3/schemas/common.ts | 1 + 3 files changed, 127 insertions(+), 67 deletions(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index b9926ef268..23385cecee 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -210,25 +210,25 @@ export 
class WorkerGroupTokenService extends WithRunEngine { }); } - if (!workerInstance.environmentId) { + if (!workerInstance.environment) { logger.error( - "[WorkerGroupTokenService] Non-shared worker instance not linked to environment", + "[WorkerGroupTokenService] Unmanaged worker instance not linked to environment", { workerGroup, workerInstance } ); return; } if (!workerInstance.deployment) { - logger.error( - "[WorkerGroupTokenService] Non-shared worker instance not linked to deployment", - { workerGroup, workerInstance } - ); + logger.error("[WorkerGroupTokenService] Unmanaged worker instance not linked to deployment", { + workerGroup, + workerInstance, + }); return; } if (!workerInstance.deployment.workerId) { logger.error( - "[WorkerGroupTokenService] Non-shared worker instance deployment not linked to background worker", + "[WorkerGroupTokenService] Unmanaged worker instance deployment not linked to background worker", { workerGroup, workerInstance } ); return; @@ -241,7 +241,7 @@ export class WorkerGroupTokenService extends WithRunEngine { workerGroupId: workerGroup.id, workerInstanceId: workerInstance.id, masterQueue: workerGroup.masterQueue, - environmentId: workerInstance.environmentId, + environmentId: workerInstance.environment.id, deploymentId: workerInstance.deployment.id, backgroundWorkerId: workerInstance.deployment.workerId, environment: workerInstance.environment, @@ -305,7 +305,7 @@ export class WorkerGroupTokenService extends WithRunEngine { if (!workerGroup.projectId || !workerGroup.organizationId) { logger.error( - "[WorkerGroupTokenService] Non-shared worker group missing project or organization", + "[WorkerGroupTokenService] Unmanaged worker group missing project or organization", { workerGroup, workerInstance, @@ -316,7 +316,7 @@ export class WorkerGroupTokenService extends WithRunEngine { } if (!deploymentId) { - logger.error("[WorkerGroupTokenService] Non-shared worker group required deployment ID", { + 
logger.error("[WorkerGroupTokenService] Unmanaged worker group required deployment ID", { workerGroup, workerInstance, }); @@ -565,9 +565,17 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { centsPerMs: 0, } satisfies MachinePreset; - const envVars = this.environment + const environment = + this.environment ?? + (await this._prisma.runtimeEnvironment.findUnique({ + where: { + id: engineResult.execution.environment.id, + }, + })); + + const envVars = environment ? await this.getEnvVars( - this.environment, + environment, engineResult.run.id, engineResult.execution.machine ?? defaultMachinePreset ) diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 1e263417be..03c81e6518 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -6,7 +6,9 @@ import { CLOUD_API_URL } from "../consts.js"; import { randomUUID } from "crypto"; import { readJSONFile } from "../utilities/fileSystem.js"; import { HeartbeatService, WorkerManifest } from "@trigger.dev/core/v3"; -import { WorkloadHttpClient } from "@trigger.dev/worker"; +import { WorkloadHttpClient, type WorkloadRunAttemptStartResponseBody } from "@trigger.dev/worker"; +import { assertExhaustive } from "../utilities/assertExhaustive.js"; +import { setTimeout as wait } from "timers/promises"; const Env = z.object({ TRIGGER_API_URL: z.string().url().default(CLOUD_API_URL), @@ -91,18 +93,7 @@ class ManagedRunController { }); } - async start() { - logger.debug("[ManagedRunController] Starting up"); - - // TODO: remove this after testing - setTimeout(() => { - // exit after 5 minutes - console.error("[ManagedRunController] Exiting after 5 minutes"); - process.exit(1); - }, 60 * 5000); - - this.heartbeatService.start(); - + private async startAndExecuteRunAttempt() { if (!this.runId || !this.snapshotId) { logger.debug("[ManagedRunController] Missing run 
ID or snapshot ID", { runId: this.runId, @@ -130,9 +121,57 @@ class ManagedRunController { ...envVars, }; + try { + return await this.executeRun({ run, snapshot, envVars: taskRunEnv, execution }); + } catch (error) { + console.error("Error while executing attempt", { + error, + }); + + const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { + completion: { + id: execution.run.id, + ok: false, + retry: undefined, + error: TaskRunProcess.parseExecuteError(error), + }, + }); + + if (!completionResult.success) { + console.error("Failed to submit completion after error", { + error: completionResult.error, + }); + process.exit(1); + } + + logger.log("completed run", completionResult.data.result); + } + } + + async start() { + logger.debug("[ManagedRunController] Starting up"); + + // TODO: remove this after testing + setTimeout(() => { + // exit after 5 minutes + console.error("[ManagedRunController] Exiting after 5 minutes"); + process.exit(1); + }, 60 * 5000); + + this.heartbeatService.start(); + + this.startAndExecuteRunAttempt(); + } + + private async executeRun({ + run, + snapshot, + envVars, + execution, + }: WorkloadRunAttemptStartResponseBody) { this.taskRunProcess = new TaskRunProcess({ workerManifest: this.workerManifest, - env: taskRunEnv, + env: envVars, serverWorker: { id: "unmanaged", contentHash: env.TRIGGER_CONTENT_HASH, @@ -145,61 +184,73 @@ class ManagedRunController { messageId: run.id, }); + await this.taskRunProcess.initialize(); + + logger.log("executing task run process", { + attemptId: execution.attempt.id, + runId: execution.run.id, + }); + + const completion = await this.taskRunProcess.execute(); + + logger.log("Completed run", completion); + try { - await this.taskRunProcess.initialize(); + await this.taskRunProcess.cleanup(true); + } catch (error) { + console.error("Failed to cleanup task run process, submitting completion anyway", { + error, + }); + } - logger.log("executing task run process", { - attemptId: 
execution.attempt.id, - runId: execution.run.id, + const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { + completion, + }); + + if (!completionResult.success) { + console.error("Failed to submit completion", { + error: completionResult.error, }); + process.exit(1); + } - const completion = await this.taskRunProcess.execute(); + logger.log("Completion submitted", completionResult.data.result); - logger.log("Completed run", completion); + const { attemptStatus } = completionResult.data.result; - try { - await this.taskRunProcess.cleanup(true); - } catch (error) { - console.error("Failed to cleanup task run process, submitting completion anyway", { - error, - }); - } + this.snapshotId = completionResult.data.result.snapshot.id; - const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { - completion, - }); + if (attemptStatus === "RUN_FINISHED") { + logger.debug("Run finished, shutting down"); + process.exit(0); + } - if (!completionResult.success) { - console.error("Failed to submit completion", { - error: completionResult.error, - }); - process.exit(1); - } + if (attemptStatus === "RUN_PENDING_CANCEL") { + logger.debug("Run pending cancel, shutting down"); + process.exit(0); + } - logger.log("Completion submitted", completionResult.data.result); - } catch (error) { - console.error("Error while executing attempt", { - error, - }); + if (attemptStatus === "RETRY_QUEUED") { + logger.debug("Retry queued, shutting down"); + process.exit(0); + } - const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { - completion: { - id: execution.run.id, - ok: false, - retry: undefined, - error: TaskRunProcess.parseExecuteError(error), - }, - }); + if (attemptStatus === "RETRY_IMMEDIATELY") { + if (completion.ok) { + throw new Error("Should retry but completion OK."); + } - if (!completionResult.success) { - console.error("Failed to submit completion after error", { - error: 
completionResult.error, - }); - process.exit(1); + if (!completion.retry) { + throw new Error("Should retry but missing retry params."); } - logger.log("completed run", completionResult.data.result); + await wait(completion.retry.delay); + + this.startAndExecuteRunAttempt(); + return; } + + assertExhaustive(attemptStatus); } async stop() { diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 1bb33d7db7..1f9f18f4c3 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -259,6 +259,7 @@ export type TaskRunContext = z.infer; export const TaskRunExecutionRetry = z.object({ timestamp: z.number(), + /** Retry delay in milliseconds */ delay: z.number(), error: z.unknown().optional(), }); From 8973ccb7e6fd4456a7b909768018834192c3375d Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:28:09 +0000 Subject: [PATCH 237/485] fix span presenter v2 engine outputs and errors --- .../app/presenters/v3/SpanPresenter.server.ts | 39 +++++++++++++++---- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index 9ee83d7e50..ceb7351f2b 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -86,6 +86,9 @@ export class SpanPresenter extends BasePresenter { engine: true, masterQueue: true, secondaryMasterQueue: true, + error: true, + output: true, + outputType: true, //status + duration status: true, startedAt: true, @@ -186,13 +189,33 @@ export class SpanPresenter extends BasePresenter { }) : null; + const finishedData = + run.engine === "V2" + ? run + : isFinished + ? 
await this._replica.taskRunAttempt.findFirst({ + select: { + output: true, + outputType: true, + error: true, + }, + where: { + status: { in: FINAL_ATTEMPT_STATUSES }, + taskRunId: run.id, + }, + orderBy: { + createdAt: "desc", + }, + }) + : null; + const output = - finishedAttempt === null + finishedData === null ? undefined - : finishedAttempt.outputType === "application/store" - ? `/resources/packets/${run.runtimeEnvironment.id}/${finishedAttempt.output}` - : typeof finishedAttempt.output !== "undefined" && finishedAttempt.output !== null - ? await prettyPrintPacket(finishedAttempt.output, finishedAttempt.outputType ?? undefined) + : finishedData.outputType === "application/store" + ? `/resources/packets/${run.runtimeEnvironment.id}/${finishedData.output}` + : typeof finishedData.output !== "undefined" && finishedData.output !== null + ? await prettyPrintPacket(finishedData.output, finishedData.outputType ?? undefined) : undefined; const payload = @@ -203,14 +226,14 @@ export class SpanPresenter extends BasePresenter { : undefined; let error: TaskRunError | undefined = undefined; - if (finishedAttempt?.error) { - const result = TaskRunError.safeParse(finishedAttempt.error); + if (finishedData?.error) { + const result = TaskRunError.safeParse(finishedData.error); if (result.success) { error = result.data; } else { error = { type: "CUSTOM_ERROR", - raw: JSON.stringify(finishedAttempt.error), + raw: JSON.stringify(finishedData.error), }; } } From 676496aabc86562b7bd4c63a0499f226bda73592 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 14:30:04 +0000 Subject: [PATCH 238/485] bump worker version --- packages/cli-v3/package.json | 2 +- packages/worker/package.json | 2 +- pnpm-lock.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index 83981697a2..62d6450920 100644 --- a/packages/cli-v3/package.json +++ 
b/packages/cli-v3/package.json @@ -89,7 +89,7 @@ "@opentelemetry/semantic-conventions": "1.25.1", "@trigger.dev/build": "workspace:3.3.2", "@trigger.dev/core": "workspace:3.3.2", - "@trigger.dev/worker": "workspace:3.3.1", + "@trigger.dev/worker": "workspace:3.3.2", "c12": "^1.11.1", "chalk": "^5.2.0", "cli-table3": "^0.6.3", diff --git a/packages/worker/package.json b/packages/worker/package.json index 49ff221673..f60475ffe8 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -1,6 +1,6 @@ { "name": "@trigger.dev/worker", - "version": "3.3.1", + "version": "3.3.2", "description": "trigger.dev worker", "license": "MIT", "publishConfig": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4c34dc01f9..ee8c0cd300 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1152,7 +1152,7 @@ importers: specifier: workspace:3.3.2 version: link:../core '@trigger.dev/worker': - specifier: workspace:3.3.1 + specifier: workspace:3.3.2 version: link:../worker c12: specifier: ^1.11.1 From 0631b22565e745cbb0a6eb4b6e1bfbc19000e2d2 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 2 Dec 2024 16:09:52 +0000 Subject: [PATCH 239/485] remove retry span attribute bloat --- apps/webapp/app/v3/runEngine.server.ts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 4c10821db2..c1d59fcf17 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -218,8 +218,6 @@ function createRunEngine() { taskSlug: run.taskIdentifier, environment, attributes: { - // TODO: We'll need the execution data for this - // metadata: this.#generateMetadataAttributesForNextAttempt(execution), properties: { retryAt: retryAt.toISOString(), }, @@ -227,8 +225,6 @@ function createRunEngine() { style: { icon: "schedule-attempt", }, - // TODO: This doesn't exist, decide if we need it - // queueId: run.queueId, queueName: 
run.queue, }, context: run.traceContext as Record, From f1b4861f1e9c8dada3a4e387ffbef020e62b9636 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 3 Dec 2024 17:46:26 +0000 Subject: [PATCH 240/485] bump worker version --- packages/cli-v3/package.json | 2 +- packages/worker/package.json | 2 +- pnpm-lock.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index 25797ff77f..f8b6bf85f0 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -89,7 +89,7 @@ "@opentelemetry/semantic-conventions": "1.25.1", "@trigger.dev/build": "workspace:3.3.4", "@trigger.dev/core": "workspace:3.3.4", - "@trigger.dev/worker": "workspace:3.3.2", + "@trigger.dev/worker": "workspace:3.3.4", "c12": "^1.11.1", "chalk": "^5.2.0", "cli-table3": "^0.6.3", diff --git a/packages/worker/package.json b/packages/worker/package.json index f60475ffe8..6fcea1ae4c 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -1,6 +1,6 @@ { "name": "@trigger.dev/worker", - "version": "3.3.2", + "version": "3.3.4", "description": "trigger.dev worker", "license": "MIT", "publishConfig": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f81af6b55e..f54437249d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1152,7 +1152,7 @@ importers: specifier: workspace:3.3.4 version: link:../core '@trigger.dev/worker': - specifier: workspace:3.3.2 + specifier: workspace:3.3.4 version: link:../worker c12: specifier: ^1.11.1 From 7913e39280882605f4467f2fe9c36a42b52d17c7 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 3 Dec 2024 20:49:07 +0000 Subject: [PATCH 241/485] report invocation usage --- apps/webapp/app/v3/runEngine.server.ts | 15 +++++++++++++++ .../run-engine/src/engine/eventBus.ts | 1 - 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/runEngine.server.ts 
b/apps/webapp/app/v3/runEngine.server.ts index c1d59fcf17..e67098661e 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -8,6 +8,7 @@ import { createJsonErrorObject, sanitizeError } from "@trigger.dev/core/v3"; import { logger } from "~/services/logger.server"; import { safeJsonParse } from "~/utils/json"; import type { Attributes } from "@opentelemetry/api"; +import { reportInvocationUsage } from "~/services/platform.v3.server"; export const engine = singleton("RunEngine", createRunEngine); @@ -240,6 +241,20 @@ function createRunEngine() { } }); + engine.eventBus.on("runRetryScheduled", async ({ time, run, organization }) => { + try { + if (run.attemptNumber === 1 && run.baseCostInCents > 0) { + await reportInvocationUsage(organization.id, run.baseCostInCents, { runId: run.id }); + } + } catch (error) { + logger.error("[runRetryScheduled] Failed to report invocation usage", { + error: error instanceof Error ? error.message : error, + runId: run.id, + orgId: organization.id, + }); + } + }); + engine.eventBus.on("executionSnapshotCreated", async ({ time, run, snapshot }) => { try { const foundRun = await prisma.taskRun.findUnique({ diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index dba7cb1548..70c0293638 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -3,7 +3,6 @@ import { AuthenticatedEnvironment } from "../shared"; import { TaskRunError } from "@trigger.dev/core/v3"; export type EventBusEvents = { - //todo reportInvocationUsage() runAttemptStarted: [ { time: Date; From 1cc028423e867b4604587a5095c628511106fa42 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 4 Dec 2024 12:33:03 +0000 Subject: [PATCH 242/485] bump node-22 image to 22.12.0 require esm --- packages/cli-v3/src/deploy/buildImage.ts | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/packages/cli-v3/src/deploy/buildImage.ts b/packages/cli-v3/src/deploy/buildImage.ts index deb44e9f3a..13e87d9312 100644 --- a/packages/cli-v3/src/deploy/buildImage.ts +++ b/packages/cli-v3/src/deploy/buildImage.ts @@ -443,7 +443,7 @@ const BASE_IMAGE: Record = { bun: "imbios/bun-node:1.1.24-22-slim@sha256:9cfb7cd87529261c482fe17d8894c0986263f3a5ccf84ad65c00ec0e1ed539c6", node: "node:21-bookworm-slim@sha256:99afef5df7400a8d118e0504576d32ca700de5034c4f9271d2ff7c91cc12d170", "node-22": - "node:22-bookworm-slim@sha256:f73e9c70d4279d5e7b7cc1fe307c5de18b61089ffa2235230408dfb14e2f09a0", + "node:22.12.0-bookworm-slim@sha256:a4b757cd491c7f0b57f57951f35f4e85b7e1ad54dbffca4cf9af0725e1650cd8", }; const DEFAULT_PACKAGES = ["busybox", "ca-certificates", "dumb-init", "git", "openssl"]; From cd0347fce79a04309be576a1303babbe4711477b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 4 Dec 2024 12:33:49 +0000 Subject: [PATCH 243/485] use full node 21 image version tag --- packages/cli-v3/src/deploy/buildImage.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/cli-v3/src/deploy/buildImage.ts b/packages/cli-v3/src/deploy/buildImage.ts index 13e87d9312..986f3eaa7c 100644 --- a/packages/cli-v3/src/deploy/buildImage.ts +++ b/packages/cli-v3/src/deploy/buildImage.ts @@ -441,7 +441,7 @@ export type GenerateContainerfileOptions = { const BASE_IMAGE: Record = { bun: "imbios/bun-node:1.1.24-22-slim@sha256:9cfb7cd87529261c482fe17d8894c0986263f3a5ccf84ad65c00ec0e1ed539c6", - node: "node:21-bookworm-slim@sha256:99afef5df7400a8d118e0504576d32ca700de5034c4f9271d2ff7c91cc12d170", + node: "node:21.7.3-bookworm-slim@sha256:99afef5df7400a8d118e0504576d32ca700de5034c4f9271d2ff7c91cc12d170", "node-22": "node:22.12.0-bookworm-slim@sha256:a4b757cd491c7f0b57f57951f35f4e85b7e1ad54dbffca4cf9af0725e1650cd8", }; From b42db7b923254a0f76134fdc794a9c53d2db738e Mon Sep 17 00:00:00 2001 From: nicktrn 
<55853254+nicktrn@users.noreply.github.com> Date: Wed, 4 Dec 2024 12:37:41 +0000 Subject: [PATCH 244/485] use multi-arch images for bun and node --- packages/cli-v3/src/deploy/buildImage.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/cli-v3/src/deploy/buildImage.ts b/packages/cli-v3/src/deploy/buildImage.ts index 986f3eaa7c..60d959404d 100644 --- a/packages/cli-v3/src/deploy/buildImage.ts +++ b/packages/cli-v3/src/deploy/buildImage.ts @@ -440,8 +440,8 @@ export type GenerateContainerfileOptions = { }; const BASE_IMAGE: Record = { - bun: "imbios/bun-node:1.1.24-22-slim@sha256:9cfb7cd87529261c482fe17d8894c0986263f3a5ccf84ad65c00ec0e1ed539c6", - node: "node:21.7.3-bookworm-slim@sha256:99afef5df7400a8d118e0504576d32ca700de5034c4f9271d2ff7c91cc12d170", + bun: "imbios/bun-node:1.1.24-22-slim@sha256:eec3c2937e30c579258a92c60847e5515488513337b23ec13996228f6a146ff5", + node: "node:21.7.3-bookworm-slim@sha256:dfc05dee209a1d7adf2ef189bd97396daad4e97c6eaa85778d6f75205ba1b0fb", "node-22": "node:22.12.0-bookworm-slim@sha256:a4b757cd491c7f0b57f57951f35f4e85b7e1ad54dbffca4cf9af0725e1650cd8", }; From d9db2688319e87929679d1b21ae81604610dc493 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 4 Dec 2024 12:41:07 +0000 Subject: [PATCH 245/485] remove unmanaged core export and add managed runtime --- .../cli-v3/src/entryPoints/managed-run-worker.ts | 4 ++-- .../src/entryPoints/unmanaged-run-worker.ts | 2 +- packages/core/package.json | 15 --------------- .../core/src/v3/runtime/managedRuntimeManager.ts | 2 +- packages/core/src/v3/unmanaged/index.ts | 1 - packages/core/src/v3/workers/index.ts | 2 ++ 6 files changed, 6 insertions(+), 20 deletions(-) delete mode 100644 packages/core/src/v3/unmanaged/index.ts diff --git a/packages/cli-v3/src/entryPoints/managed-run-worker.ts b/packages/cli-v3/src/entryPoints/managed-run-worker.ts index 76489906c5..252a55f422 100644 --- 
a/packages/cli-v3/src/entryPoints/managed-run-worker.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-worker.ts @@ -18,7 +18,6 @@ import { waitUntil, apiClientManager, } from "@trigger.dev/core/v3"; -import { UnmanagedRuntimeManager } from "@trigger.dev/core/v3/unmanaged"; import { TriggerTracer } from "@trigger.dev/core/v3/tracer"; import { ConsoleInterceptor, @@ -37,6 +36,7 @@ import { UsageTimeoutManager, StandardMetadataManager, StandardWaitUntilManager, + ManagedRuntimeManager, } from "@trigger.dev/core/v3/workers"; import { ZodIpcConnection } from "@trigger.dev/core/v3/zodIpc"; import { readFile } from "node:fs/promises"; @@ -461,7 +461,7 @@ async function flushMetadata(timeoutInMs: number = 10_000) { console.log(`Flushed runMetadata in ${duration}ms`); } -const managedWorkerRuntime = new UnmanagedRuntimeManager(); +const managedWorkerRuntime = new ManagedRuntimeManager(); runtime.setGlobalRuntimeManager(managedWorkerRuntime); diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts index 104d4187a9..c2f03fb052 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts @@ -18,7 +18,6 @@ import { waitUntil, apiClientManager, } from "@trigger.dev/core/v3"; -import { UnmanagedRuntimeManager } from "@trigger.dev/core/v3/unmanaged"; import { TriggerTracer } from "@trigger.dev/core/v3/tracer"; import { ConsoleInterceptor, @@ -37,6 +36,7 @@ import { UsageTimeoutManager, StandardMetadataManager, StandardWaitUntilManager, + UnmanagedRuntimeManager, } from "@trigger.dev/core/v3/workers"; import { ZodIpcConnection } from "@trigger.dev/core/v3/zodIpc"; import { readFile } from "node:fs/promises"; diff --git a/packages/core/package.json b/packages/core/package.json index 9a515d3bae..070a4c8e1b 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -55,7 +55,6 @@ "./v3/utils/timers": 
"./src/v3/utils/timers.ts", "./v3/dev": "./src/v3/dev/index.ts", "./v3/prod": "./src/v3/prod/index.ts", - "./v3/unmanaged": "./src/v3/unmanaged/index.ts", "./v3/workers": "./src/v3/workers/index.ts", "./v3/schemas": "./src/v3/schemas/index.ts" }, @@ -161,9 +160,6 @@ "v3/prod": [ "dist/commonjs/v3/prod/index.d.ts" ], - "v3/unmanaged": [ - "dist/commonjs/v3/unmanaged/index.d.ts" - ], "v3/workers": [ "dist/commonjs/v3/workers/index.d.ts" ], @@ -610,17 +606,6 @@ "default": "./dist/commonjs/v3/prod/index.js" } }, - "./v3/unmanaged": { - "import": { - "@triggerdotdev/source": "./src/v3/unmanaged/index.ts", - "types": "./dist/esm/v3/unmanaged/index.d.ts", - "default": "./dist/esm/v3/unmanaged/index.js" - }, - "require": { - "types": "./dist/commonjs/v3/unmanaged/index.d.ts", - "default": "./dist/commonjs/v3/unmanaged/index.js" - } - }, "./v3/workers": { "import": { "@triggerdotdev/source": "./src/v3/workers/index.ts", diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index 88b0350590..3cc6b4a903 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -8,7 +8,7 @@ import { unboundedTimeout } from "../utils/timers.js"; type Waitpoint = any; -export class UnmanagedRuntimeManager implements RuntimeManager { +export class ManagedRuntimeManager implements RuntimeManager { private readonly waitpoints: Map = new Map(); _taskWaits: Map void }> = new Map(); diff --git a/packages/core/src/v3/unmanaged/index.ts b/packages/core/src/v3/unmanaged/index.ts deleted file mode 100644 index 57123d4bb5..0000000000 --- a/packages/core/src/v3/unmanaged/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { UnmanagedRuntimeManager } from "../runtime/unmanagedRuntimeManager.js"; diff --git a/packages/core/src/v3/workers/index.ts b/packages/core/src/v3/workers/index.ts index 504302dde2..2912d69387 100644 --- a/packages/core/src/v3/workers/index.ts +++ 
b/packages/core/src/v3/workers/index.ts @@ -16,3 +16,5 @@ export { ProdUsageManager, type ProdUsageManagerOptions } from "../usage/prodUsa export { UsageTimeoutManager } from "../timeout/usageTimeoutManager.js"; export { StandardMetadataManager } from "../runMetadata/manager.js"; export { StandardWaitUntilManager } from "../waitUntil/manager.js"; +export { ManagedRuntimeManager } from "../runtime/managedRuntimeManager.js"; +export { UnmanagedRuntimeManager } from "../runtime/unmanagedRuntimeManager.js"; From 71f04cf93cbb18d5a7d0a8dbbf87d0a080eb8cca Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 6 Dec 2024 10:28:37 +0000 Subject: [PATCH 246/485] bump worker version --- packages/cli-v3/package.json | 2 +- packages/worker/package.json | 2 +- pnpm-lock.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index f27a90e5bb..c7508dfa29 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -89,7 +89,7 @@ "@opentelemetry/semantic-conventions": "1.25.1", "@trigger.dev/build": "workspace:3.3.5", "@trigger.dev/core": "workspace:3.3.5", - "@trigger.dev/worker": "workspace:3.3.4", + "@trigger.dev/worker": "workspace:3.3.5", "c12": "^1.11.1", "chalk": "^5.2.0", "cli-table3": "^0.6.3", diff --git a/packages/worker/package.json b/packages/worker/package.json index 6fcea1ae4c..0824feab2b 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -1,6 +1,6 @@ { "name": "@trigger.dev/worker", - "version": "3.3.4", + "version": "3.3.5", "description": "trigger.dev worker", "license": "MIT", "publishConfig": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3b39f7fd4a..db285d6017 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1152,7 +1152,7 @@ importers: specifier: workspace:3.3.5 version: link:../core '@trigger.dev/worker': - specifier: workspace:3.3.4 + specifier: workspace:3.3.5 version: link:../worker 
c12: specifier: ^1.11.1 From 3fe3c396f5b057396dad5d028e2f1f40ae06de23 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 6 Dec 2024 12:26:32 +0000 Subject: [PATCH 247/485] Suggested glossary for the RunEngine, TBC --- internal-packages/run-engine/README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index ee381573a5..1c676c4cb9 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -9,6 +9,17 @@ It is responsible for: - Heartbeats which detects stalled runs and attempts to automatically recover them. - Registering checkpoints which enable pausing/resuming of runs. +## Glossary + +- **Platform**: The main Trigger.dev API, dashboard, database. The Run Engine is part of the platform. +- **Worker group**: A group of workers that all pull from the same queue, e.g. "us-east-1", "my-self-hosted-workers". + - **Worker**: A worker is a 'server' that connects to the platform and receives runs. + - **Supervisor**: Pulls new runs from the queue, communicates with the platform, spins up new Deploy executors. + - **Checkpointer**: Responsible for checkpointing runs. + - **Deploy executor**: Container that comes from a specific deploy from a user's project. + - **Run controller**: The code that manages running the task. + - **Run executor**: The actual task running. + ## Run locking Many operations on the run are "atomic" in the sense that only a single operation can mutate them at a time. We use RedLock to create a distributed lock to ensure this. Postgres locking is not enough on its own because we have multiple API instances and Redis is used for the queue. 
From 2635c58508fdd8610da18f57c84f98e28ebccaf3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 6 Dec 2024 14:04:12 +0000 Subject: [PATCH 248/485] Removed BatchTaskRun changes from this branch, they were done in main --- .../migration.sql | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql b/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql index 46551cc7f9..6ef11749a7 100644 --- a/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql +++ b/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql @@ -2,7 +2,6 @@ Warnings: - You are about to drop the `WorkerGroup` table. If the table is not empty, all the data it contains will be lost. - - A unique constraint covering the columns `[runtimeEnvironmentId,idempotencyKey]` on the table `BatchTaskRun` will be added. If there are existing duplicate values, this will fail. - A unique constraint covering the columns `[workerGroupId,resourceIdentifier]` on the table `WorkerInstance` will be added. If there are existing duplicate values, this will fail. - Added the required column `resourceIdentifier` to the `WorkerInstance` table without a default value. This is not possible if the table is not empty. 
@@ -25,15 +24,9 @@ ALTER TABLE "WorkerGroup" DROP CONSTRAINT "WorkerGroup_tokenId_fkey"; -- DropForeignKey ALTER TABLE "WorkerInstance" DROP CONSTRAINT "WorkerInstance_workerGroupId_fkey"; --- DropIndex -DROP INDEX "BatchTaskRun_runtimeEnvironmentId_taskIdentifier_idempotenc_key"; - -- DropIndex DROP INDEX "WorkerInstance_workerGroupId_name_key"; --- AlterTable -ALTER TABLE "BatchTaskRun" ALTER COLUMN "taskIdentifier" DROP NOT NULL; - -- AlterTable ALTER TABLE "Project" ADD COLUMN "engine" "RunEngineVersion" NOT NULL DEFAULT 'V1'; @@ -69,9 +62,6 @@ CREATE UNIQUE INDEX "WorkerInstanceGroup_masterQueue_key" ON "WorkerInstanceGrou -- CreateIndex CREATE UNIQUE INDEX "WorkerInstanceGroup_tokenId_key" ON "WorkerInstanceGroup"("tokenId"); --- CreateIndex -CREATE UNIQUE INDEX "BatchTaskRun_runtimeEnvironmentId_idempotencyKey_key" ON "BatchTaskRun"("runtimeEnvironmentId", "idempotencyKey"); - -- CreateIndex CREATE UNIQUE INDEX "WorkerInstance_workerGroupId_resourceIdentifier_key" ON "WorkerInstance"("workerGroupId", "resourceIdentifier"); From fb6c33d40cd3c996d0f432e069bfc88a786e58cd Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 6 Dec 2024 15:28:34 +0000 Subject: [PATCH 249/485] Set the BatchTaskRun status to completed when all runs are completed --- .../run-engine/src/engine/index.ts | 99 ++++++++++++++++++- .../run-engine/src/engine/statuses.ts | 17 +++- .../src/engine/tests/batchTrigger.test.ts | 58 +++++++++++ 3 files changed, 169 insertions(+), 5 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 257447387e..2abe625c2c 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -49,15 +49,15 @@ import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; import { runStatusFromError } from "./errors"; import { EventBusEvents } from "./eventBus"; -import { RunLocker } from 
"./locking"; -import { machinePresetFromConfig } from "./machinePresets"; -import { isDequeueableExecutionStatus, isExecuting } from "./statuses"; -import { HeartbeatTimeouts, MachineResources, RunEngineOptions, TriggerParams } from "./types"; import { executionResultFromSnapshot, getExecutionSnapshotCompletedWaitpoints, getLatestExecutionSnapshot, } from "./executionSnapshots"; +import { RunLocker } from "./locking"; +import { machinePresetFromConfig } from "./machinePresets"; +import { isDequeueableExecutionStatus, isExecuting, isFinalRunStatus } from "./statuses"; +import { HeartbeatTimeouts, MachineResources, RunEngineOptions, TriggerParams } from "./types"; const workerCatalog = { finishWaitpoint: { @@ -94,6 +94,12 @@ const workerCatalog = { }), visibilityTimeoutMs: 5000, }, + tryCompleteBatch: { + schema: z.object({ + batchId: z.string(), + }), + visibilityTimeoutMs: 10_000, + }, }; type EngineWorker = Worker; @@ -162,6 +168,9 @@ export class RunEngine { queueRunsWaitingForWorker: async ({ payload }) => { await this.#queueRunsWaitingForWorker({ backgroundWorkerId: payload.backgroundWorkerId }); }, + tryCompleteBatch: async ({ payload }) => { + await this.#tryCompleteBatch({ batchId: payload.batchId }); + }, }, }); @@ -1305,6 +1314,8 @@ export class RunEngine { } } + await this.#finalizeRun(run); + return executionResultFromSnapshot(newSnapshot); }); }); @@ -1967,6 +1978,7 @@ export class RunEngine { organizationId: true, }, }, + batchId: true, }, }); const newSnapshot = await getLatestExecutionSnapshot(prisma, runId); @@ -2005,6 +2017,8 @@ export class RunEngine { }, }); + await this.#finalizeRun(run); + return { attemptStatus: "RUN_FINISHED", snapshot: newSnapshot, @@ -2209,6 +2223,8 @@ export class RunEngine { }, }); + await this.#finalizeRun(run); + return { attemptStatus: "RUN_FINISHED", snapshot: newSnapshot, @@ -2817,6 +2833,81 @@ export class RunEngine { this.eventBus.emit("workerNotification", { time: new Date(), run: { id: runId } }); } + /* + * 
Whether the run succeeds, fails, is cancelled… we need to run these operations + */ + async #finalizeRun({ id, batchId }: { id: string; batchId: string | null }) { + if (batchId) { + await this.worker.enqueue({ + //this will debounce the call + id: `tryCompleteBatch:${batchId}`, + job: "tryCompleteBatch", + payload: { batchId: batchId }, + //2s in the future + availableAt: new Date(Date.now() + 2_000), + }); + } + } + + /** + * Checks to see if all runs for a BatchTaskRun are completed, if they are then update the status. + * This isn't used operationally, but it's used for the Batches dashboard page. + */ + async #tryCompleteBatch({ batchId }: { batchId: string }) { + return this.#trace( + "#tryCompleteBatch", + { + batchId, + }, + async (span) => { + const batch = await this.prisma.batchTaskRun.findUnique({ + select: { + status: true, + runtimeEnvironmentId: true, + }, + where: { + id: batchId, + }, + }); + + if (!batch) { + this.logger.error("#tryCompleteBatch batch doesn't exist", { batchId }); + return; + } + + if (batch.status === "COMPLETED") { + this.logger.debug("#tryCompleteBatch: Batch already completed", { batchId }); + return; + } + + const runs = await this.prisma.taskRun.findMany({ + select: { + id: true, + status: true, + }, + where: { + batchId, + runtimeEnvironmentId: batch.runtimeEnvironmentId, + }, + }); + + if (runs.every((r) => isFinalRunStatus(r.status))) { + this.logger.debug("#tryCompleteBatch: All runs are completed", { batchId }); + await this.prisma.batchTaskRun.update({ + where: { + id: batchId, + }, + data: { + status: "COMPLETED", + }, + }); + } else { + this.logger.debug("#tryCompleteBatch: Not all runs are completed", { batchId }); + } + } + ); + } + async #getAuthenticatedEnvironmentFromRun(runId: string, tx?: PrismaClientOrTransaction) { const prisma = tx ?? 
this.prisma; const taskRun = await prisma.taskRun.findUnique({ diff --git a/internal-packages/run-engine/src/engine/statuses.ts b/internal-packages/run-engine/src/engine/statuses.ts index 6b5f78a5be..bd3ae0b452 100644 --- a/internal-packages/run-engine/src/engine/statuses.ts +++ b/internal-packages/run-engine/src/engine/statuses.ts @@ -1,4 +1,4 @@ -import { TaskRunExecutionStatus } from "@trigger.dev/database"; +import { TaskRunExecutionStatus, TaskRunStatus } from "@trigger.dev/database"; export function isDequeueableExecutionStatus(status: TaskRunExecutionStatus): boolean { const dequeuableExecutionStatuses: TaskRunExecutionStatus[] = ["QUEUED"]; @@ -12,3 +12,18 @@ export function isExecuting(status: TaskRunExecutionStatus): boolean { ]; return executingExecutionStatuses.includes(status); } + +export function isFinalRunStatus(status: TaskRunStatus): boolean { + const finalStatuses: TaskRunStatus[] = [ + "CANCELED", + "INTERRUPTED", + "COMPLETED_SUCCESSFULLY", + "COMPLETED_WITH_ERRORS", + "SYSTEM_FAILURE", + "CRASHED", + "EXPIRED", + "TIMED_OUT", + ]; + + return finalStatuses.includes(status); +} diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts index 7e1c756de2..f1025d4d0d 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -7,6 +7,7 @@ import { trace } from "@opentelemetry/api"; import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; +import { setTimeout } from "node:timers/promises"; describe("RunEngine batchTrigger", () => { containerTest( @@ -115,6 +116,63 @@ describe("RunEngine batchTrigger", () => { //check the queue length const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); expect(queueLength).toBe(2); + + //dequeue + const [d1, d2] = await 
engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run1.masterQueue, + maxRunCount: 10, + }); + + //attempts + const attempt1 = await engine.startRunAttempt({ + runId: d1.run.id, + snapshotId: d1.snapshot.id, + }); + const attempt2 = await engine.startRunAttempt({ + runId: d2.run.id, + snapshotId: d2.snapshot.id, + }); + + //complete the runs + const result1 = await engine.completeRunAttempt({ + runId: attempt1.run.id, + snapshotId: attempt1.snapshot.id, + completion: { + ok: true, + id: attempt1.run.id, + output: `{"foo":"bar"}`, + outputType: "application/json", + }, + }); + const result2 = await engine.completeRunAttempt({ + runId: attempt2.run.id, + snapshotId: attempt2.snapshot.id, + completion: { + ok: true, + id: attempt2.run.id, + output: `{"baz":"qux"}`, + outputType: "application/json", + }, + }); + + //the batch won't complete immediately + const batchAfter1 = await prisma.batchTaskRun.findUnique({ + where: { + id: batch.id, + }, + }); + expect(batchAfter1?.status).toBe("PENDING"); + + await setTimeout(3_000); + + //the batch should complete + const batchAfter2 = await prisma.batchTaskRun.findUnique({ + where: { + id: batch.id, + }, + }); + expect(batchAfter2?.status).toBe("COMPLETED"); } finally { engine.quit(); } From 7653c2212b8a0957d6fea0cffa5bd29a0ae7480e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 10 Dec 2024 16:24:12 +0000 Subject: [PATCH 250/485] When dequeuing respect passed in maxResources --- .../run-engine/src/engine/index.ts | 48 +++- .../src/engine/tests/dequeuing.test.ts | 209 ++++++++++++++++++ .../run-engine/src/run-queue/index.test.ts | 10 +- .../run-engine/src/run-queue/index.ts | 44 ++-- internal-packages/testcontainers/src/index.ts | 42 +++- internal-packages/testcontainers/src/setup.ts | 5 +- 6 files changed, 324 insertions(+), 34 deletions(-) create mode 100644 internal-packages/run-engine/src/engine/tests/dequeuing.test.ts diff --git a/internal-packages/run-engine/src/engine/index.ts 
b/internal-packages/run-engine/src/engine/index.ts index 2abe625c2c..80dc6a7865 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -470,6 +470,12 @@ export class RunEngine { return []; } + //we can't send more than the max resources + const consumedResources: MachineResources = { + cpu: 0, + memory: 0, + }; + const dequeuedRuns: DequeuedMessage[] = []; for (const message of messages) { @@ -559,7 +565,7 @@ export class RunEngine { ); //worker mismatch so put it back in the queue - await this.runQueue.nackMessage(orgId, runId); + await this.runQueue.nackMessage({ orgId, messageId: runId }); return null; } @@ -594,6 +600,36 @@ export class RunEngine { config: result.task.machineConfig ?? {}, }); + //increment the consumed resources + consumedResources.cpu += machinePreset.cpu; + consumedResources.memory += machinePreset.memory; + + //are we under the limit? + if (maxResources) { + if ( + consumedResources.cpu > maxResources.cpu || + consumedResources.memory > maxResources.memory + ) { + this.logger.debug( + "RunEngine.dequeueFromMasterQueue(): Consumed resources over limit, nacking", + { + runId, + consumedResources, + maxResources, + } + ); + + //put it back in the queue where it was + await this.runQueue.nackMessage({ + orgId, + messageId: runId, + incrementAttemptCount: false, + retryAt: result.run.createdAt.getTime() - result.run.priorityMs, + }); + return null; + } + } + //update the run const lockedTaskRun = await prisma.taskRun.update({ where: { @@ -655,7 +691,7 @@ export class RunEngine { ); //will auto-retry - const gotRequeued = await this.runQueue.nackMessage(orgId, runId); + const gotRequeued = await this.runQueue.nackMessage({ orgId, messageId: runId }); if (!gotRequeued) { await this.#systemFailure({ runId, @@ -746,7 +782,7 @@ export class RunEngine { orgId, } ); - await this.runQueue.nackMessage(orgId, runId); + await this.runQueue.nackMessage({ orgId, messageId: runId }); continue; } 
@@ -2300,7 +2336,11 @@ export class RunEngine { return await this.runLock.lock([run.id], 5000, async (signal) => { //we nack the message, this allows another work to pick up the run - const gotRequeued = await this.runQueue.nackMessage(orgId, run.id, timestamp); + const gotRequeued = await this.runQueue.nackMessage({ + orgId, + messageId: run.id, + retryAt: timestamp, + }); if (!gotRequeued) { const result = await this.#systemFailure({ diff --git a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts new file mode 100644 index 0000000000..13e91ef9f2 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts @@ -0,0 +1,209 @@ +import { + containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "node:timers/promises"; +import { MinimalAuthenticatedEnvironment } from "../../shared/index.js"; +import { PrismaClientOrTransaction } from "@trigger.dev/database"; + +describe("RunEngine dequeuing", () => { + containerTest("Dequeues 5 runs", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0005, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { 
+ const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the runs + const runs = await triggerRuns({ + engine, + environment: authenticatedEnvironment, + taskIdentifier, + prisma, + count: 10, + }); + expect(runs.length).toBe(10); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength).toBe(10); + + //dequeue + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 5, + }); + + expect(dequeued.length).toBe(5); + } finally { + engine.quit(); + } + }); + + containerTest( + "Dequeues runs within machine constraints", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0005, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier, { + preset: "small-1x", + }); + + //trigger the runs + const runs = await triggerRuns({ + engine, + environment: authenticatedEnvironment, + taskIdentifier, + prisma, + count: 20, + }); + expect(runs.length).toBe(20); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength).toBe(20); + + //dequeue + 
const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 5, + maxResources: { + cpu: 1.1, + memory: 3.8, + }, + }); + expect(dequeued.length).toBe(2); + + //check the queue length + const queueLength2 = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength2).toBe(18); + + const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 10, + maxResources: { + cpu: 4.7, + memory: 3.0, + }, + }); + expect(dequeued2.length).toBe(6); + + //check the queue length + const queueLength3 = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength3).toBe(12); + } finally { + engine.quit(); + } + } + ); +}); + +async function triggerRuns({ + engine, + environment, + taskIdentifier, + prisma, + count, +}: { + engine: RunEngine; + environment: MinimalAuthenticatedEnvironment; + taskIdentifier: string; + prisma: PrismaClientOrTransaction; + count: number; +}) { + const runs = []; + for (let i = 0; i < count; i++) { + runs[i] = await engine.trigger( + { + number: i, + friendlyId: generateFriendlyId("run"), + environment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${taskIdentifier}`, + isTest: false, + tags: [], + }, + prisma + ); + } + + return runs; +} diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 75af69b456..f914555d97 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -596,7 +596,10 @@ describe("RunQueue", () => { ); expect(taskConcurrency).toBe(1); - await queue.nackMessage(messages[0].message.orgId, messages[0].messageId); + await queue.nackMessage({ + orgId: 
messages[0].message.orgId, + messageId: messages[0].messageId, + }); //we need to wait because the default wait is 1 second await setTimeout(300); @@ -755,7 +758,10 @@ describe("RunQueue", () => { expect(exists).toBe(1); //nack (we only have attempts set to 1) - await queue.nackMessage(messages[0].message.orgId, messages[0].messageId); + await queue.nackMessage({ + orgId: messages[0].message.orgId, + messageId: messages[0].messageId, + }); //dequeue const messages2 = await queue.dequeueMessageFromMasterQueue("test_12345", "main", 10); diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index dbbdd8d82e..e558ad8dfc 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -407,7 +407,17 @@ export class RunQueue { * Negative acknowledge a message, which will requeue the message (with an optional future date). If you pass no date it will get reattempted with exponential backoff. 
*/ - public async nackMessage(orgId: string, messageId: string, retryAt?: number) { + public async nackMessage({ + orgId, + messageId, + retryAt, + incrementAttemptCount = true, + }: { + orgId: string; + messageId: string; + retryAt?: number; + incrementAttemptCount?: boolean; + }) { return this.#trace( "nackMessage", async (span) => { @@ -445,21 +455,23 @@ export class RunQueue { ); const envQueueKey = this.keys.envQueueKeyFromQueue(message.queue); - message.attempt = message.attempt + 1; - if (message.attempt >= maxAttempts) { - await this.redis.moveToDeadLetterQueue( - messageKey, - messageQueue, - concurrencyKey, - envConcurrencyKey, - projectConcurrencyKey, - envQueueKey, - taskConcurrencyKey, - "dlq", - messageId, - JSON.stringify(message.masterQueues) - ); - return false; + if (incrementAttemptCount) { + message.attempt = message.attempt + 1; + if (message.attempt >= maxAttempts) { + await this.redis.moveToDeadLetterQueue( + messageKey, + messageQueue, + concurrencyKey, + envConcurrencyKey, + projectConcurrencyKey, + envQueueKey, + taskConcurrencyKey, + "dlq", + messageId, + JSON.stringify(message.masterQueues) + ); + return false; + } } const nextRetryDelay = calculateNextRetryDelay(this.retryOptions, message.attempt); diff --git a/internal-packages/testcontainers/src/index.ts b/internal-packages/testcontainers/src/index.ts index acfe02b33e..ace5560d56 100644 --- a/internal-packages/testcontainers/src/index.ts +++ b/internal-packages/testcontainers/src/index.ts @@ -30,7 +30,12 @@ type Use = (value: T) => Promise; const network = async ({}, use: Use) => { const network = await new Network().start(); - await use(network); + try { + await use(network); + } finally { + // Make sure to stop the network after use + await network.stop(); + } }; const postgresContainer = async ( @@ -38,8 +43,11 @@ const postgresContainer = async ( use: Use ) => { const { container } = await createPostgresContainer(network); - await use(container); - await container.stop(); + try { 
+ await use(container); + } finally { + await container.stop(); + } }; const prisma = async ( @@ -53,16 +61,22 @@ const prisma = async ( }, }, }); - await use(prisma); - await prisma.$disconnect(); + try { + await use(prisma); + } finally { + await prisma.$disconnect(); + } }; export const postgresTest = test.extend({ network, postgresContainer, prisma }); const redisContainer = async ({}, use: Use) => { const { container } = await createRedisContainer(); - await use(container); - await container.stop(); + try { + await use(container); + } finally { + await container.stop(); + } }; const redis = async ( @@ -74,8 +88,11 @@ const redis = async ( port: redisContainer.getPort(), password: redisContainer.getPassword(), }); - await use(redis); - await redis.quit(); + try { + await use(redis); + } finally { + await redis.quit(); + } }; export const redisTest = test.extend({ redisContainer, redis }); @@ -88,8 +105,11 @@ const electricOrigin = async ( use: Use ) => { const { origin, container } = await createElectricContainer(postgresContainer, network); - await use(origin); - await container.stop(); + try { + await use(origin); + } finally { + await container.stop(); + } }; export const containerTest = test.extend({ diff --git a/internal-packages/testcontainers/src/setup.ts b/internal-packages/testcontainers/src/setup.ts index f9e2cf5bae..cc7dada97d 100644 --- a/internal-packages/testcontainers/src/setup.ts +++ b/internal-packages/testcontainers/src/setup.ts @@ -3,6 +3,7 @@ import { generateFriendlyId, sanitizeQueueName, } from "@trigger.dev/core/v3/apps"; +import { MachineConfig } from "@trigger.dev/core/v3/schemas"; import { BackgroundWorkerTask, Prisma, @@ -63,7 +64,8 @@ export async function setupAuthenticatedEnvironment( export async function setupBackgroundWorker( prisma: PrismaClient, environment: AuthenticatedEnvironment, - taskIdentifier: string | string[] + taskIdentifier: string | string[], + machineConfig?: MachineConfig ) { const worker = await 
prisma.backgroundWorker.create({ data: { @@ -90,6 +92,7 @@ export async function setupBackgroundWorker( workerId: worker.id, runtimeEnvironmentId: environment.id, projectId: environment.project.id, + machineConfig, }, }); From ff369f34f440eef0bef3de268f148cc08cb75666 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 10 Dec 2024 16:51:17 +0000 Subject: [PATCH 251/485] Ported over the new run props: idempotencyKeyExpiresAt, versions, oneTimeUseToken, maxDurationInSeconds --- .../app/v3/services/triggerTaskV2.server.ts | 7 +++---- .../run-engine/src/engine/index.ts | 18 +++++++++++++++++- .../run-engine/src/engine/types.ts | 8 +++++++- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 78fec28670..98defa3441 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -285,10 +285,9 @@ export class TriggerTaskServiceV2 extends WithRunEngine { parentSpanId: options.parentAsLinkType === "replay" ? 
undefined : traceparent?.spanId, lockedToVersionId: lockedToBackgroundWorker?.id, - // TODO - // taskVersion: lockedToBackgroundWorker?.version, - // sdkVersion: lockedToBackgroundWorker?.sdkVersion, - // cliVersion: lockedToBackgroundWorker?.cliVersion, + taskVersion: lockedToBackgroundWorker?.version, + sdkVersion: lockedToBackgroundWorker?.sdkVersion, + cliVersion: lockedToBackgroundWorker?.cliVersion, concurrencyKey: body.options?.concurrencyKey, queueName, queue: body.options?.queue, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 80dc6a7865..8121927779 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -197,6 +197,7 @@ export class RunEngine { number, environment, idempotencyKey, + idempotencyKeyExpiresAt, taskIdentifier, payload, payloadType, @@ -206,6 +207,9 @@ export class RunEngine { spanId, parentSpanId, lockedToVersionId, + taskVersion, + sdkVersion, + cliVersion, concurrencyKey, masterQueue, queueName, @@ -226,6 +230,8 @@ export class RunEngine { metadataType, seedMetadata, seedMetadataType, + oneTimeUseToken, + maxDurationInSeconds, }: TriggerParams, tx?: PrismaClientOrTransaction ): Promise { @@ -257,6 +263,7 @@ export class RunEngine { runtimeEnvironmentId: environment.id, projectId: environment.project.id, idempotencyKey, + idempotencyKeyExpiresAt, taskIdentifier, payload, payloadType, @@ -266,6 +273,9 @@ export class RunEngine { spanId, parentSpanId, lockedToVersionId, + taskVersion, + sdkVersion, + cliVersion, concurrencyKey, queue: queueName, masterQueue, @@ -280,8 +290,10 @@ export class RunEngine { tags.length === 0 ? undefined : { - connect: tags.map((id) => ({ id })), + connect: tags, }, + runTags: tags.length === 0 ? 
undefined : tags.map((tag) => tag.name), + oneTimeUseToken, parentTaskRunId, rootTaskRunId, batchId, @@ -291,6 +303,7 @@ export class RunEngine { metadataType, seedMetadata, seedMetadataType, + maxDurationInSeconds, executionSnapshots: { create: { engine: "V2", @@ -642,6 +655,9 @@ export class RunEngine { startedAt: result.run.startedAt ?? new Date(), baseCostInCents: this.options.machines.baseCostInCents, machinePreset: machinePreset.name, + taskVersion: result.worker.version, + sdkVersion: result.worker.sdkVersion, + cliVersion: result.worker.cliVersion, maxDurationInSeconds: getMaxDuration( result.run.maxDurationInSeconds, result.task.maxDurationInSeconds diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index fa7e368734..36dd29ef74 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -43,6 +43,7 @@ export type TriggerParams = { number: number; environment: MinimalAuthenticatedEnvironment; idempotencyKey?: string; + idempotencyKeyExpiresAt?: Date; taskIdentifier: string; payload: string; payloadType: string; @@ -52,6 +53,9 @@ export type TriggerParams = { spanId: string; parentSpanId?: string; lockedToVersionId?: string; + taskVersion?: string; + sdkVersion?: string; + cliVersion?: string; concurrencyKey?: string; masterQueue: string; queueName: string; @@ -62,7 +66,7 @@ export type TriggerParams = { maxAttempts?: number; priorityMs?: number; ttl?: string; - tags: string[]; + tags: { id: string; name: string }[]; parentTaskRunId?: string; rootTaskRunId?: string; batchId?: string; @@ -72,4 +76,6 @@ export type TriggerParams = { metadataType?: string; seedMetadata?: string; seedMetadataType?: string; + oneTimeUseToken?: string; + maxDurationInSeconds?: number; }; From fcb4e3bafbd9a695118dfa7d53471bc9b1677a4d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 10 Dec 2024 16:53:46 +0000 Subject: [PATCH 252/485] 
=?UTF-8?q?Didn=E2=80=99t=20hit=20save=E2=80=A6=20?= =?UTF-8?q?the=20new=20props=20when=20triggering=20tasks=20passed=20throug?= =?UTF-8?q?h?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../app/v3/services/triggerTaskV2.server.ts | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 98defa3441..fffcb8a927 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -26,6 +26,7 @@ import { stringifyDuration } from "@trigger.dev/core/v3/apps"; import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask.server"; import { Prisma } from "@trigger.dev/database"; import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; +import { clampMaxDuration } from "../utils/maxDuration"; /** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. */ export class TriggerTaskServiceV2 extends WithRunEngine { @@ -231,7 +232,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { span.setAttribute("queueName", queueName); //upsert tags - let tagIds: string[] = []; + let tags: { id: string; name: string }[] = []; const bodyTags = typeof body.options?.tags === "string" ? [body.options.tags] : body.options?.tags; if (bodyTags && bodyTags.length > 0) { @@ -241,7 +242,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { projectId: environment.projectId, }); if (tagRecord) { - tagIds.push(tagRecord.id); + tags.push(tagRecord); } } } @@ -273,8 +274,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { friendlyId: runFriendlyId, environment: environment, idempotencyKey, - // TODO - // idempotencyKeyExpiresAt: idempotencyKey ? idempotencyKeyExpiresAt : undefined, + idempotencyKeyExpiresAt: idempotencyKey ? 
idempotencyKeyExpiresAt : undefined, taskIdentifier: taskId, payload: payloadPacket.data ?? "", payloadType: payloadPacket.dataType, @@ -297,9 +297,8 @@ export class TriggerTaskServiceV2 extends WithRunEngine { queuedAt: delayUntil ? undefined : new Date(), maxAttempts: body.options?.maxAttempts, ttl, - tags: tagIds, - // TODO - // oneTimeUseToken: options.oneTimeUseToken, + tags, + oneTimeUseToken: options.oneTimeUseToken, parentTaskRunId: parentRun?.id, rootTaskRunId: parentRun?.rootTaskRunId ?? undefined, batchId: body.options?.parentBatch ?? undefined, @@ -309,12 +308,9 @@ export class TriggerTaskServiceV2 extends WithRunEngine { metadataType: metadataPacket?.dataType, seedMetadata: metadataPacket?.data, seedMetadataType: metadataPacket?.dataType, - // TODO - // maxDurationInSeconds: body.options?.maxDuration - // ? clampMaxDuration(body.options.maxDuration) - // : undefined, - // runTags: bodyTags, - // oneTimeUseToken: options.oneTimeUseToken, + maxDurationInSeconds: body.options?.maxDuration + ? 
clampMaxDuration(body.options.maxDuration) + : undefined, }, this._prisma ); From fd58ce6e4a7666afa7e568f3541a893f505156ce Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 11 Dec 2024 09:28:02 +0000 Subject: [PATCH 253/485] Idempotency expiration + waitpoint edge case --- .../app/v3/services/triggerTaskV2.server.ts | 59 +++++++++---------- .../run-engine/src/engine/index.ts | 34 ++++++++--- 2 files changed, 55 insertions(+), 38 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index fffcb8a927..02786922a6 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -44,7 +44,6 @@ export class TriggerTaskServiceV2 extends WithRunEngine { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); - // TODO: Add idempotency key expiring here const idempotencyKey = options.idempotencyKey ?? body.options?.idempotencyKey; const idempotencyKeyExpiresAt = options.idempotencyKeyExpiresAt ?? 
@@ -74,39 +73,39 @@ export class TriggerTaskServiceV2 extends WithRunEngine { if (existingRun) { span.setAttribute("runId", existingRun.friendlyId); - // TODO - // if ( - // existingRun.idempotencyKeyExpiresAt && - // existingRun.idempotencyKeyExpiresAt < new Date() - // ) { - // logger.debug("[TriggerTaskService][call] Idempotency key has expired", { - // idempotencyKey: options.idempotencyKey, - // run: existingRun, - // }); - - // // Update the existing batch to remove the idempotency key - // await this._prisma.taskRun.update({ - // where: { id: existingRun.id }, - // data: { idempotencyKey: null }, - // }); - // } - - //We're using `andWait` so we need to block the parent run with a waitpoint if ( - existingRun.associatedWaitpoint?.status === "PENDING" && - body.options?.resumeParentOnCompletion && - body.options?.parentRunId + existingRun.idempotencyKeyExpiresAt && + existingRun.idempotencyKeyExpiresAt < new Date() ) { - await this._engine.blockRunWithWaitpoint({ - runId: body.options.parentRunId, - waitpointId: existingRun.associatedWaitpoint.id, - environmentId: environment.id, - projectId: environment.projectId, - tx: this._prisma, + logger.debug("[TriggerTaskService][call] Idempotency key has expired", { + idempotencyKey: options.idempotencyKey, + run: existingRun, }); - } - return existingRun; + // Update the existing run to remove the idempotency key + await this._prisma.taskRun.update({ + where: { id: existingRun.id }, + data: { idempotencyKey: null }, + }); + } else { + //We're using `andWait` so we need to block the parent run with a waitpoint + if ( + existingRun.associatedWaitpoint?.status === "PENDING" && + body.options?.resumeParentOnCompletion && + body.options?.parentRunId + ) { + await this._engine.blockRunWithWaitpoint({ + runId: body.options.parentRunId, + waitpointId: existingRun.associatedWaitpoint.id, + environmentId: environment.id, + projectId: environment.projectId, + checkWaitpointIsPending: true, + tx: this._prisma, + }); + } + + 
return existingRun; + } } if (environment.type !== "DEVELOPMENT") { diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 8121927779..280e6ebf74 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1530,6 +1530,7 @@ export class RunEngine { waitpointId, projectId, failAfter, + checkWaitpointIsPending = false, tx, }: { runId: string; @@ -1537,19 +1538,13 @@ export class RunEngine { environmentId: string; projectId: string; failAfter?: Date; + /** If the waitpoint could be completed, i.e. not inside a run lock and not new */ + checkWaitpointIsPending?: boolean; tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? this.prisma; return await this.runLock.lock([runId], 5000, async (signal) => { - const taskWaitpoint = await prisma.taskRunWaitpoint.create({ - data: { - taskRunId: runId, - waitpointId: waitpointId, - projectId: projectId, - }, - }); - let snapshot: TaskRunExecutionSnapshot = await getLatestExecutionSnapshot(prisma, runId); let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; @@ -1560,6 +1555,29 @@ export class RunEngine { newStatus = "EXECUTING_WITH_WAITPOINTS"; } + if (checkWaitpointIsPending) { + const waitpoint = await prisma.waitpoint.findUnique({ + where: { id: waitpointId }, + }); + + if (!waitpoint) { + throw new ServiceValidationError("Waitpoint not found", 404); + } + + //the waitpoint has been completed since it was retrieved + if (waitpoint.status !== "PENDING") { + return snapshot; + } + } + + const taskWaitpoint = await prisma.taskRunWaitpoint.create({ + data: { + taskRunId: runId, + waitpointId: waitpointId, + projectId: projectId, + }, + }); + //if the state has changed, create a new snapshot if (newStatus !== snapshot.executionStatus) { snapshot = await this.#createExecutionSnapshot(prisma, { From d7a9e2b667ee1e5cee2c583d764d91609b5093c0 Mon Sep 17 00:00:00 2001 From: Matt Aitken 
Date: Thu, 12 Dec 2024 12:21:17 +0000 Subject: [PATCH 254/485] WIP on creating checkpoint, parking for now --- .../run-engine/src/engine/index.ts | 18 +++++++++++++++++- .../run-engine/src/engine/statuses.ts | 12 ++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 280e6ebf74..2366dd0093 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -56,7 +56,12 @@ import { } from "./executionSnapshots"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; -import { isDequeueableExecutionStatus, isExecuting, isFinalRunStatus } from "./statuses"; +import { + isCheckpointable, + isDequeueableExecutionStatus, + isExecuting, + isFinalRunStatus, +} from "./statuses"; import { HeartbeatTimeouts, MachineResources, RunEngineOptions, TriggerParams } from "./types"; const workerCatalog = { @@ -1739,6 +1744,17 @@ export class RunEngine { } //todo check the status is checkpointable + if (!isCheckpointable(snapshot.executionStatus)) { + this.logger.error("Tried to createCheckpoint on a run in an invalid state", { + snapshot, + }); + + //check if the server should already be shutting down, if so return a result saying it can shutdown but that there's no checkpoint + + //otherwise return a result saying it can't checkpoint with an error and execution status + + return; + } //create a new execution snapshot, with the checkpoint diff --git a/internal-packages/run-engine/src/engine/statuses.ts b/internal-packages/run-engine/src/engine/statuses.ts index bd3ae0b452..3ed80df993 100644 --- a/internal-packages/run-engine/src/engine/statuses.ts +++ b/internal-packages/run-engine/src/engine/statuses.ts @@ -13,6 +13,18 @@ export function isExecuting(status: TaskRunExecutionStatus): boolean { return executingExecutionStatuses.includes(status); } +export function 
isCheckpointable(status: TaskRunExecutionStatus): boolean { + const checkpointableStatuses: TaskRunExecutionStatus[] = [ + //will allow checkpoint starts + "RUN_CREATED", + "QUEUED", + //executing + "EXECUTING", + "EXECUTING_WITH_WAITPOINTS", + ]; + return checkpointableStatuses.includes(status); +} + export function isFinalRunStatus(status: TaskRunStatus): boolean { const finalStatuses: TaskRunStatus[] = [ "CANCELED", From b95dd631a703f67a4bcb24a8b2757ce4d3109e54 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 9 Dec 2024 09:40:29 +0000 Subject: [PATCH 255/485] fix worker routes --- apps/webapp/app/routes/api.v1.worker-actions.connect.ts | 2 +- apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts | 2 +- apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts | 2 +- ...tions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts | 2 +- ...-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts | 2 +- ...orker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts | 2 +- apps/webapp/app/routes/api.v1.workers.ts | 1 + 7 files changed, 7 insertions(+), 6 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts index be666b2403..5b74c0033a 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts @@ -2,7 +2,7 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { WorkerApiConnectRequestBody, WorkerApiConnectResponseBody, -} from "@trigger.dev/worker/schemas"; +} from "@trigger.dev/worker"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( diff --git a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts index 027b170d7d..fd19968fb1 100644 --- 
a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiDequeueResponseBody } from "@trigger.dev/worker/schemas"; +import { WorkerApiDequeueResponseBody } from "@trigger.dev/worker"; import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const loader = createLoaderWorkerApiRoute( diff --git a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts index 6f930c8b3a..332e5396ff 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts @@ -2,7 +2,7 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { WorkerApiConnectResponseBody, WorkerApiHeartbeatRequestBody, -} from "@trigger.dev/worker/schemas"; +} from "@trigger.dev/worker"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts index e0860949d7..cabd1b37f9 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts @@ -2,7 +2,7 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { WorkerApiRunAttemptCompleteRequestBody, WorkerApiRunAttemptCompleteResponseBody, -} from "@trigger.dev/worker/schemas"; +} from "@trigger.dev/worker"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git 
a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts index 6babc1c0fd..37cd18c63c 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiRunAttemptStartResponseBody } from "@trigger.dev/worker/schemas"; +import { WorkerApiRunAttemptStartResponseBody } from "@trigger.dev/worker"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts index 125a351c6a..226ab12373 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkloadHeartbeatResponseBody } from "@trigger.dev/worker/schemas"; +import { WorkloadHeartbeatResponseBody } from "@trigger.dev/worker"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/api.v1.workers.ts b/apps/webapp/app/routes/api.v1.workers.ts index 8655f7f512..8625ae959d 100644 --- a/apps/webapp/app/routes/api.v1.workers.ts +++ b/apps/webapp/app/routes/api.v1.workers.ts @@ -13,6 +13,7 @@ import { WorkerGroupService } from "~/v3/services/worker/workerGroupService.serv export const loader = createLoaderApiRoute( { corsStrategy: "all", + findResource: async () => 1, // 
This is a dummy function, we don't need to find a resource }, async ({ authentication }): Promise> => { const service = new WorkerGroupService(); From d434746e4bbcefa1cd58a46c40469af5f14ee891 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:04:54 +0000 Subject: [PATCH 256/485] upgrade webapp node types to support generic event emitter --- apps/webapp/package.json | 2 +- pnpm-lock.yaml | 265 ++++++++++----------------------------- 2 files changed, 67 insertions(+), 200 deletions(-) diff --git a/apps/webapp/package.json b/apps/webapp/package.json index 9c63f5bc11..3488e24a93 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -208,7 +208,7 @@ "@types/lodash.omit": "^4.5.7", "@types/marked": "^4.0.3", "@types/morgan": "^1.9.3", - "@types/node": "^18.11.15", + "@types/node": "20.14.14", "@types/node-fetch": "^2.6.2", "@types/prismjs": "^1.26.0", "@types/qs": "^6.9.7", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index db285d6017..6bcfe58105 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -660,7 +660,7 @@ importers: version: link:../../internal-packages/testcontainers '@remix-run/dev': specifier: 2.1.0 - version: 2.1.0(@remix-run/serve@2.1.0)(@types/node@18.11.18)(ts-node@10.9.1)(typescript@5.2.2) + version: 2.1.0(@remix-run/serve@2.1.0)(@types/node@20.14.14)(ts-node@10.9.1)(typescript@5.2.2) '@remix-run/eslint-config': specifier: 2.1.0 version: 2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.2.2) @@ -713,8 +713,8 @@ importers: specifier: ^1.9.3 version: 1.9.4 '@types/node': - specifier: ^18.11.15 - version: 18.11.18 + specifier: 20.14.14 + version: 20.14.14 '@types/node-fetch': specifier: ^2.6.2 version: 2.6.2 @@ -834,7 +834,7 @@ importers: version: 3.4.1(ts-node@10.9.1) ts-node: specifier: ^10.7.0 - version: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2) + version: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2) tsconfig-paths: specifier: ^3.14.1 
version: 3.14.1 @@ -846,7 +846,7 @@ importers: version: 4.0.5(typescript@5.2.2) vitest: specifier: ^1.4.0 - version: 1.4.0(@types/node@18.11.18) + version: 1.4.0(@types/node@20.14.14) docs: {} @@ -6846,7 +6846,7 @@ packages: engines: {node: ^8.13.0 || >=10.10.0} dependencies: '@grpc/proto-loader': 0.7.7 - '@types/node': 18.19.20 + '@types/node': 20.14.14 /@grpc/proto-loader@0.7.7: resolution: {integrity: sha512-1TIeXOi8TuSCQprPItwoMymZXxWT0CPxUhkrkeCUH+D8U7QDwQ6b7SUz2MaLuWM2llT+J/TVFLmQI5KtML3BhQ==} @@ -8662,7 +8662,7 @@ packages: engines: {node: '>=16'} hasBin: true dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 playwright-core: 1.37.0 optionalDependencies: fsevents: 2.3.2 @@ -13836,7 +13836,7 @@ packages: - encoding dev: false - /@remix-run/dev@2.1.0(@remix-run/serve@2.1.0)(@types/node@18.11.18)(ts-node@10.9.1)(typescript@5.2.2): + /@remix-run/dev@2.1.0(@remix-run/serve@2.1.0)(@types/node@20.14.14)(ts-node@10.9.1)(typescript@5.2.2): resolution: {integrity: sha512-Hn5lw46F+a48dp5uHKe68ckaHgdStW4+PmLod+LMFEqrMbkF0j4XD1ousebxlv989o0Uy/OLgfRMgMy4cBOvHg==} engines: {node: '>=18.0.0'} hasBin: true @@ -13861,7 +13861,7 @@ packages: '@remix-run/serve': 2.1.0(typescript@5.2.2) '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) '@types/mdx': 2.0.5 - '@vanilla-extract/integration': 6.2.1(@types/node@18.11.18) + '@vanilla-extract/integration': 6.2.1(@types/node@20.14.14) arg: 5.0.2 cacache: 17.1.4 chalk: 4.1.2 @@ -14418,7 +14418,7 @@ packages: resolution: {integrity: sha512-DTuBFbqu4gGfajREEMrkq5jBhcnskinhr4+AnfJEk48zhVeEv3XnUKGIX98B74kxhYsIMfApGGySTn7V3b5yBA==} engines: {node: '>= 12.13.0', npm: '>= 6.12.0'} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: false /@slack/types@2.8.0: @@ -14433,7 +14433,7 @@ packages: '@slack/logger': 3.0.0 '@slack/types': 2.8.0 '@types/is-stream': 1.1.0 - '@types/node': 18.19.20 + '@types/node': 20.14.14 axios: 0.27.2 eventemitter3: 3.1.2 form-data: 2.5.1 @@ -15334,7 +15334,7 @@ packages: 
resolution: {integrity: sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==} dependencies: '@types/connect': 3.4.35 - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/bun@1.1.6: @@ -15370,7 +15370,7 @@ packages: /@types/connect@3.4.35: resolution: {integrity: sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/cookie@0.4.1: @@ -15386,12 +15386,12 @@ packages: /@types/cors@2.8.17: resolution: {integrity: sha512-8CGDvrBj1zgo2qE+oS3pOCyYNqCPryMWY2bGfwA0dcfopWGgxs+78df0Rs3rc9THP4JkOhLsAa+15VdpAqkcUA==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 /@types/cross-spawn@6.0.2: resolution: {integrity: sha512-KuwNhp3eza+Rhu8IFI5HUXRP0LIhqH5cAjubUvGXXthh4YYBuP2ntwEX+Cz8GJoZUHlKo247wPWOfA9LYEq4cw==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/d3-array@3.0.8: @@ -15459,7 +15459,7 @@ packages: /@types/docker-modem@3.0.6: resolution: {integrity: sha512-yKpAGEuKRSS8wwx0joknWxsmLha78wNMe9R2S3UNsVOkZded8UqOrV8KoeDXoXsjndxwyF3eIhyClGbO1SEhEg==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 '@types/ssh2': 1.15.1 dev: true @@ -15467,7 +15467,7 @@ packages: resolution: {integrity: sha512-42R9eoVqJDSvVspV89g7RwRqfNExgievLNWoHkg7NoWIqAmavIbgQBb4oc0qRtHkxE+I3Xxvqv7qVXFABKPBTg==} dependencies: '@types/docker-modem': 3.0.6 - '@types/node': 18.19.20 + '@types/node': 20.14.14 '@types/ssh2': 1.15.1 dev: true @@ -15502,7 +15502,7 @@ packages: /@types/express-serve-static-core@4.17.32: resolution: {integrity: sha512-aI5h/VOkxOF2Z1saPy0Zsxs5avets/iaiAJYznQFm5By/pamU31xWKL//epiF4OfUA2qTOc9PV6tCUjhO8wlZA==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 '@types/qs': 6.9.7 '@types/range-parser': 1.2.4 dev: true @@ -15519,7 +15519,7 @@ packages: /@types/fluent-ffmpeg@2.1.26: resolution: {integrity: 
sha512-0JVF3wdQG+pN0ImwWD0bNgJiKF2OHg/7CDBHw5UIbRTvlnkgGHK6V5doE54ltvhud4o31/dEiHm23CAlxFiUQg==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/gradient-string@1.1.2: @@ -15541,7 +15541,7 @@ packages: /@types/interpret@1.1.3: resolution: {integrity: sha512-uBaBhj/BhilG58r64mtDb/BEdH51HIQLgP5bmWzc5qCtFMja8dCk/IOJmk36j0lbi9QHwI6sbtUNGuqXdKCAtQ==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: false /@types/invariant@2.2.37: @@ -15557,7 +15557,7 @@ packages: /@types/is-stream@1.1.0: resolution: {integrity: sha512-jkZatu4QVbR60mpIzjINmtS1ZF4a/FqdTUTBeQDVOQ2PYyidtwFKr0B5G6ERukKwliq+7mIXvxyppwzG5EgRYg==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: false /@types/js-cookie@2.2.7: @@ -15585,7 +15585,7 @@ packages: /@types/keyv@3.1.4: resolution: {integrity: sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/lodash.omit@4.5.7: @@ -15630,7 +15630,7 @@ packages: /@types/morgan@1.9.4: resolution: {integrity: sha512-cXoc4k+6+YAllH3ZHmx4hf7La1dzUk6keTR4bF4b4Sc0mZxU/zK4wO7l+ZzezXm/jkYj/qC+uYGZrarZdIVvyQ==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/ms@0.7.31: @@ -15639,34 +15639,31 @@ packages: /@types/mute-stream@0.0.4: resolution: {integrity: sha512-CPM9nzrCPPJHQNA9keH9CVkVI+WR5kMa+7XEs5jcGQ0VoAGnLv242w8lIVgwAEfmE4oufJRaTc9PNLQl0ioAow==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: false /@types/node-fetch@2.6.2: resolution: {integrity: sha512-DHqhlq5jeESLy19TYhLakJ07kNumXWjcDdxXsLUMJZ6ue8VZJj4kLPQVE/2mdHh3xZziNF1xppu5lwmS53HR+A==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 form-data: 3.0.1 dev: true /@types/node-fetch@2.6.4: resolution: {integrity: sha512-1ZX9fcN4Rvkvgv4E6PAY5WXUFWFcRWxZa3EW83UjycOB9ljJCedb2CupIP4RZMEwF/M3eTcCihbBRgwtGbg5Rg==} dependencies: - '@types/node': 
18.19.20 + '@types/node': 20.14.14 form-data: 3.0.1 dev: false /@types/node-forge@1.3.10: resolution: {integrity: sha512-y6PJDYN4xYBxwd22l+OVH35N+1fCYWiuC3aiP2SlXVE6Lo7SS+rSx9r89hLxrP4pn6n1lBGhHJ12pj3F3Mpttw==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 /@types/node@12.20.55: resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} - /@types/node@18.11.18: - resolution: {integrity: sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==} - /@types/node@18.17.1: resolution: {integrity: sha512-xlR1jahfizdplZYRU59JlUx9uzF1ARa8jbhM11ccpCJya8kvos5jwdm2ZAgxSCwOl0fq21svP18EVwPBXMQudw==} dev: true @@ -15710,7 +15707,7 @@ packages: /@types/pg@8.11.6: resolution: {integrity: sha512-/2WmmBXHLsfRqzfHW7BNZ8SbYzE8OSk7i3WjFYvfgRHj7S1xj+16Je5fUKv3lVdVzk/zn9TXOqf+avFCFIE0yQ==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 pg-protocol: 1.6.1 pg-types: 4.0.2 dev: false @@ -15718,7 +15715,7 @@ packages: /@types/pg@8.6.6: resolution: {integrity: sha512-O2xNmXebtwVekJDD+02udOncjVcMZQuTEQEMpKJ0ZRf5E7/9JJX3izhKUcUifBkyKpljyUM6BTgy2trmviKlpw==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 pg-protocol: 1.6.1 pg-types: 2.2.0 @@ -15771,7 +15768,7 @@ packages: /@types/readable-stream@4.0.14: resolution: {integrity: sha512-xZn/AuUbCMShGsqH/ehZtGDwQtbx00M9rZ2ENLe4tOjFZ/JFeWMhEZkk2fEe1jAUqqEAURIkFJ7Az/go8mM1/w==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 safe-buffer: 5.1.2 dev: true @@ -15783,7 +15780,7 @@ packages: resolution: {integrity: sha512-G3sY+NpsA9jnwm0ixhAFQSJ3Q9JkpLZpJbI3GMv0mIAT0y3mRabYeINzal5WOChIiaTEGQYlHOKgkaM9EisWHw==} dependencies: '@types/caseless': 0.12.5 - '@types/node': 18.19.20 + '@types/node': 20.14.14 '@types/tough-cookie': 4.0.5 form-data: 2.5.1 dev: false @@ -15795,7 +15792,7 @@ packages: /@types/responselike@1.0.0: resolution: {integrity: 
sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/retry@0.12.0: @@ -15830,7 +15827,7 @@ packages: resolution: {integrity: sha512-z5xyF6uh8CbjAu9760KDKsH2FcDxZ2tFCsA4HIMWE6IkiYMXfVoa+4f9KX+FN0ZLsaMw1WNG2ETLA6N+/YA+cg==} dependencies: '@types/mime': 3.0.1 - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/shimmer@1.0.2: @@ -15853,13 +15850,13 @@ packages: /@types/ssh2-streams@0.1.12: resolution: {integrity: sha512-Sy8tpEmCce4Tq0oSOYdfqaBpA3hDM8SoxoFh5vzFsu2oL+znzGz8oVWW7xb4K920yYMUY+PIG31qZnFMfPWNCg==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/ssh2@0.5.52: resolution: {integrity: sha512-lbLLlXxdCZOSJMCInKH2+9V/77ET2J6NPQHpFI0kda61Dd1KglJs+fPQBchizmzYSOJBgdTajhPqBO1xxLywvg==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 '@types/ssh2-streams': 0.1.12 dev: true @@ -15878,7 +15875,7 @@ packages: dependencies: '@types/cookiejar': 2.1.5 '@types/methods': 1.1.4 - '@types/node': 18.19.20 + '@types/node': 20.14.14 form-data: 4.0.0 dev: true @@ -15892,7 +15889,7 @@ packages: /@types/tar@6.1.4: resolution: {integrity: sha512-Cp4oxpfIzWt7mr2pbhHT2OTXGMAL0szYCzuf8lRWyIMCgsx6/Hfc3ubztuhvzXHXgraTQxyOCmmg7TDGIMIJJQ==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 minipass: 4.0.0 dev: true @@ -15914,7 +15911,7 @@ packages: /@types/webpack@5.28.5(@swc/core@1.3.101)(esbuild@0.19.11): resolution: {integrity: sha512-wR87cgvxj3p6D0Crt1r5avwqffqPXUkNlnQ1mjU93G7gCuFjufZR4I6j8cz5g1F1tTYpfOOFvly+cmIQwL9wvw==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 tapable: 2.2.1 webpack: 5.88.2(@swc/core@1.3.101)(esbuild@0.19.11) transitivePeerDependencies: @@ -15931,25 +15928,25 @@ packages: /@types/ws@8.5.10: resolution: {integrity: sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==} dependencies: - 
'@types/node': 18.19.20 + '@types/node': 20.14.14 /@types/ws@8.5.12: resolution: {integrity: sha512-3tPRkv1EtkDpzlgyKyI8pGsGZAGPEaXeu0DOj5DI25Ja91bdAYddYHbADRYVrZMRbfW+1l5YwXVDKohDJNQxkQ==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/ws@8.5.4: resolution: {integrity: sha512-zdQDHKUgcX/zBc4GrwsE/7dVdAD8JR4EuiAXiiUhhfyIJXXb2+PrGshFyeXWQPMmmZ2XxgaqclgpIC7eTXc1mg==} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: true /@types/yauzl@2.10.3: resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} requiresBuild: true dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 dev: false optional: true @@ -16239,7 +16236,7 @@ packages: outdent: 0.8.0 dev: true - /@vanilla-extract/integration@6.2.1(@types/node@18.11.18): + /@vanilla-extract/integration@6.2.1(@types/node@20.14.14): resolution: {integrity: sha512-+xYJz07G7TFAMZGrOqArOsURG+xcYvqctujEkANjw2McCBvGEK505RxQqOuNiA9Mi9hgGdNp2JedSa94f3eoLg==} dependencies: '@babel/core': 7.22.17 @@ -16253,8 +16250,8 @@ packages: lodash: 4.17.21 mlly: 1.7.1 outdent: 0.8.0 - vite: 4.4.9(@types/node@18.11.18) - vite-node: 0.28.5(@types/node@18.11.18) + vite: 4.4.9(@types/node@20.14.14) + vite-node: 0.28.5(@types/node@20.14.14) transitivePeerDependencies: - '@types/node' - less @@ -19507,7 +19504,7 @@ packages: dependencies: '@types/cookie': 0.4.1 '@types/cors': 2.8.17 - '@types/node': 18.19.20 + '@types/node': 20.14.14 accepts: 1.3.8 base64id: 2.0.0 cookie: 0.4.2 @@ -22716,7 +22713,7 @@ packages: resolution: {integrity: sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==} engines: {node: '>= 10.13.0'} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 merge-stream: 2.0.0 supports-color: 8.1.1 @@ -25625,7 +25622,7 @@ packages: dependencies: lilconfig: 2.1.0 postcss: 8.4.29 - ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2) 
+ ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2) yaml: 2.3.1 dev: true @@ -25643,7 +25640,7 @@ packages: dependencies: lilconfig: 2.1.0 postcss: 8.4.44 - ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2) + ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2) yaml: 2.3.1 /postcss-loader@8.1.1(postcss@8.4.44)(typescript@5.2.2)(webpack@5.88.2): @@ -26203,7 +26200,7 @@ packages: '@protobufjs/path': 1.1.2 '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 - '@types/node': 18.19.20 + '@types/node': 20.14.14 long: 5.2.3 /proxy-addr@2.0.7: @@ -28569,7 +28566,7 @@ packages: resolution: {integrity: sha512-cYjgBM2SY/dTm8Lr6eMyyONaHTZHA/QjHxFUIW5WH8FevSRIGAVtXEmBkUXF1fsqe7QvvRgQSGSJZmjDacegGg==} engines: {node: '>=12.*'} dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 qs: 6.11.0 dev: false @@ -29375,7 +29372,7 @@ packages: /ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} - /ts-node@10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2): + /ts-node@10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2): resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} hasBin: true peerDependencies: @@ -29395,7 +29392,7 @@ packages: '@tsconfig/node12': 1.0.11 '@tsconfig/node14': 1.0.3 '@tsconfig/node16': 1.0.3 - '@types/node': 18.11.18 + '@types/node': 20.14.14 acorn: 8.10.0 acorn-walk: 8.2.0 arg: 4.1.3 @@ -30439,31 +30436,7 @@ packages: d3-timer: 3.0.1 dev: false - /vite-node@0.28.5(@types/node@18.11.18): - resolution: {integrity: sha512-LmXb9saMGlrMZbXTvOveJKwMTBTNUH66c8rJnQ0ZPNX+myPEol64+szRzXtV5ORb0Hb/91yq+/D3oERoyAt6LA==} - engines: {node: '>=v14.16.0'} - hasBin: true - dependencies: - cac: 6.7.14 - debug: 4.3.7 - mlly: 1.7.1 - pathe: 1.1.2 - picocolors: 1.0.1 - source-map: 0.6.1 - source-map-support: 0.5.21 - 
vite: 4.4.9(@types/node@18.11.18) - transitivePeerDependencies: - - '@types/node' - - less - - lightningcss - - sass - - stylus - - sugarss - - supports-color - - terser - dev: true - - /vite-node@0.28.5(@types/node@18.19.20): + /vite-node@0.28.5(@types/node@20.14.14): resolution: {integrity: sha512-LmXb9saMGlrMZbXTvOveJKwMTBTNUH66c8rJnQ0ZPNX+myPEol64+szRzXtV5ORb0Hb/91yq+/D3oERoyAt6LA==} engines: {node: '>=v14.16.0'} hasBin: true @@ -30475,7 +30448,7 @@ packages: picocolors: 1.0.1 source-map: 0.6.1 source-map-support: 0.5.21 - vite: 4.4.9(@types/node@18.19.20) + vite: 4.4.9(@types/node@20.14.14) transitivePeerDependencies: - '@types/node' - less @@ -30487,7 +30460,7 @@ packages: - terser dev: true - /vite-node@1.4.0(@types/node@18.11.18): + /vite-node@1.4.0(@types/node@20.14.14): resolution: {integrity: sha512-VZDAseqjrHgNd4Kh8icYHWzTKSCZMhia7GyHfhtzLW33fZlG9SwsB6CEhgyVOWkJfJ2pFLrp/Gj1FSfAiqH9Lw==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true @@ -30496,7 +30469,7 @@ packages: debug: 4.3.7 pathe: 1.1.2 picocolors: 1.0.1 - vite: 5.2.7(@types/node@18.11.18) + vite: 5.2.7(@types/node@20.14.14) transitivePeerDependencies: - '@types/node' - less @@ -30572,40 +30545,6 @@ packages: - typescript dev: true - /vite@4.1.4(@types/node@18.19.20): - resolution: {integrity: sha512-3knk/HsbSTKEin43zHu7jTwYWv81f8kgAL99G5NWBcA1LKvtvcVAC4JjBH1arBunO9kQka+1oGbrMKOjk4ZrBg==} - engines: {node: ^14.18.0 || >=16.0.0} - hasBin: true - peerDependencies: - '@types/node': '>= 14' - less: '*' - sass: '*' - stylus: '*' - sugarss: '*' - terser: ^5.4.0 - peerDependenciesMeta: - '@types/node': - optional: true - less: - optional: true - sass: - optional: true - stylus: - optional: true - sugarss: - optional: true - terser: - optional: true - dependencies: - '@types/node': 18.19.20 - esbuild: 0.16.17 - postcss: 8.4.44 - resolve: 1.22.8 - rollup: 3.10.0 - optionalDependencies: - fsevents: 2.3.3 - dev: true - /vite@4.1.4(@types/node@20.14.14): resolution: {integrity: 
sha512-3knk/HsbSTKEin43zHu7jTwYWv81f8kgAL99G5NWBcA1LKvtvcVAC4JjBH1arBunO9kQka+1oGbrMKOjk4ZrBg==} engines: {node: ^14.18.0 || >=16.0.0} @@ -30640,43 +30579,7 @@ packages: fsevents: 2.3.3 dev: true - /vite@4.4.9(@types/node@18.11.18): - resolution: {integrity: sha512-2mbUn2LlUmNASWwSCNSJ/EG2HuSRTnVNaydp6vMCm5VIqJsjMfbIWtbH2kDuwUVW5mMUKKZvGPX/rqeqVvv1XA==} - engines: {node: ^14.18.0 || >=16.0.0} - hasBin: true - peerDependencies: - '@types/node': '>= 14' - less: '*' - lightningcss: ^1.21.0 - sass: '*' - stylus: '*' - sugarss: '*' - terser: ^5.4.0 - peerDependenciesMeta: - '@types/node': - optional: true - less: - optional: true - lightningcss: - optional: true - sass: - optional: true - stylus: - optional: true - sugarss: - optional: true - terser: - optional: true - dependencies: - '@types/node': 18.11.18 - esbuild: 0.18.11 - postcss: 8.4.44 - rollup: 3.29.1 - optionalDependencies: - fsevents: 2.3.3 - dev: true - - /vite@4.4.9(@types/node@18.19.20): + /vite@4.4.9(@types/node@20.14.14): resolution: {integrity: sha512-2mbUn2LlUmNASWwSCNSJ/EG2HuSRTnVNaydp6vMCm5VIqJsjMfbIWtbH2kDuwUVW5mMUKKZvGPX/rqeqVvv1XA==} engines: {node: ^14.18.0 || >=16.0.0} hasBin: true @@ -30704,7 +30607,7 @@ packages: terser: optional: true dependencies: - '@types/node': 18.19.20 + '@types/node': 20.14.14 esbuild: 0.18.11 postcss: 8.4.44 rollup: 3.29.1 @@ -30712,42 +30615,6 @@ packages: fsevents: 2.3.3 dev: true - /vite@5.2.7(@types/node@18.11.18): - resolution: {integrity: sha512-k14PWOKLI6pMaSzAuGtT+Cf0YmIx12z9YGon39onaJNy8DLBfBJrzg9FQEmkAM5lpHBZs9wksWAsyF/HkpEwJA==} - engines: {node: ^18.0.0 || >=20.0.0} - hasBin: true - peerDependencies: - '@types/node': ^18.0.0 || >=20.0.0 - less: '*' - lightningcss: ^1.21.0 - sass: '*' - stylus: '*' - sugarss: '*' - terser: ^5.4.0 - peerDependenciesMeta: - '@types/node': - optional: true - less: - optional: true - lightningcss: - optional: true - sass: - optional: true - stylus: - optional: true - sugarss: - optional: true - terser: - optional: true - 
dependencies: - '@types/node': 18.11.18 - esbuild: 0.20.2 - postcss: 8.4.44 - rollup: 4.13.2 - optionalDependencies: - fsevents: 2.3.3 - dev: true - /vite@5.2.7(@types/node@20.14.14): resolution: {integrity: sha512-k14PWOKLI6pMaSzAuGtT+Cf0YmIx12z9YGon39onaJNy8DLBfBJrzg9FQEmkAM5lpHBZs9wksWAsyF/HkpEwJA==} engines: {node: ^18.0.0 || >=20.0.0} @@ -30808,7 +30675,7 @@ packages: dependencies: '@types/chai': 4.3.4 '@types/chai-subset': 1.3.3 - '@types/node': 18.19.20 + '@types/node': 20.14.14 '@vitest/expect': 0.28.5 '@vitest/runner': 0.28.5 '@vitest/spy': 0.28.5 @@ -30827,8 +30694,8 @@ packages: tinybench: 2.3.1 tinypool: 0.3.1 tinyspy: 1.0.2 - vite: 4.1.4(@types/node@18.19.20) - vite-node: 0.28.5(@types/node@18.19.20) + vite: 4.1.4(@types/node@20.14.14) + vite-node: 0.28.5(@types/node@20.14.14) why-is-node-running: 2.2.2 transitivePeerDependencies: - less @@ -30840,7 +30707,7 @@ packages: - terser dev: true - /vitest@1.4.0(@types/node@18.11.18): + /vitest@1.4.0(@types/node@20.14.14): resolution: {integrity: sha512-gujzn0g7fmwf83/WzrDTnncZt2UiXP41mHuFYFrdwaLRVQ6JYQEiME2IfEjU3vcFL3VKa75XhI3lFgn+hfVsQw==} engines: {node: ^18.0.0 || >=20.0.0} hasBin: true @@ -30865,7 +30732,7 @@ packages: jsdom: optional: true dependencies: - '@types/node': 18.11.18 + '@types/node': 20.14.14 '@vitest/expect': 1.4.0 '@vitest/runner': 1.4.0 '@vitest/snapshot': 1.4.0 @@ -30883,8 +30750,8 @@ packages: strip-literal: 2.1.0 tinybench: 2.6.0 tinypool: 0.8.3 - vite: 5.2.7(@types/node@18.11.18) - vite-node: 1.4.0(@types/node@18.11.18) + vite: 5.2.7(@types/node@20.14.14) + vite-node: 1.4.0(@types/node@20.14.14) why-is-node-running: 2.2.2 transitivePeerDependencies: - less From eb696419daf392986805948eeae4a12ec9bd17eb Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:07:03 +0000 Subject: [PATCH 257/485] separate event bus handler singleton and run failure alerts --- apps/webapp/app/entry.server.tsx | 3 + apps/webapp/app/v3/runEngine.server.ts 
| 261 --------------- .../webapp/app/v3/runEngineHandlers.server.ts | 296 ++++++++++++++++++ 3 files changed, 299 insertions(+), 261 deletions(-) create mode 100644 apps/webapp/app/v3/runEngineHandlers.server.ts diff --git a/apps/webapp/app/entry.server.tsx b/apps/webapp/app/entry.server.tsx index 4a4330c9f2..a937c24ba7 100644 --- a/apps/webapp/app/entry.server.tsx +++ b/apps/webapp/app/entry.server.tsx @@ -205,6 +205,8 @@ process.on("uncaughtException", (error, origin) => { const sqsEventConsumer = singleton("sqsEventConsumer", getSharedSqsEventConsumer); +singleton("RunEngineEventBusHandlers", registerRunEngineEventBusHandlers); + export { apiRateLimiter } from "./services/apiRateLimit.server"; export { socketIo } from "./v3/handleSocketIo.server"; export { wss } from "./v3/handleWebsockets.server"; @@ -214,6 +216,7 @@ import { eventLoopMonitor } from "./eventLoopMonitor.server"; import { env } from "./env.server"; import { logger } from "./services/logger.server"; import { Prisma } from "./db.server"; +import { registerRunEngineEventBusHandlers } from "./v3/runEngineHandlers.server"; if (env.EVENT_LOOP_MONITOR_ENABLED === "1") { eventLoopMonitor.enable(); diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index e67098661e..fffc38f6b1 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -3,12 +3,6 @@ import { prisma } from "~/db.server"; import { env } from "~/env.server"; import { tracer } from "./tracer.server"; import { singleton } from "~/utils/singleton"; -import { createExceptionPropertiesFromError, eventRepository } from "./eventRepository.server"; -import { createJsonErrorObject, sanitizeError } from "@trigger.dev/core/v3"; -import { logger } from "~/services/logger.server"; -import { safeJsonParse } from "~/utils/json"; -import type { Attributes } from "@opentelemetry/api"; -import { reportInvocationUsage } from "~/services/platform.v3.server"; export const engine = 
singleton("RunEngine", createRunEngine); @@ -45,260 +39,5 @@ function createRunEngine() { tracer, }); - engine.eventBus.on("runSucceeded", async ({ time, run }) => { - try { - const completedEvent = await eventRepository.completeEvent(run.spanId, { - endTime: time, - attributes: { - isError: false, - output: - run.outputType === "application/store" || run.outputType === "text/plain" - ? run.output - : run.output - ? (safeJsonParse(run.output) as Attributes) - : undefined, - outputType: run.outputType, - }, - }); - - if (!completedEvent) { - logger.error("[runFailed] Failed to complete event for unknown reason", { - runId: run.id, - spanId: run.spanId, - }); - return; - } - } catch (error) { - logger.error("[runSucceeded] Failed to complete event", { - error: error instanceof Error ? error.message : error, - runId: run.id, - spanId: run.spanId, - }); - } - }); - - engine.eventBus.on("runFailed", async ({ time, run }) => { - try { - const sanitizedError = sanitizeError(run.error); - const exception = createExceptionPropertiesFromError(sanitizedError); - - const completedEvent = await eventRepository.completeEvent(run.spanId, { - endTime: time, - attributes: { - isError: true, - }, - events: [ - { - name: "exception", - time, - properties: { - exception, - }, - }, - ], - }); - - if (!completedEvent) { - logger.error("[runFailed] Failed to complete event for unknown reason", { - runId: run.id, - spanId: run.spanId, - }); - return; - } - - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: completedEvent?.runId, - }); - - await Promise.all( - inProgressEvents.map((event) => { - try { - const completedEvent = eventRepository.completeEvent(event.spanId, { - endTime: time, - attributes: { - isError: true, - }, - events: [ - { - name: "exception", - time, - properties: { - exception, - }, - }, - ], - }); - - if (!completedEvent) { - logger.error("[runFailed] Failed to complete in-progress event for unknown reason", { - runId: run.id, - spanId: 
run.spanId, - eventId: event.id, - }); - return; - } - } catch (error) { - logger.error("[runFailed] Failed to complete in-progress event", { - error: error instanceof Error ? error.message : error, - runId: run.id, - spanId: run.spanId, - eventId: event.id, - }); - } - }) - ); - } catch (error) { - logger.error("[runFailed] Failed to complete event", { - error: error instanceof Error ? error.message : error, - runId: run.id, - spanId: run.spanId, - }); - } - }); - - engine.eventBus.on("runExpired", async ({ time, run }) => { - try { - const completedEvent = await eventRepository.completeEvent(run.spanId, { - endTime: time, - attributes: { - isError: true, - }, - events: [ - { - name: "exception", - time, - properties: { - exception: { - message: `Run expired because the TTL (${run.ttl}) was reached`, - }, - }, - }, - ], - }); - - if (!completedEvent) { - logger.error("[runFailed] Failed to complete event for unknown reason", { - runId: run.id, - spanId: run.spanId, - }); - return; - } - } catch (error) { - logger.error("[runExpired] Failed to complete event", { - error: error instanceof Error ? error.message : error, - runId: run.id, - spanId: run.spanId, - }); - } - }); - - engine.eventBus.on("runCancelled", async ({ time, run }) => { - try { - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: run.friendlyId, - }); - - await Promise.all( - inProgressEvents.map((event) => { - const error = createJsonErrorObject(run.error); - return eventRepository.cancelEvent(event, time, error.message); - }) - ); - } catch (error) { - logger.error("[runCancelled] Failed to cancel event", { - error: error instanceof Error ? 
error.message : error, - runId: run.id, - spanId: run.spanId, - }); - } - }); - - engine.eventBus.on("runRetryScheduled", async ({ time, run, environment, retryAt }) => { - try { - await eventRepository.recordEvent(`Retry #${run.attemptNumber} delay`, { - taskSlug: run.taskIdentifier, - environment, - attributes: { - properties: { - retryAt: retryAt.toISOString(), - }, - runId: run.friendlyId, - style: { - icon: "schedule-attempt", - }, - queueName: run.queue, - }, - context: run.traceContext as Record, - spanIdSeed: `retry-${run.attemptNumber + 1}`, - endTime: retryAt, - }); - } catch (error) { - logger.error("[runRetryScheduled] Failed to record retry event", { - error: error instanceof Error ? error.message : error, - runId: run.id, - spanId: run.spanId, - }); - } - }); - - engine.eventBus.on("runRetryScheduled", async ({ time, run, organization }) => { - try { - if (run.attemptNumber === 1 && run.baseCostInCents > 0) { - await reportInvocationUsage(organization.id, run.baseCostInCents, { runId: run.id }); - } - } catch (error) { - logger.error("[runRetryScheduled] Failed to report invocation usage", { - error: error instanceof Error ? 
error.message : error, - runId: run.id, - orgId: organization.id, - }); - } - }); - - engine.eventBus.on("executionSnapshotCreated", async ({ time, run, snapshot }) => { - try { - const foundRun = await prisma.taskRun.findUnique({ - where: { - id: run.id, - }, - include: { - runtimeEnvironment: { - include: { - project: true, - organization: true, - }, - }, - }, - }); - - if (!foundRun) { - logger.error("Failed to find run", { runId: run.id }); - return; - } - - await eventRepository.recordEvent( - `[ExecutionSnapshot] ${snapshot.executionStatus} - ${snapshot.description}`, - { - environment: foundRun.runtimeEnvironment, - taskSlug: foundRun.taskIdentifier, - context: foundRun.traceContext as Record, - attributes: { - runId: foundRun.friendlyId, - isDebug: true, - properties: { - snapshot, - }, - }, - duration: 0, - } - ); - } catch (error) { - logger.error("[executionSnapshotCreated] Failed to record event", { - error: error instanceof Error ? error.message : error, - runId: run.id, - }); - } - }); - return engine; } diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts new file mode 100644 index 0000000000..7410ecf9ec --- /dev/null +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -0,0 +1,296 @@ +import { prisma } from "~/db.server"; +import { createExceptionPropertiesFromError, eventRepository } from "./eventRepository.server"; +import { createJsonErrorObject, sanitizeError } from "@trigger.dev/core/v3"; +import { logger } from "~/services/logger.server"; +import { safeJsonParse } from "~/utils/json"; +import type { Attributes } from "@opentelemetry/api"; +import { reportInvocationUsage } from "~/services/platform.v3.server"; +import { socketIo } from "./handleSocketIo.server"; +import { engine } from "./runEngine.server"; +import { PerformTaskRunAlertsService } from "./services/alerts/performTaskRunAlerts.server"; + +export function registerRunEngineEventBusHandlers() { + 
engine.eventBus.on("runSucceeded", async ({ time, run }) => { + try { + const completedEvent = await eventRepository.completeEvent(run.spanId, { + endTime: time, + attributes: { + isError: false, + output: + run.outputType === "application/store" || run.outputType === "text/plain" + ? run.output + : run.output + ? (safeJsonParse(run.output) as Attributes) + : undefined, + outputType: run.outputType, + }, + }); + + if (!completedEvent) { + logger.error("[runSucceeded] Failed to complete event for unknown reason", { + runId: run.id, + spanId: run.spanId, + }); + return; + } + } catch (error) { + logger.error("[runSucceeded] Failed to complete event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + spanId: run.spanId, + }); + } + }); + + // Handle alerts + engine.eventBus.on("runFailed", async ({ time, run }) => { + try { + await PerformTaskRunAlertsService.enqueue(run.id, prisma); + } catch (error) { + logger.error("[runFailed] Failed to enqueue alerts", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + spanId: run.spanId, + }); + } + }); + + // Handle events + engine.eventBus.on("runFailed", async ({ time, run }) => { + try { + const sanitizedError = sanitizeError(run.error); + const exception = createExceptionPropertiesFromError(sanitizedError); + + const completedEvent = await eventRepository.completeEvent(run.spanId, { + endTime: time, + attributes: { + isError: true, + }, + events: [ + { + name: "exception", + time, + properties: { + exception, + }, + }, + ], + }); + + if (!completedEvent) { + logger.error("[runFailed] Failed to complete event for unknown reason", { + runId: run.id, + spanId: run.spanId, + }); + return; + } + + const inProgressEvents = await eventRepository.queryIncompleteEvents({ + runId: completedEvent?.runId, + }); + + await Promise.all( + inProgressEvents.map((event) => { + try { + const completedEvent = eventRepository.completeEvent(event.spanId, { + endTime: time, + attributes: { + isError: true, + }, + events: [ + { + name: "exception", + time, + properties: { + exception, + }, + }, + ], + }); + + if (!completedEvent) { + logger.error("[runFailed] Failed to complete in-progress event for unknown reason", { + runId: run.id, + spanId: run.spanId, + eventId: event.id, + }); + return; + } + } catch (error) { + logger.error("[runFailed] Failed to complete in-progress event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + spanId: run.spanId, + eventId: event.id, + }); + } + }) + ); + } catch (error) { + logger.error("[runFailed] Failed to complete event", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + spanId: run.spanId, + }); + } + }); + + engine.eventBus.on("runExpired", async ({ time, run }) => { + try { + const completedEvent = await eventRepository.completeEvent(run.spanId, { + endTime: time, + attributes: { + isError: true, + }, + events: [ + { + name: "exception", + time, + properties: { + exception: { + message: `Run expired because the TTL (${run.ttl}) was reached`, + }, + }, + }, + ], + }); + + if (!completedEvent) { + logger.error("[runFailed] Failed to complete event for unknown reason", { + runId: run.id, + spanId: run.spanId, + }); + return; + } + } catch (error) { + logger.error("[runExpired] Failed to complete event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + spanId: run.spanId, + }); + } + }); + + engine.eventBus.on("runCancelled", async ({ time, run }) => { + try { + const inProgressEvents = await eventRepository.queryIncompleteEvents({ + runId: run.friendlyId, + }); + + await Promise.all( + inProgressEvents.map((event) => { + const error = createJsonErrorObject(run.error); + return eventRepository.cancelEvent(event, time, error.message); + }) + ); + } catch (error) { + logger.error("[runCancelled] Failed to cancel event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + spanId: run.spanId, + }); + } + }); + + engine.eventBus.on("runRetryScheduled", async ({ time, run, environment, retryAt }) => { + try { + await eventRepository.recordEvent(`Retry #${run.attemptNumber} delay`, { + taskSlug: run.taskIdentifier, + environment, + attributes: { + properties: { + retryAt: retryAt.toISOString(), + }, + runId: run.friendlyId, + style: { + icon: "schedule-attempt", + }, + queueName: run.queue, + }, + context: run.traceContext as Record, + spanIdSeed: `retry-${run.attemptNumber + 1}`, + endTime: retryAt, + }); + } catch (error) { + logger.error("[runRetryScheduled] Failed to record retry event", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + spanId: run.spanId, + }); + } + }); + + engine.eventBus.on("runRetryScheduled", async ({ time, run, organization }) => { + try { + if (run.attemptNumber === 1 && run.baseCostInCents > 0) { + await reportInvocationUsage(organization.id, run.baseCostInCents, { runId: run.id }); + } + } catch (error) { + logger.error("[runRetryScheduled] Failed to report invocation usage", { + error: error instanceof Error ? error.message : error, + runId: run.id, + orgId: organization.id, + }); + } + }); + + engine.eventBus.on("executionSnapshotCreated", async ({ time, run, snapshot }) => { + try { + const foundRun = await prisma.taskRun.findUnique({ + where: { + id: run.id, + }, + include: { + runtimeEnvironment: { + include: { + project: true, + organization: true, + }, + }, + }, + }); + + if (!foundRun) { + logger.error("Failed to find run", { runId: run.id }); + return; + } + + await eventRepository.recordEvent( + `[ExecutionSnapshot] ${snapshot.executionStatus} - ${snapshot.description}`, + { + environment: foundRun.runtimeEnvironment, + taskSlug: foundRun.taskIdentifier, + context: foundRun.traceContext as Record, + attributes: { + runId: foundRun.friendlyId, + isDebug: true, + properties: { + snapshotId: snapshot.id, + snapshotDescription: snapshot.description, + snapshotStatus: snapshot.executionStatus, + }, + }, + duration: 0, + } + ); + } catch (error) { + logger.error("[executionSnapshotCreated] Failed to record event", { + error: error instanceof Error ? error.message : error, + runId: run.id, + }); + } + }); + + engine.eventBus.on("workerNotification", async ({ time, run }) => { + logger.debug("[workerNotification] Notifying worker", { time, runId: run.id }); + + try { + socketIo.workerNamespace.to(`run:${run.id}`).emit("run:notify", { version: "1", run }); + } catch (error) { + logger.error("[workerNotification] Failed to notify worker", { + error: error instanceof Error ? 
error.message : error, + runId: run.id, + }); + } + }); +} From 8262320f3327c1a130104740c799fc231ef2a5cd Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:18:10 +0000 Subject: [PATCH 258/485] duration waits --- .../worker/workerGroupTokenService.server.ts | 17 +++ .../run-engine/src/engine/index.ts | 6 + .../src/entryPoints/managed-run-worker.ts | 8 +- .../cli-v3/src/executions/taskRunProcess.ts | 37 ++++++ .../src/v3/runtime/managedRuntimeManager.ts | 125 ++++++++++++++---- packages/core/src/v3/schemas/messages.ts | 25 ++++ packages/core/src/v3/schemas/runEngine.ts | 7 +- packages/core/src/v3/schemas/schemas.ts | 14 ++ 8 files changed, 212 insertions(+), 27 deletions(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 23385cecee..e4d363ef81 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -16,6 +16,7 @@ import { StartRunAttemptResult, ExecutionResult, MachinePreset, + WaitForDurationResult, } from "@trigger.dev/core/v3"; import { env } from "~/env.server"; import { $transaction } from "~/db.server"; @@ -599,6 +600,22 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { return await this._engine.completeRunAttempt({ runId, snapshotId, completion }); } + async waitForDuration({ + runId, + snapshotId, + date, + }: { + runId: string; + snapshotId: string; + date: Date; + }): Promise { + return await this._engine.waitForDuration({ runId, snapshotId, date }); + } + + async getLatestSnapshot({ runId }: { runId: string }) { + return await this._engine.getRunExecutionData({ runId }); + } + toJSON(): WorkerGroupTokenAuthenticationResponse { if (this.type === WorkerInstanceGroupType.MANAGED) { return { diff --git a/internal-packages/run-engine/src/engine/index.ts 
b/internal-packages/run-engine/src/engine/index.ts index 2366dd0093..e1ff9ea82d 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1224,6 +1224,9 @@ export class RunEngine { if (waitpoint.status === "COMPLETED") { return { waitUntil: waitpoint.completedAt ?? new Date(), + waitpoint: { + id: waitpoint.id, + }, ...executionResultFromSnapshot(snapshot), }; } @@ -1246,6 +1249,9 @@ export class RunEngine { return { waitUntil: date, + waitpoint: { + id: waitpoint.id, + }, ...executionResultFromSnapshot(blockResult), }; }); diff --git a/packages/cli-v3/src/entryPoints/managed-run-worker.ts b/packages/cli-v3/src/entryPoints/managed-run-worker.ts index 252a55f422..e1c38c836d 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-worker.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-worker.ts @@ -414,6 +414,12 @@ const zodIpc = new ZodIpcConnection({ FLUSH: async ({ timeoutInMs }, sender) => { await flushAll(timeoutInMs); }, + WAITPOINT_CREATED: async ({ wait, waitpoint }) => { + managedWorkerRuntime.associateWaitWithWaitpoint(wait.id, waitpoint.id); + }, + WAITPOINT_COMPLETED: async ({ waitpoint }) => { + managedWorkerRuntime.completeWaitpoints([waitpoint]); + }, }, }); @@ -461,7 +467,7 @@ async function flushMetadata(timeoutInMs: number = 10_000) { console.log(`Flushed runMetadata in ${duration}ms`); } -const managedWorkerRuntime = new ManagedRuntimeManager(); +const managedWorkerRuntime = new ManagedRuntimeManager(zodIpc); runtime.setGlobalRuntimeManager(managedWorkerRuntime); diff --git a/packages/cli-v3/src/executions/taskRunProcess.ts b/packages/cli-v3/src/executions/taskRunProcess.ts index 33ce18f779..118467ef4a 100644 --- a/packages/cli-v3/src/executions/taskRunProcess.ts +++ b/packages/cli-v3/src/executions/taskRunProcess.ts @@ -1,4 +1,5 @@ import { + CompletedWaitpoint, ExecutorToWorkerMessageCatalog, ServerBackgroundWorker, TaskRunErrorCodes, @@ -40,6 +41,7 @@ export type 
OnWaitForBatchMessage = InferSocketMessageSchema< typeof ExecutorToWorkerMessageCatalog, "WAIT_FOR_BATCH" >; +export type OnWaitMessage = InferSocketMessageSchema; export type TaskRunProcessOptions = { workerManifest: WorkerManifest; @@ -75,6 +77,7 @@ export class TaskRunProcess { public onWaitForDuration: Evt = new Evt(); public onWaitForTask: Evt = new Evt(); public onWaitForBatch: Evt = new Evt(); + public onWait: Evt = new Evt(); constructor(public readonly options: TaskRunProcessOptions) {} @@ -189,6 +192,9 @@ export class TaskRunProcess { UNCAUGHT_EXCEPTION: async (message) => { logger.debug(`[${this.runId}] uncaught exception in task run process`, { ...message }); }, + WAIT: async (message) => { + this.onWait.post(message); + }, }, }); @@ -277,6 +283,37 @@ export class TaskRunProcess { this._ipc?.send("WAIT_COMPLETED_NOTIFICATION", {}); } + waitpointCreated(waitId: string, waitpointId: string) { + if (!this._child?.connected || this._isBeingKilled || this._child.killed) { + console.error( + "Child process not connected or being killed, can't send waitpoint created notification" + ); + return; + } + + this._ipc?.send("WAITPOINT_CREATED", { + wait: { + id: waitId, + }, + waitpoint: { + id: waitpointId, + }, + }); + } + + waitpointCompleted(waitpoint: CompletedWaitpoint) { + if (!this._child?.connected || this._isBeingKilled || this._child.killed) { + console.error( + "Child process not connected or being killed, can't send waitpoint completed notification" + ); + return; + } + + this._ipc?.send("WAITPOINT_COMPLETED", { + waitpoint, + }); + } + async #handleExit(code: number | null, signal: NodeJS.Signals | null) { logger.debug("handling child exit", { code, signal }); diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index 3cc6b4a903..c9b559040c 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -1,41 +1,74 @@ 
import { BatchTaskRunExecutionResult, + CompletedWaitpoint, + RuntimeWait, TaskRunContext, TaskRunExecutionResult, + TaskRunFailedExecutionResult, + TaskRunSuccessfulExecutionResult, } from "../schemas/index.js"; +import { ExecutorToWorkerProcessConnection } from "../zodIpc.js"; import { RuntimeManager } from "./manager.js"; -import { unboundedTimeout } from "../utils/timers.js"; -type Waitpoint = any; +type Resolver = (value: CompletedWaitpoint) => void; export class ManagedRuntimeManager implements RuntimeManager { - private readonly waitpoints: Map = new Map(); - - _taskWaits: Map void }> = new Map(); - - _batchWaits: Map< - string, - { resolve: (value: BatchTaskRunExecutionResult) => void; reject: (err?: any) => void } - > = new Map(); + // Maps a resolver ID to a resolver function + private readonly resolversByWaitId: Map = new Map(); + // Maps a waitpoint ID to a wait ID + private readonly resolversByWaitpoint: Map = new Map(); + + constructor(private ipc: ExecutorToWorkerProcessConnection) { + setTimeout(() => { + console.log("Runtime status", { + resolversbyWaitId: this.resolversByWaitId.keys(), + resolversByWaitpoint: this.resolversByWaitpoint.keys(), + }); + }, 1000); + } disable(): void { // do nothing } async waitForDuration(ms: number): Promise { - await unboundedTimeout(ms); + console.log("waitForDuration", ms); + + const wait = { + type: "DATETIME", + id: crypto.randomUUID(), + date: new Date(Date.now() + ms), + } satisfies RuntimeWait; + + const promise = new Promise((resolve) => { + this.resolversByWaitId.set(wait.id, resolve); + }); + + // Send wait to parent process + this.ipc.send("WAIT", { wait }); + + await promise; } async waitUntil(date: Date): Promise { return this.waitForDuration(date.getTime() - Date.now()); } - async waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { - const promise = new Promise((resolve) => { - this._taskWaits.set(params.id, { resolve }); + async waitForTask(params: { + id: string; + internalId?: 
string; + ctx: TaskRunContext; + }): Promise { + console.log("waitForTask", params); + + const promise = new Promise((resolve) => { + this.resolversByWaitId.set(params.internalId ?? params.id, resolve); }); - return await promise; + const waitpoint = await promise; + const result = this.waitpointToTaskRunExecutionResult(waitpoint); + + return result; } async waitForBatch(params: { @@ -43,39 +76,81 @@ export class ManagedRuntimeManager implements RuntimeManager { runs: string[]; ctx: TaskRunContext; }): Promise { + console.log("waitForBatch", params); + if (!params.runs.length) { return Promise.resolve({ id: params.id, items: [] }); } const promise = Promise.all( params.runs.map((runId) => { - return new Promise((resolve, reject) => { - this._taskWaits.set(runId, { resolve }); + return new Promise((resolve, reject) => { + this.resolversByWaitId.set(runId, resolve); }); }) ); - const results = await promise; + const waitpoints = await promise; return { id: params.id, - items: results, + items: waitpoints.map(this.waitpointToTaskRunExecutionResult), }; } - async completeWaitpoints(waitpoints: Waitpoint[]): Promise { + associateWaitWithWaitpoint(waitId: string, waitpointId: string) { + this.resolversByWaitpoint.set(waitpointId, waitId); + } + + async completeWaitpoints(waitpoints: CompletedWaitpoint[]): Promise { await Promise.all(waitpoints.map((waitpoint) => this.completeWaitpoint(waitpoint))); } - private completeWaitpoint(waitpoint: Waitpoint): void { - const wait = this._taskWaits.get(waitpoint.id); + private completeWaitpoint(waitpoint: CompletedWaitpoint): void { + console.log("completeWaitpoint", waitpoint); + + const waitId = waitpoint.completedByTaskRunId ?? 
this.resolversByWaitpoint.get(waitpoint.id); + + if (!waitId) { + // TODO: Handle failures better + console.log("No waitId found for waitpoint", waitpoint); + return; + } + + const resolve = this.resolversByWaitId.get(waitId); - if (!wait) { + if (!resolve) { + // TODO: Handle failures better + console.log("No resolver found for waitId", waitId); return; } - wait.resolve(waitpoint.completion); + console.log("Resolving waitpoint", waitpoint); + + resolve(waitpoint); - this._taskWaits.delete(waitpoint.id); + this.resolversByWaitId.delete(waitId); + } + + private waitpointToTaskRunExecutionResult(waitpoint: CompletedWaitpoint): TaskRunExecutionResult { + if (waitpoint.outputIsError) { + return { + ok: false, + id: waitpoint.id, + error: waitpoint.output + ? JSON.parse(waitpoint.output) + : { + type: "STRING_ERROR", + message: "Missing error output", + }, + } satisfies TaskRunFailedExecutionResult; + } else { + return { + ok: true, + id: waitpoint.id, + output: waitpoint.output, + outputType: waitpoint.outputType ?? 
"application/json", + } satisfies TaskRunSuccessfulExecutionResult; + } } } diff --git a/packages/core/src/v3/schemas/messages.ts b/packages/core/src/v3/schemas/messages.ts index 31d6dc3e4f..3a25cf0964 100644 --- a/packages/core/src/v3/schemas/messages.ts +++ b/packages/core/src/v3/schemas/messages.ts @@ -12,9 +12,11 @@ import { EnvironmentType, ProdTaskRunExecution, ProdTaskRunExecutionPayload, + RuntimeWait, TaskRunExecutionLazyAttemptPayload, WaitReason, } from "./schemas.js"; +import { CompletedWaitpoint } from "./runEngine.js"; const ackCallbackResult = z.discriminatedUnion("success", [ z.object({ @@ -191,6 +193,12 @@ export const ExecutorToWorkerMessageCatalog = { UNCAUGHT_EXCEPTION: { message: UncaughtExceptionMessage, }, + WAIT: { + message: z.object({ + version: z.literal("v1").default("v1"), + wait: RuntimeWait, + }), + }, }; export const WorkerToExecutorMessageCatalog = { @@ -226,6 +234,23 @@ export const WorkerToExecutorMessageCatalog = { }), callback: z.void(), }, + WAITPOINT_CREATED: { + message: z.object({ + version: z.literal("v1").default("v1"), + wait: z.object({ + id: z.string(), + }), + waitpoint: z.object({ + id: z.string(), + }), + }), + }, + WAITPOINT_COMPLETED: { + message: z.object({ + version: z.literal("v1").default("v1"), + waitpoint: CompletedWaitpoint, + }), + }, }; export const ProviderToPlatformMessages = { diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index 7c60a45582..d59f4af51c 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -47,7 +47,7 @@ export const WaitpointType = { export type WaitpointType = (typeof WaitpointType)[keyof typeof WaitpointType]; -const CompletedWaitpoint = z.object({ +export const CompletedWaitpoint = z.object({ id: z.string(), type: z.enum(Object.values(WaitpointType) as [WaitpointType]), completedAt: z.coerce.date(), @@ -61,6 +61,8 @@ const CompletedWaitpoint = z.object({ outputIsError: z.boolean(), 
}); +export type CompletedWaitpoint = z.infer; + const ExecutionSnapshot = z.object({ id: z.string(), executionStatus: z.enum(Object.values(TaskRunExecutionStatus) as [TaskRunExecutionStatus]), @@ -169,6 +171,9 @@ export const WaitForDurationResult = z Use this date to determine when to continue. */ waitUntil: z.coerce.date(), + waitpoint: z.object({ + id: z.string(), + }), }) .and(ExecutionResult); export type WaitForDurationResult = z.infer; diff --git a/packages/core/src/v3/schemas/schemas.ts b/packages/core/src/v3/schemas/schemas.ts index 42edf8602e..0256a1f19b 100644 --- a/packages/core/src/v3/schemas/schemas.ts +++ b/packages/core/src/v3/schemas/schemas.ts @@ -240,3 +240,17 @@ export const TaskRunExecutionLazyAttemptPayload = z.object({ }); export type TaskRunExecutionLazyAttemptPayload = z.infer; + +export const RuntimeWait = z.discriminatedUnion("type", [ + z.object({ + type: z.literal("DATETIME"), + id: z.string(), + date: z.coerce.date(), + }), + z.object({ + type: z.literal("MANUAL"), + id: z.string(), + }), +]); + +export type RuntimeWait = z.infer; From 05b7e30bed3978dcb4acb4edded5e1aa0ea35794 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:19:01 +0000 Subject: [PATCH 259/485] fix execution snapshot debug spans --- apps/webapp/app/utils/taskEvent.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/webapp/app/utils/taskEvent.ts b/apps/webapp/app/utils/taskEvent.ts index 41a1703c25..8b2655559e 100644 --- a/apps/webapp/app/utils/taskEvent.ts +++ b/apps/webapp/app/utils/taskEvent.ts @@ -66,7 +66,6 @@ export function prepareTrace(events: TaskEvent[]): TraceSummary | undefined { id: event.spanId, parentId: event.parentId ?? 
undefined, runId: event.runId, - idempotencyKey: event.idempotencyKey, data: { message: event.message, style: event.style, @@ -78,8 +77,9 @@ export function prepareTrace(events: TaskEvent[]): TraceSummary | undefined { level: event.level, events: event.events, environmentType: event.environmentType, + isDebug: event.isDebug, }, - }; + } satisfies SpanSummary; spansBySpanId.set(event.spanId, span); From a6d5ec47e69aba30b7967a315c7dab51995ad622 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:23:02 +0000 Subject: [PATCH 260/485] task waits --- apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts | 1 + apps/webapp/app/v3/services/triggerTaskV2.server.ts | 3 ++- internal-packages/run-engine/src/engine/index.ts | 1 + packages/core/src/v3/runtime/index.ts | 6 +++++- packages/core/src/v3/runtime/manager.ts | 6 +++++- packages/core/src/v3/schemas/api.ts | 1 + packages/core/src/v3/schemas/common.ts | 1 + packages/trigger-sdk/src/v3/shared.ts | 8 +++++++- 8 files changed, 23 insertions(+), 4 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts index e6e3398e69..8cd4681154 100644 --- a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts +++ b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts @@ -96,6 +96,7 @@ const { action, loader } = createActionApiRoute( return json( { id: run.friendlyId, + internalId: run.id, }, { headers: $responseHeaders, diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 02786922a6..efcfebebf3 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -92,6 +92,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { if ( existingRun.associatedWaitpoint?.status === "PENDING" && body.options?.resumeParentOnCompletion && + // FIXME: This is currently the 
friendly ID body.options?.parentRunId ) { await this._engine.blockRunWithWaitpoint({ @@ -164,7 +165,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { //todo we will pass in the `parentRun` and `resumeParentOnCompletion` const parentRun = body.options?.parentRunId ? await this._prisma.taskRun.findFirst({ - where: { id: body.options.parentRunId }, + where: { friendlyId: body.options.parentRunId }, }) : undefined; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index e1ff9ea82d..7248904bfd 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1086,6 +1086,7 @@ export class RunEngine { }, run: { id: run.friendlyId, + internalId: run.id, payload: run.payload, payloadType: run.payloadType, createdAt: run.createdAt, diff --git a/packages/core/src/v3/runtime/index.ts b/packages/core/src/v3/runtime/index.ts index 7eecb99296..146c508e46 100644 --- a/packages/core/src/v3/runtime/index.ts +++ b/packages/core/src/v3/runtime/index.ts @@ -33,7 +33,11 @@ export class RuntimeAPI { return usage.pauseAsync(() => this.#getRuntimeManager().waitUntil(date)); } - public waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { + public waitForTask(params: { + id: string; + internalId: string; + ctx: TaskRunContext; + }): Promise { return usage.pauseAsync(() => this.#getRuntimeManager().waitForTask(params)); } diff --git a/packages/core/src/v3/runtime/manager.ts b/packages/core/src/v3/runtime/manager.ts index 56acfe3cf2..856bcddee1 100644 --- a/packages/core/src/v3/runtime/manager.ts +++ b/packages/core/src/v3/runtime/manager.ts @@ -8,7 +8,11 @@ export interface RuntimeManager { disable(): void; waitUntil(date: Date): Promise; waitForDuration(ms: number): Promise; - waitForTask(params: { id: string; ctx: TaskRunContext }): Promise; + waitForTask(params: { + id: string; + internalId?: string; + ctx: TaskRunContext; + }): Promise; 
waitForBatch(params: { id: string; runs: string[]; diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index a0f9ad5ebd..96d82c0dc9 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -114,6 +114,7 @@ export type TriggerTaskRequestBody = z.infer; export const TriggerTaskResponse = z.object({ id: z.string(), + internalId: z.string(), }); export type TriggerTaskResponse = z.infer; diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 1f9f18f4c3..60157ca293 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -228,6 +228,7 @@ export const TaskRunExecution = z.object({ run: TaskRun.and( z.object({ traceContext: z.record(z.unknown()).optional(), + internalId: z.string().optional(), }) ), queue: TaskRunExecutionQueue, diff --git a/packages/trigger-sdk/src/v3/shared.ts b/packages/trigger-sdk/src/v3/shared.ts index f7513f8c49..5e61f72f96 100644 --- a/packages/trigger-sdk/src/v3/shared.ts +++ b/packages/trigger-sdk/src/v3/shared.ts @@ -1220,7 +1220,7 @@ async function trigger_internal( } ); - return handle as RunHandleFromTypes; + return handle as any as RunHandleFromTypes; } async function batchTrigger_internal( @@ -1352,6 +1352,8 @@ async function triggerAndWait_internal Date: Fri, 13 Dec 2024 12:25:48 +0000 Subject: [PATCH 261/485] fix event bus types --- internal-packages/run-engine/src/engine/eventBus.ts | 7 ++++++- internal-packages/run-engine/src/engine/index.ts | 6 ++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 70c0293638..7c54638935 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -54,10 +54,15 @@ export type EventBusEvents = { run: { id: string; friendlyId: string; + spanId: string; 
attemptNumber: number; queue: string; traceContext: Record; taskIdentifier: string; + baseCostInCents: number; + }; + organization: { + id: string; }; environment: AuthenticatedEnvironment; retryAt: Date; @@ -68,12 +73,12 @@ export type EventBusEvents = { time: Date; run: { id: string; + friendlyId: string; spanId: string; error: TaskRunError; }; }, ]; - //todo send socket message to the worker workerNotification: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 7248904bfd..5f2ebe1045 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1360,6 +1360,7 @@ export class RunEngine { time: new Date(), run: { id: run.id, + friendlyId: run.friendlyId, spanId: run.spanId, error, }, @@ -2216,6 +2217,11 @@ export class RunEngine { queue: run.queue, taskIdentifier: run.taskIdentifier, traceContext: run.traceContext as Record, + baseCostInCents: run.baseCostInCents, + spanId: run.spanId, + }, + organization: { + id: run.runtimeEnvironment.organizationId, }, environment: run.runtimeEnvironment, retryAt, From 4d41221db3cdf8022ef6dfef7a701a916d1bf479 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:32:26 +0000 Subject: [PATCH 262/485] temporary fix for react hook run handle type --- packages/react-hooks/src/hooks/useTaskTrigger.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/react-hooks/src/hooks/useTaskTrigger.ts b/packages/react-hooks/src/hooks/useTaskTrigger.ts index a65c274d4b..e313e5551a 100644 --- a/packages/react-hooks/src/hooks/useTaskTrigger.ts +++ b/packages/react-hooks/src/hooks/useTaskTrigger.ts @@ -99,7 +99,8 @@ export function useTaskTrigger( mutation.trigger({ payload, options }); }, isLoading: mutation.isMutating, - handle: mutation.data as RunHandleFromTypes>, + // FIXME: This is a temporary workaround to avoid type 
errors + handle: mutation.data as any as RunHandleFromTypes>, error: mutation.error, }; } From d707a948699d5e0a0ccc819158a9cf5bf9ad1ce9 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:32:52 +0000 Subject: [PATCH 263/485] disable run notifications for now --- apps/webapp/app/v3/runEngineHandlers.server.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index 7410ecf9ec..fedfad2417 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -285,7 +285,8 @@ export function registerRunEngineEventBusHandlers() { logger.debug("[workerNotification] Notifying worker", { time, runId: run.id }); try { - socketIo.workerNamespace.to(`run:${run.id}`).emit("run:notify", { version: "1", run }); + // TODO: re-enable this + // socketIo.workerNamespace.to(`run:${run.id}`).emit("run:notify", { version: "1", run }); } catch (error) { logger.error("[workerNotification] Failed to notify worker", { error: error instanceof Error ? 
error.message : error, From 3cee74c07c6631e5e942b7189a8a3ef48aef5aa8 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:41:48 +0000 Subject: [PATCH 264/485] convert any typecasts to expect errors to more easily fix later --- packages/react-hooks/src/hooks/useTaskTrigger.ts | 4 ++-- packages/trigger-sdk/src/v3/shared.ts | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/react-hooks/src/hooks/useTaskTrigger.ts b/packages/react-hooks/src/hooks/useTaskTrigger.ts index e313e5551a..ce35b37120 100644 --- a/packages/react-hooks/src/hooks/useTaskTrigger.ts +++ b/packages/react-hooks/src/hooks/useTaskTrigger.ts @@ -99,8 +99,8 @@ export function useTaskTrigger( mutation.trigger({ payload, options }); }, isLoading: mutation.isMutating, - // FIXME: This is a temporary workaround to avoid type errors - handle: mutation.data as any as RunHandleFromTypes>, + // @ts-expect-error + handle: mutation.data as RunHandleFromTypes>, error: mutation.error, }; } diff --git a/packages/trigger-sdk/src/v3/shared.ts b/packages/trigger-sdk/src/v3/shared.ts index 5e61f72f96..400ae6758a 100644 --- a/packages/trigger-sdk/src/v3/shared.ts +++ b/packages/trigger-sdk/src/v3/shared.ts @@ -1220,7 +1220,8 @@ async function trigger_internal( } ); - return handle as any as RunHandleFromTypes; + // @ts-expect-error + return handle as RunHandleFromTypes; } async function batchTrigger_internal( From b4739b7f6b285bb5353b07a0ab2a8e4256ae85cd Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 13 Dec 2024 12:42:25 +0000 Subject: [PATCH 265/485] fix webapp types after node types upgrade --- apps/webapp/app/presenters/v3/RunStreamPresenter.server.ts | 2 +- apps/webapp/app/presenters/v3/TasksStreamPresenter.server.ts | 2 +- apps/webapp/app/utils/sse.server.ts | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/webapp/app/presenters/v3/RunStreamPresenter.server.ts 
b/apps/webapp/app/presenters/v3/RunStreamPresenter.server.ts index 5df64c9ae3..c340903076 100644 --- a/apps/webapp/app/presenters/v3/RunStreamPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/RunStreamPresenter.server.ts @@ -39,7 +39,7 @@ export class RunStreamPresenter { traceId: run.traceId, }); - let pinger: NodeJS.Timer | undefined = undefined; + let pinger: NodeJS.Timeout | undefined = undefined; const { unsubscribe, eventEmitter } = await eventRepository.subscribeToTrace(run.traceId); diff --git a/apps/webapp/app/presenters/v3/TasksStreamPresenter.server.ts b/apps/webapp/app/presenters/v3/TasksStreamPresenter.server.ts index c7318c3400..b01587d77f 100644 --- a/apps/webapp/app/presenters/v3/TasksStreamPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/TasksStreamPresenter.server.ts @@ -58,7 +58,7 @@ export class TasksStreamPresenter { projectSlug, }); - let pinger: NodeJS.Timer | undefined = undefined; + let pinger: NodeJS.Timeout | undefined = undefined; const subscriber = await projectPubSub.subscribe(`project:${project.id}:*`); diff --git a/apps/webapp/app/utils/sse.server.ts b/apps/webapp/app/utils/sse.server.ts index fced1fbaf4..56e7b191af 100644 --- a/apps/webapp/app/utils/sse.server.ts +++ b/apps/webapp/app/utils/sse.server.ts @@ -22,8 +22,8 @@ export function sse({ request, pingInterval = 1000, updateInterval = 348, run }: return new Response("SSE disabled", { status: 200 }); } - let pinger: NodeJS.Timer | undefined = undefined; - let updater: NodeJS.Timer | undefined = undefined; + let pinger: NodeJS.Timeout | undefined = undefined; + let updater: NodeJS.Timeout | undefined = undefined; let timeout: NodeJS.Timeout | undefined = undefined; const abort = () => { From c9a51f19ac84a311dba84bfe0d31aada60320458 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 13 Dec 2024 14:36:31 +0000 Subject: [PATCH 266/485] updateEnvConcurrencyLimits across marqs and the runqueue --- .../admin.api.v1.environments.$environmentId.ts | 3 ++- 
...dmin.api.v1.orgs.$organizationId.concurrency.ts | 3 ++- ...v1.orgs.$organizationId.environments.staging.ts | 5 +++-- apps/webapp/app/v3/runQueue.server.ts | 14 ++++++++++++++ .../v3/services/createBackgroundWorker.server.ts | 3 ++- .../createDeployedBackgroundWorker.server.ts | 14 +++++++------- .../app/v3/services/finalizeDeployment.server.ts | 4 ++-- 7 files changed, 32 insertions(+), 14 deletions(-) create mode 100644 apps/webapp/app/v3/runQueue.server.ts diff --git a/apps/webapp/app/routes/admin.api.v1.environments.$environmentId.ts b/apps/webapp/app/routes/admin.api.v1.environments.$environmentId.ts index dddbc007be..5ee92606ec 100644 --- a/apps/webapp/app/routes/admin.api.v1.environments.$environmentId.ts +++ b/apps/webapp/app/routes/admin.api.v1.environments.$environmentId.ts @@ -3,6 +3,7 @@ import { z } from "zod"; import { prisma } from "~/db.server"; import { authenticateApiRequestWithPersonalAccessToken } from "~/services/personalAccessToken.server"; import { marqs } from "~/v3/marqs/index.server"; +import { updateEnvConcurrencyLimits } from "~/v3/runQueue.server"; const ParamsSchema = z.object({ environmentId: z.string(), @@ -60,7 +61,7 @@ export async function action({ request, params }: ActionFunctionArgs) { }, }); - await marqs?.updateEnvConcurrencyLimits(environment); + await updateEnvConcurrencyLimits(environment); return json({ success: true }); } diff --git a/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.concurrency.ts b/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.concurrency.ts index 51d292eb05..d6491bcc45 100644 --- a/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.concurrency.ts +++ b/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.concurrency.ts @@ -3,6 +3,7 @@ import { z } from "zod"; import { prisma } from "~/db.server"; import { authenticateApiRequestWithPersonalAccessToken } from "~/services/personalAccessToken.server"; import { marqs } from "~/v3/marqs/index.server"; +import { 
updateEnvConcurrencyLimits } from "~/v3/runQueue.server"; const ParamsSchema = z.object({ organizationId: z.string(), @@ -97,7 +98,7 @@ export async function action({ request, params }: ActionFunctionArgs) { }, }); - await marqs?.updateEnvConcurrencyLimits({ ...modifiedEnvironment, organization }); + await updateEnvConcurrencyLimits({ ...modifiedEnvironment, organization }); } return json({ success: true }); diff --git a/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.environments.staging.ts b/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.environments.staging.ts index c4088257af..8483058f32 100644 --- a/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.environments.staging.ts +++ b/apps/webapp/app/routes/admin.api.v1.orgs.$organizationId.environments.staging.ts @@ -4,6 +4,7 @@ import { prisma } from "~/db.server"; import { createEnvironment } from "~/models/organization.server"; import { authenticateApiRequestWithPersonalAccessToken } from "~/services/personalAccessToken.server"; import { marqs } from "~/v3/marqs/index.server"; +import { updateEnvConcurrencyLimits } from "~/v3/runQueue.server"; const ParamsSchema = z.object({ organizationId: z.string(), @@ -58,10 +59,10 @@ export async function action({ request, params }: ActionFunctionArgs) { if (!stagingEnvironment) { const staging = await createEnvironment(organization, project, "STAGING"); - await marqs?.updateEnvConcurrencyLimits({ ...staging, organization, project }); + await updateEnvConcurrencyLimits({ ...staging, organization, project }); created++; } else { - await marqs?.updateEnvConcurrencyLimits({ ...stagingEnvironment, organization, project }); + await updateEnvConcurrencyLimits({ ...stagingEnvironment, organization, project }); } } diff --git a/apps/webapp/app/v3/runQueue.server.ts b/apps/webapp/app/v3/runQueue.server.ts new file mode 100644 index 0000000000..421445d006 --- /dev/null +++ b/apps/webapp/app/v3/runQueue.server.ts @@ -0,0 +1,14 @@ +import { 
AuthenticatedEnvironment } from "~/services/apiAuth.server"; +import { marqs } from "./marqs/index.server"; +import { engine } from "./runEngine.server"; + +//This allows us to update MARQS and the RunQueue + +/** Updates MARQS and the RunQueue limits */ +export async function updateEnvConcurrencyLimits(environment: AuthenticatedEnvironment) { + await Promise.allSettled([ + marqs?.updateEnvConcurrencyLimits(environment), + engine.runQueue.updateEnvConcurrencyLimits(environment), + ]); +} + diff --git a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts index cc54952489..5830ff8cf1 100644 --- a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts @@ -16,6 +16,7 @@ import { RegisterNextTaskScheduleInstanceService } from "./registerNextTaskSched import cronstrue from "cronstrue"; import { CheckScheduleService } from "./checkSchedule.server"; import { clampMaxDuration } from "../utils/maxDuration"; +import { updateEnvConcurrencyLimits } from "../runQueue.server"; export class CreateBackgroundWorkerService extends BaseService { public async call( @@ -109,7 +110,7 @@ export class CreateBackgroundWorkerService extends BaseService { } ); - await marqs?.updateEnvConcurrencyLimits(environment); + await updateEnvConcurrencyLimits(environment); } catch (err) { logger.error( "Error publishing WORKER_CREATED event or updating global concurrency limits", diff --git a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts index 85c652a38a..3dc3ca20db 100644 --- a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts @@ -1,17 +1,17 @@ import { CreateBackgroundWorkerRequestBody } from "@trigger.dev/core/v3"; import type { BackgroundWorker } from 
"@trigger.dev/database"; +import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; +import { logger } from "~/services/logger.server"; import { generateFriendlyId } from "../friendlyIdentifiers"; +import { socketIo } from "../handleSocketIo.server"; +import { updateEnvConcurrencyLimits } from "../runQueue.server"; +import { PerformDeploymentAlertsService } from "./alerts/performDeploymentAlerts.server"; import { BaseService } from "./baseService.server"; import { createBackgroundTasks, syncDeclarativeSchedules } from "./createBackgroundWorker.server"; -import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; -import { projectPubSub } from "./projectPubSub.server"; -import { marqs } from "~/v3/marqs/index.server"; -import { logger } from "~/services/logger.server"; import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; -import { PerformDeploymentAlertsService } from "./alerts/performDeploymentAlerts.server"; +import { projectPubSub } from "./projectPubSub.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; -import { socketIo } from "../handleSocketIo.server"; export class CreateDeployedBackgroundWorkerService extends BaseService { public async call( @@ -128,7 +128,7 @@ export class CreateDeployedBackgroundWorkerService extends BaseService { type: "deployed", } ); - await marqs?.updateEnvConcurrencyLimits(environment); + await updateEnvConcurrencyLimits(environment); } catch (err) { logger.error("Failed to publish WORKER_CREATED event", { err }); } diff --git a/apps/webapp/app/v3/services/finalizeDeployment.server.ts b/apps/webapp/app/v3/services/finalizeDeployment.server.ts index c610f91225..13418fd156 100644 --- a/apps/webapp/app/v3/services/finalizeDeployment.server.ts +++ b/apps/webapp/app/v3/services/finalizeDeployment.server.ts @@ -3,8 +3,8 @@ import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; import { AuthenticatedEnvironment } from 
"~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; import { socketIo } from "../handleSocketIo.server"; -import { marqs } from "../marqs/index.server"; import { registryProxy } from "../registryProxy.server"; +import { updateEnvConcurrencyLimits } from "../runQueue.server"; import { PerformDeploymentAlertsService } from "./alerts/performDeploymentAlerts.server"; import { BaseService, ServiceValidationError } from "./baseService.server"; import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; @@ -95,7 +95,7 @@ export class FinalizeDeploymentService extends BaseService { } ); - await marqs?.updateEnvConcurrencyLimits(authenticatedEnv); + await updateEnvConcurrencyLimits(authenticatedEnv); } catch (err) { logger.error("Failed to publish WORKER_CREATED event", { err }); } From 9da544ea111c4954ffdbd402fe634f7d394213ac Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 13 Dec 2024 14:50:52 +0000 Subject: [PATCH 267/485] Pass proper values into the run engine --- apps/webapp/app/env.server.ts | 5 +++++ apps/webapp/app/v3/machinePresets.server.ts | 12 ++++++++++++ apps/webapp/app/v3/runEngine.server.ts | 19 +++++++------------ 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/apps/webapp/app/env.server.ts b/apps/webapp/app/env.server.ts index 2a30869a67..6d5c26dd64 100644 --- a/apps/webapp/app/env.server.ts +++ b/apps/webapp/app/env.server.ts @@ -244,6 +244,11 @@ const EnvironmentSchema = z.object({ MAXIMUM_DEV_QUEUE_SIZE: z.coerce.number().int().optional(), MAXIMUM_DEPLOYED_QUEUE_SIZE: z.coerce.number().int().optional(), MAX_BATCH_V2_TRIGGER_ITEMS: z.coerce.number().int().default(500), + + // Run Engine 2.0 + RUN_ENGINE_WORKER_COUNT: z.coerce.number().int().default(4), + RUN_ENGINE_TASKS_PER_WORKER: z.coerce.number().int().default(10), + RUN_ENGINE_WORKER_POLL_INTERVAL: z.coerce.number().int().default(100), }); export type Environment = z.infer; diff --git 
a/apps/webapp/app/v3/machinePresets.server.ts b/apps/webapp/app/v3/machinePresets.server.ts index 120a235c54..612dc16258 100644 --- a/apps/webapp/app/v3/machinePresets.server.ts +++ b/apps/webapp/app/v3/machinePresets.server.ts @@ -41,3 +41,15 @@ function derivePresetNameFromValues(cpu: number, memory: number): MachinePresetN return defaultMachine; } + +export function allMachines(): Record { + return Object.fromEntries( + Object.entries(machines).map(([name, preset]) => [ + name, + { + name: name as MachinePresetName, + ...preset, + }, + ]) + ); +} diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index fffc38f6b1..3cc0a7f64b 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -3,6 +3,8 @@ import { prisma } from "~/db.server"; import { env } from "~/env.server"; import { tracer } from "./tracer.server"; import { singleton } from "~/utils/singleton"; +import { defaultMachine, machines } from "@trigger.dev/platform/v3"; +import { allMachines } from "./machinePresets.server"; export const engine = singleton("RunEngine", createRunEngine); @@ -20,21 +22,14 @@ function createRunEngine() { ...(env.REDIS_TLS_DISABLED === "true" ? 
{} : { tls: {} }), }, worker: { - workers: 1, - tasksPerWorker: env.WORKER_CONCURRENCY, + workers: env.RUN_ENGINE_WORKER_COUNT, + tasksPerWorker: env.RUN_ENGINE_TASKS_PER_WORKER, pollIntervalMs: env.WORKER_POLL_INTERVAL, }, machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, + defaultMachine: defaultMachine, + machines: allMachines(), + baseCostInCents: env.CENTS_PER_RUN, }, tracer, }); From ff79a77b8b8d05de21a923e0ae4de8cbed3700f7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 13 Dec 2024 14:58:46 +0000 Subject: [PATCH 268/485] RunQueue settings and removed unused rebalancing workers --- apps/webapp/app/v3/runEngine.server.ts | 3 ++ .../run-engine/src/engine/index.ts | 19 +++++------ .../run-engine/src/engine/types.ts | 1 + .../run-engine/src/run-queue/index.ts | 4 --- .../run-engine/src/shared/asyncWorker.ts | 34 ------------------- 5 files changed, 13 insertions(+), 48 deletions(-) delete mode 100644 internal-packages/run-engine/src/shared/asyncWorker.ts diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 3cc0a7f64b..11a91a383c 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -31,6 +31,9 @@ function createRunEngine() { machines: allMachines(), baseCostInCents: env.CENTS_PER_RUN, }, + queue: { + defaultEnvConcurrency: env.DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT, + }, tracer, }); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 5f2ebe1045..28133b50f9 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -130,8 +130,7 @@ export class RunEngine { tracer: trace.getTracer("rq"), queuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 36 }), envQueuePriorityStrategy: new 
SimpleWeightedChoiceStrategy({ queueSelectionCount: 12 }), - workers: 1, - defaultEnvConcurrency: 10, + defaultEnvConcurrency: options.queue?.defaultEnvConcurrency ?? 10, enableRebalancing: false, logger: new Logger("RunQueue", "warn"), redis: options.redis, @@ -376,14 +375,14 @@ export class RunEngine { }); if (taskQueue) { - taskQueue = await prisma.taskQueue.update({ - where: { - id: taskQueue.id, - }, - data: { - concurrencyLimit, - }, - }); + taskQueue = await prisma.taskQueue.update({ + where: { + id: taskQueue.id, + }, + data: { + concurrencyLimit, + }, + }); } else { taskQueue = await prisma.taskQueue.create({ data: { diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index 36dd29ef74..1d3efb222c 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -18,6 +18,7 @@ export type RunEngineOptions = { }; queue?: { retryOptions?: RetryOptions; + defaultEnvConcurrency?: number; }; /** If not set then checkpoints won't ever be used */ retryWarmStartThresholdMs?: number; diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index e558ad8dfc..3657b7d4de 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -7,7 +7,6 @@ import { import { Logger } from "@trigger.dev/core/logger"; import { flattenAttributes } from "@trigger.dev/core/v3"; import { Redis, type Callback, type RedisOptions, type Result } from "ioredis"; -import { AsyncWorker } from "../shared/asyncWorker.js"; import { attributesFromAuthenticatedEnv, MinimalAuthenticatedEnvironment, @@ -37,7 +36,6 @@ export type RunQueueOptions = { redis: RedisOptions; defaultEnvConcurrency: number; windowSize?: number; - workers: number; queuePriorityStrategy: RunQueuePriorityStrategy; envQueuePriorityStrategy: RunQueuePriorityStrategy; enableRebalancing?: 
boolean; @@ -74,7 +72,6 @@ export class RunQueue { private redis: Redis; public keys: RunQueueKeyProducer; private queuePriorityStrategy: RunQueuePriorityStrategy; - #rebalanceWorkers: Array = []; constructor(private readonly options: RunQueueOptions) { this.retryOptions = options.retryOptions ?? defaultRetrySettings; @@ -653,7 +650,6 @@ export class RunQueue { } async quit() { - await Promise.all(this.#rebalanceWorkers.map((worker) => worker.stop())); await this.subscriber.unsubscribe(); await this.subscriber.quit(); await this.redis.quit(); diff --git a/internal-packages/run-engine/src/shared/asyncWorker.ts b/internal-packages/run-engine/src/shared/asyncWorker.ts deleted file mode 100644 index 016662e1d5..0000000000 --- a/internal-packages/run-engine/src/shared/asyncWorker.ts +++ /dev/null @@ -1,34 +0,0 @@ -export class AsyncWorker { - private running = false; - private timeout?: NodeJS.Timeout; - - constructor(private readonly fn: () => Promise, private readonly interval: number) {} - - start() { - if (this.running) { - return; - } - - this.running = true; - - this.#run(); - } - - stop() { - this.running = false; - } - - async #run() { - if (!this.running) { - return; - } - - try { - await this.fn(); - } catch (e) { - console.error(e); - } - - this.timeout = setTimeout(this.#run.bind(this), this.interval); - } -} From 03c9899c3e844b306541a068d01ff1abf939b57d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 13 Dec 2024 15:00:53 +0000 Subject: [PATCH 269/485] Remove rebalancing prop --- internal-packages/run-engine/src/run-queue/index.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 3657b7d4de..b51c972a6a 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -38,7 +38,6 @@ export type RunQueueOptions = { windowSize?: number; queuePriorityStrategy: RunQueuePriorityStrategy; 
envQueuePriorityStrategy: RunQueuePriorityStrategy; - enableRebalancing?: boolean; verbose?: boolean; logger: Logger; retryOptions?: RetryOptions; From c84b50669478cc309eb7ec6b79c11bc26a7e5280 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 13 Dec 2024 15:10:08 +0000 Subject: [PATCH 270/485] Tidied more things up --- internal-packages/run-engine/src/engine/index.ts | 1 - .../run-engine/src/run-queue/index.ts | 15 ++++++--------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 28133b50f9..e3f48fe9c7 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -131,7 +131,6 @@ export class RunEngine { queuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 36 }), envQueuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 12 }), defaultEnvConcurrency: options.queue?.defaultEnvConcurrency ?? 
10, - enableRebalancing: false, logger: new Logger("RunQueue", "warn"), redis: options.redis, retryOptions: options.queue?.retryOptions, diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index b51c972a6a..c81a4cdc82 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -5,12 +5,14 @@ import { SEMATTRS_MESSAGING_SYSTEM, } from "@opentelemetry/semantic-conventions"; import { Logger } from "@trigger.dev/core/logger"; -import { flattenAttributes } from "@trigger.dev/core/v3"; +import { calculateNextRetryDelay, flattenAttributes } from "@trigger.dev/core/v3"; +import { type RetryOptions } from "@trigger.dev/core/v3/schemas"; import { Redis, type Callback, type RedisOptions, type Result } from "ioredis"; import { attributesFromAuthenticatedEnv, MinimalAuthenticatedEnvironment, } from "../shared/index.js"; +import { RunQueueShortKeyProducer } from "./keyProducer.js"; import { InputPayload, OutputPayload, @@ -19,7 +21,6 @@ import { RunQueueKeyProducer, RunQueuePriorityStrategy, } from "./types.js"; -import { RunQueueShortKeyProducer } from "./keyProducer.js"; const SemanticAttributes = { QUEUE: "runqueue.queue", @@ -49,13 +50,6 @@ type DequeuedMessage = { message: OutputPayload; }; -/** - * RunQueue – the queue that's used to process runs - */ - -import { type RetryOptions } from "@trigger.dev/core/v3/schemas"; -import { calculateNextRetryDelay } from "@trigger.dev/core/v3"; - const defaultRetrySettings = { maxAttempts: 12, factor: 2, @@ -64,6 +58,9 @@ const defaultRetrySettings = { randomize: true, }; +/** + * RunQueue – the queue that's used to process runs + */ export class RunQueue { private retryOptions: RetryOptions; private subscriber: Redis; From 160e219a4340e4131089d9e76e2160500500cb88 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 13 Dec 2024 15:14:35 +0000 Subject: [PATCH 271/485] Update/remove queue limits for MARQS 
and RunQueue --- apps/webapp/app/v3/runQueue.server.ts | 22 +++++++++++++++++++ .../services/createBackgroundWorker.server.ts | 14 ++++++------ 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/apps/webapp/app/v3/runQueue.server.ts b/apps/webapp/app/v3/runQueue.server.ts index 421445d006..7198456d39 100644 --- a/apps/webapp/app/v3/runQueue.server.ts +++ b/apps/webapp/app/v3/runQueue.server.ts @@ -12,3 +12,25 @@ export async function updateEnvConcurrencyLimits(environment: AuthenticatedEnvir ]); } +/** Updates MARQS and the RunQueue limits for a queue */ +export async function updateQueueConcurrencyLimits( + environment: AuthenticatedEnvironment, + queueName: string, + concurrency: number +) { + await Promise.allSettled([ + marqs?.updateQueueConcurrencyLimits(environment, queueName, concurrency), + engine.runQueue.updateQueueConcurrencyLimits(environment, queueName, concurrency), + ]); +} + +/** Removes MARQS and the RunQueue limits for a queue */ +export async function removeQueueConcurrencyLimits( + environment: AuthenticatedEnvironment, + queueName: string +) { + await Promise.allSettled([ + marqs?.removeQueueConcurrencyLimits(environment, queueName), + engine.runQueue.removeQueueConcurrencyLimits(environment, queueName), + ]); +} diff --git a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts index 5830ff8cf1..27c4d2b566 100644 --- a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts @@ -16,7 +16,11 @@ import { RegisterNextTaskScheduleInstanceService } from "./registerNextTaskSched import cronstrue from "cronstrue"; import { CheckScheduleService } from "./checkSchedule.server"; import { clampMaxDuration } from "../utils/maxDuration"; -import { updateEnvConcurrencyLimits } from "../runQueue.server"; +import { + removeQueueConcurrencyLimits, + updateEnvConcurrencyLimits, + updateQueueConcurrencyLimits, 
+} from "../runQueue.server"; export class CreateBackgroundWorkerService extends BaseService { public async call( @@ -203,13 +207,9 @@ export async function createBackgroundTasks( }); if (typeof taskQueue.concurrencyLimit === "number") { - await marqs?.updateQueueConcurrencyLimits( - environment, - taskQueue.name, - taskQueue.concurrencyLimit - ); + await updateQueueConcurrencyLimits(environment, taskQueue.name, taskQueue.concurrencyLimit); } else { - await marqs?.removeQueueConcurrencyLimits(environment, taskQueue.name); + await removeQueueConcurrencyLimits(environment, taskQueue.name); } } catch (error) { if (error instanceof Prisma.PrismaClientKnownRequestError) { From 086c1bfdc04232a521b673946700e75ddf736199 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 13 Dec 2024 15:24:42 +0000 Subject: [PATCH 272/485] taskQueue/concurrencyLimit changes ported back into the RunEngine --- .../run-engine/src/engine/index.ts | 40 +++++++++++++------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index e3f48fe9c7..6efb35cd0b 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -363,8 +363,8 @@ export class RunEngine { if (queue) { const concurrencyLimit = typeof queue.concurrencyLimit === "number" - ? Math.max(0, queue.concurrencyLimit) - : undefined; + ? Math.max(Math.min(queue.concurrencyLimit, environment.maximumConcurrencyLimit), 0) + : null; let taskQueue = await prisma.taskQueue.findFirst({ where: { @@ -373,15 +373,33 @@ export class RunEngine { }, }); + const existingConcurrencyLimit = + typeof taskQueue?.concurrencyLimit === "number" + ? 
taskQueue.concurrencyLimit + : undefined; + if (taskQueue) { + if (existingConcurrencyLimit !== concurrencyLimit) { taskQueue = await prisma.taskQueue.update({ where: { id: taskQueue.id, }, data: { - concurrencyLimit, + concurrencyLimit: + typeof concurrencyLimit === "number" ? concurrencyLimit : null, }, }); + + if (typeof taskQueue.concurrencyLimit === "number") { + await this.runQueue.updateQueueConcurrencyLimits( + environment, + taskQueue.name, + taskQueue.concurrencyLimit + ); + } else { + await this.runQueue.removeQueueConcurrencyLimits(environment, taskQueue.name); + } + } } else { taskQueue = await prisma.taskQueue.create({ data: { @@ -393,16 +411,14 @@ export class RunEngine { type: "NAMED", }, }); - } - if (typeof taskQueue.concurrencyLimit === "number") { - await this.runQueue.updateQueueConcurrencyLimits( - environment, - taskQueue.name, - taskQueue.concurrencyLimit - ); - } else { - await this.runQueue.removeQueueConcurrencyLimits(environment, taskQueue.name); + if (typeof taskQueue.concurrencyLimit === "number") { + await this.runQueue.updateQueueConcurrencyLimits( + environment, + taskQueue.name, + taskQueue.concurrencyLimit + ); + } } } From e1177f71ad074ca22dbe200a4e84efaacda90b8f Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Sun, 15 Dec 2024 16:16:54 +0000 Subject: [PATCH 273/485] Reworked completing waitpoints to improve performance and reduce race conditions --- .../run-engine/src/engine/index.ts | 135 +++++++++------ .../src/engine/tests/triggerAndWait.test.ts | 5 + .../src/engine/tests/waitpoints.test.ts | 161 ++++++++++++++++++ 3 files changed, 250 insertions(+), 51 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 6efb35cd0b..7129c4f951 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -105,6 +105,13 @@ const workerCatalog = { }), visibilityTimeoutMs: 10_000, }, + 
tryContinueRunForCompletedWaitpoint: { + schema: z.object({ + runId: z.string(), + waitpointId: z.string(), + }), + visibilityTimeoutMs: 10_000, + }, }; type EngineWorker = Worker; @@ -174,6 +181,12 @@ export class RunEngine { tryCompleteBatch: async ({ payload }) => { await this.#tryCompleteBatch({ batchId: payload.batchId }); }, + tryContinueRunForCompletedWaitpoint: async ({ payload }) => { + await this.#tryContinueRunForCompletedWaitpoint({ + runId: payload.runId, + waitpointId: payload.waitpointId, + }); + }, }, }); @@ -1654,10 +1667,10 @@ export class RunEngine { throw new Error(`Waitpoint ${id} not found`); } - const updatedWaitpoint = await $transaction( + const result = await $transaction( this.prisma, async (tx) => { - // 1. Find the TaskRuns associated with this waitpoint + // 1. Find the TaskRuns blocked by this waitpoint const affectedTaskRuns = await tx.taskRunWaitpoint.findMany({ where: { waitpointId: id }, select: { taskRunId: true }, @@ -1686,60 +1699,31 @@ export class RunEngine { }, }); - // 4. Add the completed waitpoints to the snapshots - for (const run of affectedTaskRuns) { - await this.runLock.lock([run.taskRunId], 5_000, async (signal) => { - const latestSnapshot = await getLatestExecutionSnapshot(tx, run.taskRunId); - - await tx.taskRunExecutionSnapshot.update({ - where: { id: latestSnapshot.id }, - data: { - completedWaitpoints: { - connect: { id }, - }, - }, - }); - }); - } - - // 5. Check which of the affected TaskRuns now have no waitpoints - const taskRunsToResume = await tx.taskRun.findMany({ - where: { - id: { in: affectedTaskRuns.map((run) => run.taskRunId) }, - blockedByWaitpoints: { none: {} }, - }, - include: { - runtimeEnvironment: { - select: { - id: true, - type: true, - maximumConcurrencyLimit: true, - project: { select: { id: true } }, - organization: { select: { id: true } }, - }, - }, - }, - }); - - // 5. 
Continue the runs that have no more waitpoints - for (const run of taskRunsToResume) { - await this.#continueRun(run, run.runtimeEnvironment, tx); - } - - return updatedWaitpoint; + return { updatedWaitpoint, affectedTaskRuns }; }, (error) => { this.logger.error(`Error completing waitpoint ${id}, retrying`, { error }); throw error; - }, - { isolationLevel: Prisma.TransactionIsolationLevel.ReadCommitted } + } ); - if (!updatedWaitpoint) { + if (!result) { throw new Error(`Waitpoint couldn't be updated`); } - return updatedWaitpoint; + //schedule trying to continue the runs + for (const run of result.affectedTaskRuns) { + await this.worker.enqueue({ + //this will debounce the call + id: `tryContinueRunForCompletedWaitpoint:${run.taskRunId}`, + job: "tryContinueRunForCompletedWaitpoint", + payload: { runId: run.taskRunId, waitpointId: id }, + //50ms in the future + availableAt: new Date(Date.now() + 50), + }); + } + + return result.updatedWaitpoint; } async createCheckpoint({ @@ -2452,6 +2436,56 @@ export class RunEngine { }); } + async #tryContinueRunForCompletedWaitpoint({ + runId, + waitpointId, + }: { + runId: string; + waitpointId: string; + }) { + // 1. Add the completed waitpoint to the latest snapshot + // We need a runLock to prevent a new snapshot being created (without the completedWaitpoints) + await this.runLock.lock([runId], 5_000, async (signal) => { + const latestSnapshot = await getLatestExecutionSnapshot(this.prisma, runId); + + await this.prisma.taskRunExecutionSnapshot.update({ + where: { id: latestSnapshot.id }, + data: { + completedWaitpoints: { + connect: { id: waitpointId }, + }, + }, + }); + }); + + // 2. 
Check which of the affected TaskRuns now have no waitpoints + const run = await this.prisma.taskRun.findFirst({ + where: { + id: runId, + blockedByWaitpoints: { none: {} }, + }, + include: { + runtimeEnvironment: { + select: { + id: true, + type: true, + maximumConcurrencyLimit: true, + project: { select: { id: true } }, + organization: { select: { id: true } }, + }, + }, + }, + }); + + // 3. Run isn't totally unblocked + if (!run) { + return; + } + + // 4. Continue the runs that have no more waitpoints + await this.#continueRun(run, run.runtimeEnvironment, this.prisma); + } + async #continueRun( run: TaskRun, env: MinimalAuthenticatedEnvironment, @@ -2470,7 +2504,7 @@ export class RunEngine { //run is still executing, send a message to the worker if (isExecuting(snapshot.executionStatus)) { const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run: run, + run, snapshot: { executionStatus: "EXECUTING", description: "Run was continued, whilst still executing.", @@ -2481,11 +2515,10 @@ export class RunEngine { //we reacquire the concurrency if it's still running because we're not going to be dequeuing (which also does this) await this.runQueue.reacquireConcurrency(env.organization.id, run.id); - //todo publish a notification in Redis that the Workers listen to - //this will cause the Worker to check for new execution snapshots for its runs + await this.#sendNotificationToWorker({ runId: run.id }); } else { const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run: run, + run, snapshot: { executionStatus: "QUEUED", description: "Run is QUEUED, because all waitpoints are completed.", diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 028d1bd6a8..3f9bae95b4 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -7,6 +7,7 @@ import { 
import { trace } from "@opentelemetry/api"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; +import { setTimeout } from "node:timers/promises"; describe("RunEngine triggerAndWait", () => { containerTest("triggerAndWait", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { @@ -176,6 +177,8 @@ describe("RunEngine triggerAndWait", () => { }); expect(runWaitpointAfter).toBeNull(); + await setTimeout(500); + //parent snapshot const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); assertNonNullable(parentExecutionDataAfter); @@ -421,6 +424,8 @@ describe("RunEngine triggerAndWait", () => { }); expect(parent2RunWaitpointAfter).toBeNull(); + await setTimeout(500); + //parent snapshot const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun1.id }); assertNonNullable(parentExecutionDataAfter); diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index b3996d0491..0e0ee7abf5 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -8,6 +8,7 @@ import { trace } from "@opentelemetry/api"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; +import { EventBusEventArgs } from "../eventBus.js"; describe("RunEngine Waitpoints", () => { containerTest("waitForDuration", { timeout: 15_000 }, async ({ prisma, redisContainer }) => { @@ -348,11 +349,22 @@ describe("RunEngine Waitpoints", () => { }); expect(runWaitpointBefore?.waitpointId).toBe(waitpoint.id); + let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; + engine.eventBus.on("workerNotification", (result) => { + event = result; + }); + //complete the waitpoint await engine.completeWaitpoint({ id: waitpoint.id, }); + await setTimeout(200); + + 
assertNonNullable(event); + const notificationEvent = event as EventBusEventArgs<"workerNotification">[0]; + expect(notificationEvent.run.id).toBe(run.id); + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); @@ -489,4 +501,153 @@ describe("RunEngine Waitpoints", () => { } } ); + + containerTest( + "Race condition with multiple waitpoints completing simultaneously", + { timeout: 60_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + 
snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + const iterationCount = 50; + + for (let i = 0; i < iterationCount; i++) { + const waitpointCount = 5; + + //create waitpoints + const waitpoints = await Promise.all( + Array.from({ length: waitpointCount }).map(() => + engine.createManualWaitpoint({ + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + }) + ) + ); + + //block the run with them + await Promise.all( + waitpoints.map((waitpoint) => + engine.blockRunWithWaitpoint({ + runId: run.id, + waitpointId: waitpoint.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + }) + ) + ); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //check there is a waitpoint blocking the parent run + const runWaitpointsBefore = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointsBefore.length).toBe(waitpointCount); + + //complete the waitpoints + await Promise.all( + waitpoints.map((waitpoint) => + engine.completeWaitpoint({ + id: waitpoint.id, + }) + ) + ); + + await setTimeout(500); + + //expect the run to be executing again + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); + + //check there are no waitpoints blocking the parent run + const runWaitpoints = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoints.length).toBe(0); + } + } finally { + engine.quit(); + } + } + ); }); From b57e913f4410699a8f04705268f92598fae7f8b0 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Sun, 15 Dec 2024 16:43:27 +0000 Subject: [PATCH 274/485] Improved 
test robustness --- .../src/engine/tests/triggerAndWait.test.ts | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 3f9bae95b4..5192084b13 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -167,6 +167,8 @@ describe("RunEngine triggerAndWait", () => { expect(waitpointAfter?.status).toBe("COMPLETED"); expect(waitpointAfter?.output).toBe('{"foo":"bar"}'); + await setTimeout(500); + const runWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ where: { taskRunId: parentRun.id, @@ -177,8 +179,6 @@ describe("RunEngine triggerAndWait", () => { }); expect(runWaitpointAfter).toBeNull(); - await setTimeout(500); - //parent snapshot const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun.id }); assertNonNullable(parentExecutionDataAfter); @@ -404,13 +404,12 @@ describe("RunEngine triggerAndWait", () => { expect(waitpointAfter?.status).toBe("COMPLETED"); expect(waitpointAfter?.output).toBe('{"foo":"bar"}'); + await setTimeout(500); + const parent1RunWaitpointAfter = await prisma.taskRunWaitpoint.findFirst({ where: { taskRunId: parentRun1.id, }, - include: { - waitpoint: true, - }, }); expect(parent1RunWaitpointAfter).toBeNull(); @@ -418,14 +417,9 @@ describe("RunEngine triggerAndWait", () => { where: { taskRunId: parentRun2.id, }, - include: { - waitpoint: true, - }, }); expect(parent2RunWaitpointAfter).toBeNull(); - await setTimeout(500); - //parent snapshot const parentExecutionDataAfter = await engine.getRunExecutionData({ runId: parentRun1.id }); assertNonNullable(parentExecutionDataAfter); From ab9e268a88d38e1f4fec8195b3ff50fd30fba85e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Sun, 15 Dec 2024 16:45:36 +0000 Subject: [PATCH 275/485] Down to a single 
run lock only when a run is totally unblocked and ready to continue --- .../run-engine/src/engine/index.ts | 108 +++++++----------- 1 file changed, 44 insertions(+), 64 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 7129c4f951..90e9ef56bd 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -105,10 +105,9 @@ const workerCatalog = { }), visibilityTimeoutMs: 10_000, }, - tryContinueRunForCompletedWaitpoint: { + continueRunIfUnblocked: { schema: z.object({ runId: z.string(), - waitpointId: z.string(), }), visibilityTimeoutMs: 10_000, }, @@ -181,10 +180,9 @@ export class RunEngine { tryCompleteBatch: async ({ payload }) => { await this.#tryCompleteBatch({ batchId: payload.batchId }); }, - tryContinueRunForCompletedWaitpoint: async ({ payload }) => { - await this.#tryContinueRunForCompletedWaitpoint({ + continueRunIfUnblocked: async ({ payload }) => { + await this.#continueRunIfUnblocked({ runId: payload.runId, - waitpointId: payload.waitpointId, }); }, }, @@ -1682,12 +1680,7 @@ export class RunEngine { }); } - // 2. Delete the TaskRunWaitpoint entries for this specific waitpoint - await tx.taskRunWaitpoint.deleteMany({ - where: { waitpointId: id }, - }); - - // 3. Update the waitpoint status + // 2. 
Update the waitpoint to completed const updatedWaitpoint = await tx.waitpoint.update({ where: { id }, data: { @@ -1715,9 +1708,9 @@ export class RunEngine { for (const run of result.affectedTaskRuns) { await this.worker.enqueue({ //this will debounce the call - id: `tryContinueRunForCompletedWaitpoint:${run.taskRunId}`, - job: "tryContinueRunForCompletedWaitpoint", - payload: { runId: run.taskRunId, waitpointId: id }, + id: `continueRunIfUnblocked:${run.taskRunId}`, + job: "continueRunIfUnblocked", + payload: { runId: run.taskRunId }, //50ms in the future availableAt: new Date(Date.now() + 50), }); @@ -2436,33 +2429,26 @@ export class RunEngine { }); } - async #tryContinueRunForCompletedWaitpoint({ - runId, - waitpointId, - }: { - runId: string; - waitpointId: string; - }) { - // 1. Add the completed waitpoint to the latest snapshot - // We need a runLock to prevent a new snapshot being created (without the completedWaitpoints) - await this.runLock.lock([runId], 5_000, async (signal) => { - const latestSnapshot = await getLatestExecutionSnapshot(this.prisma, runId); - - await this.prisma.taskRunExecutionSnapshot.update({ - where: { id: latestSnapshot.id }, - data: { - completedWaitpoints: { - connect: { id: waitpointId }, - }, + async #continueRunIfUnblocked({ runId }: { runId: string }) { + // 1. Get the any blocking waitpoints + const blockingWaitpoints = await this.prisma.taskRunWaitpoint.findMany({ + where: { taskRunId: runId }, + select: { + waitpoint: { + select: { id: true, status: true }, }, - }); + }, }); - // 2. Check which of the affected TaskRuns now have no waitpoints + // 2. There are blockers still, so do nothing + if (blockingWaitpoints.some((w) => w.waitpoint.status !== "COMPLETED")) { + return; + } + + // 3. Get the run with environment const run = await this.prisma.taskRun.findFirst({ where: { id: runId, - blockedByWaitpoints: { none: {} }, }, include: { runtimeEnvironment: { @@ -2477,65 +2463,59 @@ export class RunEngine { }, }); - // 3. 
Run isn't totally unblocked if (!run) { - return; + throw new Error(`#continueRunIfUnblocked: run not found: ${runId}`); } - // 4. Continue the runs that have no more waitpoints - await this.#continueRun(run, run.runtimeEnvironment, this.prisma); - } - - async #continueRun( - run: TaskRun, - env: MinimalAuthenticatedEnvironment, - tx?: PrismaClientOrTransaction - ) { - const prisma = tx ?? this.prisma; - - await this.runLock.lock([run.id], 5000, async (signal) => { - const snapshot = await getLatestExecutionSnapshot(prisma, run.id); - - const completedWaitpoints = await getExecutionSnapshotCompletedWaitpoints( - prisma, - snapshot.id - ); + //4. Continue the run whether it's executing or not + await this.runLock.lock([runId], 5000, async (signal) => { + const snapshot = await getLatestExecutionSnapshot(this.prisma, runId); //run is still executing, send a message to the worker if (isExecuting(snapshot.executionStatus)) { - const newSnapshot = await this.#createExecutionSnapshot(prisma, { - run, + const newSnapshot = await this.#createExecutionSnapshot(this.prisma, { + run: { + id: runId, + status: snapshot.runStatus, + attemptNumber: snapshot.attemptNumber, + }, snapshot: { executionStatus: "EXECUTING", description: "Run was continued, whilst still executing.", }, - completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), + completedWaitpointIds: blockingWaitpoints.map((b) => b.waitpoint.id), }); //we reacquire the concurrency if it's still running because we're not going to be dequeuing (which also does this) - await this.runQueue.reacquireConcurrency(env.organization.id, run.id); + await this.runQueue.reacquireConcurrency(run.runtimeEnvironment.organization.id, runId); - await this.#sendNotificationToWorker({ runId: run.id }); + await this.#sendNotificationToWorker({ runId: runId }); } else { - const newSnapshot = await this.#createExecutionSnapshot(prisma, { + const newSnapshot = await this.#createExecutionSnapshot(this.prisma, { run, 
snapshot: { executionStatus: "QUEUED", description: "Run is QUEUED, because all waitpoints are completed.", }, - completedWaitpointIds: completedWaitpoints.map((waitpoint) => waitpoint.id), + completedWaitpointIds: blockingWaitpoints.map((b) => b.waitpoint.id), }); //put it back in the queue, with the original timestamp (w/ priority) //this prioritizes dequeuing waiting runs over new runs await this.#enqueueRun({ run, - env, + env: run.runtimeEnvironment, timestamp: run.createdAt.getTime() - run.priorityMs, - tx: prisma, }); } }); + + //5. Remove the blocking waitpoints + await this.prisma.taskRunWaitpoint.deleteMany({ + where: { + taskRunId: runId, + }, + }); } async #queueRunsWaitingForWorker({ backgroundWorkerId }: { backgroundWorkerId: string }) { From 872cea389130868b12d2b3c7c8ff4fb78762fb29 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 15 Dec 2024 18:51:38 +0000 Subject: [PATCH 276/485] warm starts, worker notifications, wait fixes --- ...ployments.$deploymentFriendlyId.dequeue.ts | 58 +++ ...Id.snapshots.$snapshotId.attempts.start.ts | 8 +- ...nId.snapshots.$snapshotId.wait.duration.ts | 32 ++ ...er-actions.runs.$runId.snapshots.latest.ts | 28 ++ apps/webapp/app/v3/handleSocketIo.server.ts | 159 ++++++-- .../webapp/app/v3/runEngineHandlers.server.ts | 3 +- .../worker/workerGroupTokenService.server.ts | 110 ++++- .../run-engine/src/engine/index.ts | 12 +- .../src/entryPoints/managed-run-controller.ts | 381 ++++++++++++++++-- .../entryPoints/unmanaged-run-controller.ts | 6 +- packages/core/src/v3/schemas/runEngine.ts | 4 + packages/worker/package.json | 2 + packages/worker/src/client/util.ts | 14 - packages/worker/src/client/websocket.ts | 52 --- packages/worker/src/index.ts | 9 +- packages/worker/src/messages.ts | 80 ---- .../worker/src/{ => supervisor}/events.ts | 8 + .../worker/src/{client => supervisor}/http.ts | 65 ++- .../src/{ => supervisor}/queueConsumer.ts | 10 +- .../worker/src/{ => 
supervisor}/schemas.ts | 42 +- .../session.ts} | 100 ++--- .../src/{client => supervisor}/types.ts | 2 +- packages/worker/src/supervisor/util.ts | 40 ++ packages/worker/src/types.ts | 22 + packages/worker/src/workload/http.ts | 59 ++- packages/worker/src/workload/schemas.ts | 55 +++ packages/worker/src/workload/websocket.ts | 52 --- pnpm-lock.yaml | 10 +- 28 files changed, 1054 insertions(+), 369 deletions(-) create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts create mode 100644 apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.latest.ts delete mode 100644 packages/worker/src/client/util.ts delete mode 100644 packages/worker/src/client/websocket.ts delete mode 100644 packages/worker/src/messages.ts rename packages/worker/src/{ => supervisor}/events.ts (88%) rename packages/worker/src/{client => supervisor}/http.ts (72%) rename packages/worker/src/{ => supervisor}/queueConsumer.ts (84%) rename packages/worker/src/{ => supervisor}/schemas.ts (68%) rename packages/worker/src/{workerSession.ts => supervisor/session.ts} (51%) rename packages/worker/src/{client => supervisor}/types.ts (73%) create mode 100644 packages/worker/src/supervisor/util.ts create mode 100644 packages/worker/src/types.ts create mode 100644 packages/worker/src/workload/schemas.ts delete mode 100644 packages/worker/src/workload/websocket.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts new file mode 100644 index 0000000000..2e48ee75be --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts @@ -0,0 +1,58 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { WorkerApiDequeueResponseBody } 
from "@trigger.dev/worker"; +import { z } from "zod"; +import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; +import { $replica, prisma } from "~/db.server"; +import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; + +export const loader = createLoaderWorkerApiRoute( + { + params: z.object({ + deploymentFriendlyId: z.string(), + }), + }, + async ({ authenticatedWorker, params }): Promise> => { + const deployment = await $replica.workerDeployment.findUnique({ + where: { + friendlyId: params.deploymentFriendlyId, + }, + include: { + worker: true, + }, + }); + + if (!deployment) { + throw new Error("Deployment not found"); + } + + if (!deployment.worker) { + throw new Error("Worker not found"); + } + + const dequeuedMessages = (await isCurrentDeployment(deployment.id, deployment.environmentId)) + ? await authenticatedWorker.dequeueFromEnvironment( + deployment.worker.id, + deployment.environmentId + ) + : await authenticatedWorker.dequeueFromVersion(deployment.worker.id); + + return json(dequeuedMessages); + } +); + +async function isCurrentDeployment(deploymentId: string, environmentId: string): Promise { + const promotion = await prisma.workerDeploymentPromotion.findUnique({ + where: { + environmentId_label: { + environmentId, + label: CURRENT_DEPLOYMENT_LABEL, + }, + }, + }); + + if (!promotion) { + return false; + } + + return promotion.deploymentId === deploymentId; +} diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts index 37cd18c63c..cfddbe10d3 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts @@ -1,10 +1,14 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { 
WorkerApiRunAttemptStartResponseBody } from "@trigger.dev/worker"; +import { + WorkerApiRunAttemptStartRequestBody, + WorkerApiRunAttemptStartResponseBody, +} from "@trigger.dev/worker"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( { + body: WorkerApiRunAttemptStartRequestBody, params: z.object({ runId: z.string(), snapshotId: z.string(), @@ -12,6 +16,7 @@ export const action = createActionWorkerApiRoute( }, async ({ authenticatedWorker, + body, params, }): Promise> => { const { runId, snapshotId } = params; @@ -19,6 +24,7 @@ export const action = createActionWorkerApiRoute( const runExecutionData = await authenticatedWorker.startRunAttempt({ runId, snapshotId, + isWarmStart: body.isWarmStart, }); return json(runExecutionData); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts new file mode 100644 index 0000000000..d2b645c097 --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts @@ -0,0 +1,32 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { + WorkerApiWaitForDurationRequestBody, + WorkerApiWaitForDurationResponseBody, +} from "@trigger.dev/worker"; +import { z } from "zod"; +import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; + +export const action = createActionWorkerApiRoute( + { + body: WorkerApiWaitForDurationRequestBody, + params: z.object({ + runId: z.string(), + snapshotId: z.string(), + }), + }, + async ({ + authenticatedWorker, + body, + params, + }): Promise> => { + const { runId, snapshotId } = params; + + const waitResult = await authenticatedWorker.waitForDuration({ + runId, + snapshotId, + date: body.date, + }); + + return json(waitResult); + } +); diff 
--git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.latest.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.latest.ts new file mode 100644 index 0000000000..7e2f9c3abf --- /dev/null +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.latest.ts @@ -0,0 +1,28 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { WorkerApiRunLatestSnapshotResponseBody } from "@trigger.dev/worker"; +import { z } from "zod"; +import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; + +export const loader = createLoaderWorkerApiRoute( + { + params: z.object({ + runId: z.string(), + }), + }, + async ({ + authenticatedWorker, + params, + }): Promise> => { + const { runId } = params; + + const executionData = await authenticatedWorker.getLatestSnapshot({ + runId, + }); + + if (!executionData) { + throw new Error("Failed to retrieve latest snapshot"); + } + + return json({ execution: executionData }); + } +); diff --git a/apps/webapp/app/v3/handleSocketIo.server.ts b/apps/webapp/app/v3/handleSocketIo.server.ts index b041def01a..8c35617ddb 100644 --- a/apps/webapp/app/v3/handleSocketIo.server.ts +++ b/apps/webapp/app/v3/handleSocketIo.server.ts @@ -8,7 +8,7 @@ import { SharedQueueToClientMessages, } from "@trigger.dev/core/v3"; import { ZodNamespace } from "@trigger.dev/core/v3/zodNamespace"; -import { Server, Socket } from "socket.io"; +import { Namespace, Server, Socket } from "socket.io"; import { env } from "~/env.server"; import { singleton } from "~/utils/singleton"; import { SharedSocketConnection } from "./sharedSocketConnection"; @@ -26,6 +26,7 @@ import { CrashTaskRunService } from "./services/crashTaskRun.server"; import { CreateTaskRunAttemptService } from "./services/createTaskRunAttempt.server"; import { UpdateFatalRunErrorService } from "./services/updateFatalRunError.server"; import { WorkerGroupTokenService } from 
"./services/worker/workerGroupTokenService.server"; +import type { WorkerClientToServerEvents, WorkerServerToClientEvents } from "@trigger.dev/worker"; export const socketIo = singleton("socketIo", initalizeIoServer); @@ -382,55 +383,139 @@ function headersFromHandshake(handshake: Socket["handshake"]) { } function createWorkerNamespace(io: Server) { - const provider = new ZodNamespace({ - // @ts-ignore - for some reason the built ZodNamespace Server type is not compatible with the Server type here, but only when doing typechecking - io, - name: "worker", - clientMessages: ProviderToPlatformMessages, - serverMessages: PlatformToProviderMessages, - preAuth: async (socket, next, logger) => { + const worker: Namespace = + io.of("/worker"); + + worker.use(async (socket, next) => { + try { + const headers = headersFromHandshake(socket.handshake); + + logger.debug("Worker authentication", { + socketId: socket.id, + headers: Object.fromEntries(headers), + }); + const request = new Request("https://example.com", { - headers: headersFromHandshake(socket.handshake), + headers, }); const tokenService = new WorkerGroupTokenService(); const authenticatedInstance = await tokenService.authenticate(request); if (!authenticatedInstance) { - logger.error("authentication failed", { handshake: socket.handshake }); - next(new Error("unauthorized")); - return; + throw new Error("unauthorized"); } - logger.debug("authentication succeeded", { authenticatedInstance }); - next(); - }, - handlers: { - WORKER_CRASHED: async (message) => { - try { - if (message.overrideCompletion) { - const updateErrorService = new UpdateFatalRunErrorService(); - await updateErrorService.call(message.runId, { ...message }); - } else { - const crashRunService = new CrashTaskRunService(); - await crashRunService.call(message.runId, { ...message }); - } - } catch (error) { - logger.error("Error while handling crashed worker", { error }); + } catch (error) { + logger.error("Worker authentication failed", { + error: 
error instanceof Error ? error.message : error, + }); + + socket.disconnect(true); + } + }); + + worker.on("connection", async (socket) => { + logger.debug("worker connected", { socketId: socket.id }); + + const rooms = new Set(); + + const interval = setInterval(() => { + logger.debug("Rooms for socket", { + socketId: socket.id, + rooms: Array.from(rooms), + }); + }, 5000); + + socket.on("disconnect", (reason, description) => { + logger.debug("worker disconnected", { + socketId: socket.id, + reason, + description, + }); + clearInterval(interval); + }); + + socket.on("disconnecting", (reason, description) => { + logger.debug("worker disconnecting", { + socketId: socket.id, + reason, + description, + }); + clearInterval(interval); + }); + + socket.on("error", (error) => { + logger.error("worker error", { + socketId: socket.id, + error: JSON.parse(JSON.stringify(error)), + }); + clearInterval(interval); + }); + + socket.on("run:subscribe", async ({ version, runIds }) => { + logger.debug("run:subscribe", { version, runIds }); + + const settledResult = await Promise.allSettled( + runIds.map((runId) => { + const room = roomFromRunId(runId); + + logger.debug("Joining room", { room }); + + socket.join(room); + rooms.add(room); + }) + ); + + for (const result of settledResult) { + if (result.status === "rejected") { + logger.error("Error joining room", { + runIds, + error: result.reason instanceof Error ? 
result.reason.message : result.reason, + }); } - }, - INDEXING_FAILED: async (message) => { - try { - const service = new DeploymentIndexFailed(); + } - await service.call(message.deploymentId, message.error, message.overrideCompletion); - } catch (e) { - logger.error("Error while indexing", { error: e }); + logger.debug("Rooms for socket after subscribe", { + socketId: socket.id, + rooms: Array.from(rooms), + }); + }); + + socket.on("run:unsubscribe", async ({ version, runIds }) => { + logger.debug("run:unsubscribe", { version, runIds }); + + const settledResult = await Promise.allSettled( + runIds.map((runId) => { + const room = roomFromRunId(runId); + + logger.debug("Leaving room", { room }); + + socket.leave(room); + rooms.delete(room); + }) + ); + + for (const result of settledResult) { + if (result.status === "rejected") { + logger.error("Error leaving room", { + runIds, + error: result.reason instanceof Error ? result.reason.message : result.reason, + }); } - }, - }, + } + + logger.debug("Rooms for socket after unsubscribe", { + socketId: socket.id, + rooms: Array.from(rooms), + }); + }); }); - return provider.namespace; + return worker; +} + +function roomFromRunId(runId: string) { + return `run:${runId}`; } diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index fedfad2417..7410ecf9ec 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -285,8 +285,7 @@ export function registerRunEngineEventBusHandlers() { logger.debug("[workerNotification] Notifying worker", { time, runId: run.id }); try { - // TODO: re-enable this - // socketIo.workerNamespace.to(`run:${run.id}`).emit("run:notify", { version: "1", run }); + socketIo.workerNamespace.to(`run:${run.id}`).emit("run:notify", { version: "1", run }); } catch (error) { logger.error("[workerNotification] Failed to notify worker", { error: error instanceof Error ? 
error.message : error, diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index e4d363ef81..32ddfe4594 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -3,6 +3,7 @@ import { WithRunEngine, WithRunEngineOptions } from "../baseService.server"; import { createHash, timingSafeEqual } from "crypto"; import { logger } from "~/services/logger.server"; import { + Prisma, RuntimeEnvironment, WorkerInstanceGroup, WorkerInstanceGroupType, @@ -21,7 +22,6 @@ import { import { env } from "~/env.server"; import { $transaction } from "~/db.server"; import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; -import { EnvironmentVariable } from "~/v3/environmentVariables/repository"; import { resolveVariablesForEnvironment } from "~/v3/environmentVariables/environmentVariablesRepository.server"; import { generateJWTTokenForEnvironment } from "~/services/apiAuth.server"; @@ -290,18 +290,50 @@ export class WorkerGroupTokenService extends WithRunEngine { ); } - return tx.workerInstance.create({ - data: { - workerGroupId: workerGroup.id, - name: instanceName, - resourceIdentifier, - }, - include: { - // This will always be empty for shared worker instances, but required for types - deployment: true, - environment: true, - }, - }); + try { + const newWorkerInstance = await tx.workerInstance.create({ + data: { + workerGroupId: workerGroup.id, + name: instanceName, + resourceIdentifier, + }, + include: { + // This will always be empty for shared worker instances, but required for types + deployment: true, + environment: true, + }, + }); + return newWorkerInstance; + } catch (error) { + // Gracefully handle race conditions when connecting for the first time + if (error instanceof Prisma.PrismaClientKnownRequestError) { + // Unique constraint violation + if (error.code === 
"P2002") { + try { + const existingWorkerInstance = await tx.workerInstance.findUnique({ + where: { + workerGroupId_resourceIdentifier: { + workerGroupId: workerGroup.id, + resourceIdentifier, + }, + }, + include: { + deployment: true, + environment: true, + }, + }); + return existingWorkerInstance; + } catch (error) { + logger.error("[WorkerGroupTokenService] Failed to find worker instance", { + workerGroup, + workerInstance, + deploymentId, + }); + return; + } + } + } + } } if (!workerGroup.projectId || !workerGroup.organizationId) { @@ -531,6 +563,46 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { }); } + /** Allows managed workers to dequeue from a specific version */ + async dequeueFromVersion( + backgroundWorkerId: string, + maxRunCount = 1 + ): Promise { + if (this.type !== WorkerInstanceGroupType.MANAGED) { + logger.error("[AuthenticatedWorkerInstance] Worker instance is not managed", { + ...this.toJSON(), + }); + return []; + } + + return await this._engine.dequeueFromBackgroundWorkerMasterQueue({ + consumerId: this.workerInstanceId, + backgroundWorkerId, + maxRunCount, + }); + } + + /** Allows managed workers to dequeue from a specific environment */ + async dequeueFromEnvironment( + backgroundWorkerId: string, + environmentId: string, + maxRunCount = 1 + ): Promise { + if (this.type !== WorkerInstanceGroupType.MANAGED) { + logger.error("[AuthenticatedWorkerInstance] Worker instance is not managed", { + ...this.toJSON(), + }); + return []; + } + + return await this._engine.dequeueFromEnvironmentMasterQueue({ + consumerId: this.workerInstanceId, + backgroundWorkerId, + environmentId, + maxRunCount, + }); + } + async heartbeatWorkerInstance() { await this._prisma.workerInstance.update({ where: { @@ -552,12 +624,20 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { return await this._engine.heartbeatRun({ runId, snapshotId }); } - async startRunAttempt({ runId, snapshotId }: { runId: string; snapshotId: string }): 
Promise< + async startRunAttempt({ + runId, + snapshotId, + isWarmStart, + }: { + runId: string; + snapshotId: string; + isWarmStart?: boolean; + }): Promise< StartRunAttemptResult & { envVars: Record; } > { - const engineResult = await this._engine.startRunAttempt({ runId, snapshotId }); + const engineResult = await this._engine.startRunAttempt({ runId, snapshotId, isWarmStart }); const defaultMachinePreset = { name: "small-1x", diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 90e9ef56bd..3a8a956043 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -783,6 +783,10 @@ export class RunEngine { id: result.worker.id, version: result.worker.version, }, + deployment: { + id: result.deployment?.id, + friendlyId: result.deployment?.friendlyId, + }, run: { id: lockedTaskRun.id, friendlyId: lockedTaskRun.friendlyId, @@ -802,7 +806,7 @@ export class RunEngine { project: { id: lockedTaskRun.projectId, }, - }; + } satisfies DequeuedMessage; }); if (dequeuedRun !== null) { @@ -903,10 +907,12 @@ export class RunEngine { async startRunAttempt({ runId, snapshotId, + isWarmStart, tx, }: { runId: string; snapshotId: string; + isWarmStart?: boolean; tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? this.prisma; @@ -1044,7 +1050,9 @@ export class RunEngine { run, snapshot: { executionStatus: "EXECUTING", - description: "Attempt created, starting execution", + description: `Attempt created, starting execution${ + isWarmStart ? 
" (warm start)" : "" + }`, }, }); diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 03c81e6518..4d161047ab 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -1,14 +1,26 @@ import { logger } from "../utilities/logger.js"; -import { TaskRunProcess } from "../executions/taskRunProcess.js"; +import { OnWaitMessage, TaskRunProcess } from "../executions/taskRunProcess.js"; import { env as stdEnv } from "std-env"; import { z } from "zod"; import { CLOUD_API_URL } from "../consts.js"; import { randomUUID } from "crypto"; import { readJSONFile } from "../utilities/fileSystem.js"; -import { HeartbeatService, WorkerManifest } from "@trigger.dev/core/v3"; -import { WorkloadHttpClient, type WorkloadRunAttemptStartResponseBody } from "@trigger.dev/worker"; +import { + DequeuedMessage, + HeartbeatService, + RunExecutionData, + WorkerManifest, +} from "@trigger.dev/core/v3"; +import { + WORKLOAD_HEADER_NAME, + WorkloadClientToServerEvents, + WorkloadHttpClient, + WorkloadServerToClientEvents, + type WorkloadRunAttemptStartResponseBody, +} from "@trigger.dev/worker"; import { assertExhaustive } from "../utilities/assertExhaustive.js"; import { setTimeout as wait } from "timers/promises"; +import { io, Socket } from "socket.io-client"; const Env = z.object({ TRIGGER_API_URL: z.string().url().default(CLOUD_API_URL), @@ -25,6 +37,9 @@ const Env = z.object({ NODE_ENV: z.string().default("production"), NODE_EXTRA_CA_CERTS: z.string().optional(), OTEL_EXPORTER_OTLP_ENDPOINT: z.string().default("http://0.0.0.0:3030/otel"), + TRIGGER_WARM_START_URL: z.string().optional(), + TRIGGER_MACHINE_CPU: z.string().default("0"), + TRIGGER_MACHINE_MEMORY: z.string().default("0"), }); const env = Env.parse(stdEnv); @@ -40,10 +55,17 @@ class ManagedRunController { private taskRunProcess?: TaskRunProcess; private workerManifest: 
WorkerManifest; + private readonly httpClient: WorkloadHttpClient; + + private socket?: Socket; + private readonly heartbeatService: HeartbeatService; private readonly heartbeatIntervalSeconds: number; + private readonly snapshotPollService: HeartbeatService; + private readonly snapshotPollIntervalSeconds: number; + private runId?: string; private snapshotId?: string; @@ -53,6 +75,7 @@ class ManagedRunController { this.workerManifest = opts.workerManifest; // TODO: This should be dynamic and set by (or at least overridden by) the managed worker / platform this.heartbeatIntervalSeconds = opts.heartbeatIntervalSeconds || 30; + this.snapshotPollIntervalSeconds = 5; this.runId = env.TRIGGER_RUN_ID; this.snapshotId = env.TRIGGER_SNAPSHOT_ID; @@ -62,6 +85,47 @@ class ManagedRunController { deploymentId: env.TRIGGER_DEPLOYMENT_ID, }); + this.snapshotPollService = new HeartbeatService({ + heartbeat: async () => { + if (!this.runId) { + logger.debug("[ManagedRunController] Skipping snapshot poll, no run ID"); + return; + } + + console.debug("[ManagedRunController] Polling for latest snapshot"); + + const response = await this.httpClient.getRunExecutionData(this.runId); + + if (!response.success) { + console.error("[ManagedRunController] Snapshot poll failed", { error: response.error }); + return; + } + + const { snapshot } = response.data.execution; + + if (snapshot.id === this.snapshotId) { + console.debug("[ManagedRunController] Snapshot not changed", { + snapshotId: this.snapshotId, + }); + return; + } + + console.log("Snapshot changed", { + oldSnapshotId: this.snapshotId, + newSnapshotId: snapshot.id, + }); + + this.snapshotId = snapshot.id; + + await this.handleSnapshotChange(response.data.execution); + }, + intervalMs: this.snapshotPollIntervalSeconds * 1000, + leadingEdge: false, + onError: async (error) => { + console.error("[ManagedRunController] Failed to poll for snapshot", { error }); + }, + }); + this.heartbeatService = new HeartbeatService({ heartbeat: async 
() => { if (!this.runId || !this.snapshotId) { @@ -93,7 +157,43 @@ class ManagedRunController { }); } - private async startAndExecuteRunAttempt() { + private async handleSnapshotChange({ run, snapshot, completedWaitpoints }: RunExecutionData) { + console.log("Got latest snapshot", { snapshot, currentSnapshotId: this.snapshotId }); + + this.snapshotId = snapshot.id; + + switch (snapshot.executionStatus) { + case "PENDING_CANCEL": { + try { + await this.cancelAttempt(run.id); + } catch (error) { + console.error("Failed to cancel attempt, shutting down", { + error, + }); + process.exit(1); + } + break; + } + case "FINISHED": { + console.log("Run is finished, shutting down shortly"); + return; + } + default: { + console.log("Status change not handled yet", { status: snapshot.executionStatus }); + // assertExhaustive(snapshot.executionStatus); + break; + } + } + + if (completedWaitpoints.length > 0) { + console.log("Got completed waitpoints", { completedWaitpoints }); + completedWaitpoints.forEach((waitpoint) => { + this.taskRunProcess?.waitpointCompleted(waitpoint); + }); + } + } + + private async startAndExecuteRunAttempt(isWarmStart = false) { if (!this.runId || !this.snapshotId) { logger.debug("[ManagedRunController] Missing run ID or snapshot ID", { runId: this.runId, @@ -102,19 +202,34 @@ class ManagedRunController { process.exit(1); } - const start = await this.httpClient.startRunAttempt(this.runId, this.snapshotId); + if (!this.socket) { + console.warn("[ManagedRunController] Starting run without socket connection"); + } + + this.socket?.emit("run:start", { + version: "1", + run: { id: this.runId }, + snapshot: { id: this.snapshotId }, + }); + + const start = await this.httpClient.startRunAttempt(this.runId, this.snapshotId, { + isWarmStart, + }); if (!start.success) { console.error("[ManagedRunController] Failed to start run", { error: start.error }); process.exit(1); } + const { run, snapshot, execution, envVars } = start.data; + 
logger.debug("[ManagedRunController] Started run", { - runId: start.data.run.id, - snapshot: start.data.snapshot.id, + runId: run.id, + snapshot: snapshot.id, }); - const { run, snapshot, execution, envVars } = start.data; + this.runId = run.id; + this.snapshotId = snapshot.id; const taskRunEnv = { ...gatherProcessEnv(), @@ -128,7 +243,13 @@ class ManagedRunController { error, }); - const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { + console.log("Submitting attempt completion", { + runId: run.id, + snapshotId: snapshot.id, + updatedSnapshotId: this.snapshotId, + }); + + const completionResult = await this.httpClient.completeRunAttempt(run.id, this.snapshotId, { completion: { id: execution.run.id, ok: false, @@ -144,23 +265,99 @@ class ManagedRunController { process.exit(1); } - logger.log("completed run", completionResult.data.result); + logger.log("Attempt completion submitted", completionResult.data.result); + } finally { + this.runId = undefined; + this.snapshotId = undefined; + + this.waitForNextRun(); } } - async start() { - logger.debug("[ManagedRunController] Starting up"); + private async waitForNextRun() { + try { + const warmStartUrl = new URL( + "/warm-start", + env.TRIGGER_WARM_START_URL ?? 
env.TRIGGER_WORKER_API_URL + ); + + const res = await longPoll( + warmStartUrl.href, + { + method: "GET", + headers: { + "x-trigger-workload-controller-id": env.TRIGGER_WORKLOAD_CONTROLLER_ID, + "x-trigger-deployment-id": env.TRIGGER_DEPLOYMENT_ID, + "x-trigger-deployment-version": env.TRIGGER_DEPLOYMENT_VERSION, + "x-trigger-machine-cpu": env.TRIGGER_MACHINE_CPU, + "x-trigger-machine-memory": env.TRIGGER_MACHINE_MEMORY, + }, + }, + { + timeoutMs: 10_000, + totalDurationMs: 60_000, + } + ); - // TODO: remove this after testing - setTimeout(() => { - // exit after 5 minutes - console.error("[ManagedRunController] Exiting after 5 minutes"); + if (!res.ok) { + console.error("Failed to poll for next run", { error: res.error }); + process.exit(0); + } + + const nextRun = DequeuedMessage.parse(res.data); + + console.log("Got next run", { nextRun }); + + this.runId = nextRun.run.id; + this.snapshotId = nextRun.snapshot.id; + + this.startAndExecuteRunAttempt(true); + } catch (error) { + console.error("Unexpected error while polling for next run", { error }); process.exit(1); - }, 60 * 5000); + } + } - this.heartbeatService.start(); + createSocket() { + const wsUrl = new URL(env.TRIGGER_WORKER_API_URL); + wsUrl.pathname = "/workload"; - this.startAndExecuteRunAttempt(); + this.socket = io(wsUrl.href, { + transports: ["websocket"], + extraHeaders: { + [WORKLOAD_HEADER_NAME.WORKLOAD_DEPLOYMENT_ID]: env.TRIGGER_DEPLOYMENT_ID, + }, + }); + this.socket.on("run:notify", async ({ version, run }) => { + console.log("[ManagedRunController] Received run notification", { version, run }); + + if (run.id !== this.runId) { + console.log("[ManagedRunController] Ignoring notification for different run", { + runId: run.id, + currentRunId: this.runId, + currentSnapshotId: this.snapshotId, + }); + return; + } + + const latestSnapshot = await this.httpClient.getRunExecutionData(run.id); + + if (!latestSnapshot.success) { + console.error("Failed to get latest snapshot data", 
latestSnapshot.error); + return; + } + + await this.handleSnapshotChange(latestSnapshot.data.execution); + }); + this.socket.on("connect", () => { + console.log("[ManagedRunController] Connected to platform"); + }); + this.socket.on("connect_error", (error) => { + console.error("[ManagedRunController] Connection error", { error }); + }); + this.socket.on("disconnect", (reason, description) => { + console.log("[ManagedRunController] Disconnected from platform", { reason, description }); + }); } private async executeRun({ @@ -184,6 +381,8 @@ class ManagedRunController { messageId: run.id, }); + this.taskRunProcess.onWait.attach(this.handleWait.bind(this)); + await this.taskRunProcess.initialize(); logger.log("executing task run process", { @@ -203,7 +402,15 @@ class ManagedRunController { }); } - const completionResult = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { + if (!this.runId || !this.snapshotId) { + console.error("Missing run ID or snapshot ID after execution", { + runId: this.runId, + snapshotId: this.snapshotId, + }); + process.exit(1); + } + + const completionResult = await this.httpClient.completeRunAttempt(run.id, this.snapshotId, { completion, }); @@ -218,21 +425,22 @@ class ManagedRunController { const { attemptStatus } = completionResult.data.result; + this.runId = completionResult.data.result.run.id; this.snapshotId = completionResult.data.result.snapshot.id; if (attemptStatus === "RUN_FINISHED") { - logger.debug("Run finished, shutting down"); - process.exit(0); + logger.debug("Run finished"); + return; } if (attemptStatus === "RUN_PENDING_CANCEL") { - logger.debug("Run pending cancel, shutting down"); - process.exit(0); + logger.debug("Run pending cancel"); + return; } if (attemptStatus === "RETRY_QUEUED") { - logger.debug("Retry queued, shutting down"); - process.exit(0); + logger.debug("Retry queued"); + return; } if (attemptStatus === "RETRY_IMMEDIATELY") { @@ -253,6 +461,59 @@ class ManagedRunController { 
assertExhaustive(attemptStatus); } + private async handleWait({ wait }: OnWaitMessage) { + if (!this.runId || !this.snapshotId) { + logger.debug("[ManagedRunController] Ignoring wait, no run ID or snapshot ID"); + return; + } + + switch (wait.type) { + case "DATETIME": { + logger.log("Waiting for duration", { wait }); + + const waitpoint = await this.httpClient.waitForDuration(this.runId, this.snapshotId, { + date: wait.date, + }); + + if (!waitpoint.success) { + console.error("Failed to wait for datetime", { error: waitpoint.error }); + return; + } + + logger.log("Waitpoint created", { waitpointData: waitpoint.data }); + + this.taskRunProcess?.waitpointCreated(wait.id, waitpoint.data.waitpoint.id); + + break; + } + default: { + console.error("Wait type not implemented", { wait }); + } + } + } + + async cancelAttempt(runId: string) { + logger.log("cancelling attempt", { runId }); + + await this.taskRunProcess?.cancel(); + } + + async start() { + logger.debug("[ManagedRunController] Starting up"); + + // TODO: remove this after testing + setTimeout(() => { + console.error("[ManagedRunController] Exiting after 5 minutes"); + process.exit(1); + }, 60 * 5000); + + this.heartbeatService.start(); + this.createSocket(); + + this.startAndExecuteRunAttempt(); + this.snapshotPollService.start(); + } + async stop() { logger.debug("[ManagedRunController] Shutting down"); @@ -261,6 +522,7 @@ class ManagedRunController { } this.heartbeatService.stop(); + this.socket?.close(); } } @@ -286,3 +548,70 @@ async function loadWorkerManifest() { const manifest = await readJSONFile("./index.json"); return WorkerManifest.parse(manifest); } + +const longPoll = async ( + url: string, + requestInit: Omit, + { + timeoutMs, + totalDurationMs, + }: { + timeoutMs: number; + totalDurationMs: number; + } +): Promise< + | { + ok: true; + data: T; + } + | { + ok: false; + error: string; + } +> => { + const endTime = Date.now() + totalDurationMs; + + while (Date.now() < endTime) { + try { + const 
controller = new AbortController(); + const signal = controller.signal; + + // TODO: Think about using a random timeout instead + const timeoutId = setTimeout(() => controller.abort(), timeoutMs); + + const response = await fetch(url, { ...requestInit, signal }); + + clearTimeout(timeoutId); + + if (response.ok) { + const data = await response.json(); + + return { + ok: true, + data, + }; + } else { + return { + ok: false, + error: `Server error: ${response.status}`, + }; + } + } catch (error) { + if (error instanceof Error && error.name === "AbortError") { + console.log("Request timed out, retrying..."); + continue; + } else { + console.error("Error during fetch, retrying...", error); + + // TODO: exponential backoff + await wait(1000); + continue; + } + } + } + + return { + ok: false, + error: "TotalDurationExceeded", + }; +}; diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts index ab35ef7638..26345e5b5a 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts @@ -6,7 +6,7 @@ import { CLOUD_API_URL } from "../consts.js"; import { randomUUID } from "crypto"; import { readJSONFile } from "../utilities/fileSystem.js"; import { WorkerManifest } from "@trigger.dev/core/v3"; -import { WorkerSession } from "@trigger.dev/worker"; +import { SupervisorSession } from "@trigger.dev/worker"; const Env = z.object({ TRIGGER_API_URL: z.string().default(CLOUD_API_URL), @@ -26,11 +26,11 @@ logger.loggerLevel = "debug"; logger.debug("Creating unmanaged worker", { env }); class UnmanagedRunController { - private readonly session: WorkerSession; + private readonly session: SupervisorSession; private taskRunProcess?: TaskRunProcess; constructor(private workerManifest: WorkerManifest) { - this.session = new WorkerSession({ + this.session = new SupervisorSession({ workerToken: env.TRIGGER_WORKER_TOKEN, apiUrl: 
env.TRIGGER_API_URL, instanceName: env.TRIGGER_WORKER_INSTANCE_NAME, diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index d59f4af51c..02414620d5 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -100,6 +100,10 @@ export const DequeuedMessage = z.object({ id: z.string(), version: z.string(), }), + deployment: z.object({ + id: z.string().optional(), + friendlyId: z.string().optional(), + }), run: z.object({ id: z.string(), friendlyId: z.string(), diff --git a/packages/worker/package.json b/packages/worker/package.json index 0824feab2b..393c6224d6 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -38,6 +38,8 @@ }, "dependencies": { "@trigger.dev/core": "workspace:*", + "socket.io": "4.7.4", + "socket.io-client": "4.7.5", "zod": "3.23.8" }, "devDependencies": { diff --git a/packages/worker/src/client/util.ts b/packages/worker/src/client/util.ts deleted file mode 100644 index 953d71766d..0000000000 --- a/packages/worker/src/client/util.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { HEADER_NAME } from "../consts.js"; -import { createHeaders } from "../util.js"; -import { WorkerClientCommonOptions } from "./types.js"; - -export function getDefaultWorkerHeaders( - options: WorkerClientCommonOptions -): Record { - return createHeaders({ - Authorization: `Bearer ${options.workerToken}`, - [HEADER_NAME.WORKER_INSTANCE_NAME]: options.instanceName, - [HEADER_NAME.WORKER_DEPLOYMENT_ID]: options.deploymentId, - [HEADER_NAME.WORKER_MANAGED_SECRET]: options.managedWorkerSecret, - }); -} diff --git a/packages/worker/src/client/websocket.ts b/packages/worker/src/client/websocket.ts deleted file mode 100644 index 4df6633e02..0000000000 --- a/packages/worker/src/client/websocket.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { ZodSocketConnection } from "@trigger.dev/core/v3/zodSocket"; -import { PlatformToWorkerMessages, WorkerToPlatformMessages } from 
"../messages.js"; -import { WorkerClientCommonOptions } from "./types.js"; -import { getDefaultWorkerHeaders } from "./util.js"; - -type WorkerWebsocketClientOptions = WorkerClientCommonOptions; - -export class WorkerWebsocketClient { - private readonly defaultHeaders: Record; - private platformSocket?: ZodSocketConnection< - typeof WorkerToPlatformMessages, - typeof PlatformToWorkerMessages - >; - - constructor(private opts: WorkerWebsocketClientOptions) { - this.defaultHeaders = getDefaultWorkerHeaders(opts); - } - - start() { - const websocketPort = this.getPort(this.opts.apiUrl); - this.platformSocket = new ZodSocketConnection({ - namespace: "worker", - host: this.getHost(this.opts.apiUrl), - port: websocketPort, - secure: websocketPort === 443, - extraHeaders: this.defaultHeaders, - clientMessages: WorkerToPlatformMessages, - serverMessages: PlatformToWorkerMessages, - handlers: {}, - }); - } - - stop() { - this.platformSocket?.close(); - } - - private getHost(apiUrl: string): string { - const url = new URL(apiUrl); - return url.hostname; - } - - private getPort(apiUrl: string): number { - const url = new URL(apiUrl); - const port = Number(url.port); - - if (!isNaN(port) && port !== 0) { - return port; - } - - return url.protocol === "https" ? 
443 : 80; - } -} diff --git a/packages/worker/src/index.ts b/packages/worker/src/index.ts index d4845c3ca2..ae89268885 100644 --- a/packages/worker/src/index.ts +++ b/packages/worker/src/index.ts @@ -1,7 +1,8 @@ export { VERSION as WORKER_VERSION } from "./version.js"; export * from "./consts.js"; -export * from "./client/http.js"; -export * from "./workerSession.js"; -export * from "./events.js"; +export * from "./supervisor/http.js"; +export * from "./supervisor/schemas.js"; +export * from "./supervisor/session.js"; export * from "./workload/http.js"; -export * from "./schemas.js"; +export * from "./workload/schemas.js"; +export * from "./types.js"; diff --git a/packages/worker/src/messages.ts b/packages/worker/src/messages.ts deleted file mode 100644 index 4b0e5e0673..0000000000 --- a/packages/worker/src/messages.ts +++ /dev/null @@ -1,80 +0,0 @@ -import { EnvironmentType, MachinePreset, TaskRunInternalError } from "@trigger.dev/core/v3"; -import { z } from "zod"; - -export const WorkerToPlatformMessages = { - LOG: { - message: z.object({ - version: z.literal("v1").default("v1"), - data: z.string(), - }), - }, - LOG_WITH_ACK: { - message: z.object({ - version: z.literal("v1").default("v1"), - data: z.string(), - }), - callback: z.object({ - status: z.literal("ok"), - }), - }, - WORKER_CRASHED: { - message: z.object({ - version: z.literal("v1").default("v1"), - runId: z.string(), - reason: z.string().optional(), - exitCode: z.number().optional(), - message: z.string().optional(), - logs: z.string().optional(), - /** This means we should only update the error if one exists */ - overrideCompletion: z.boolean().optional(), - errorCode: TaskRunInternalError.shape.code.optional(), - }), - }, - INDEXING_FAILED: { - message: z.object({ - version: z.literal("v1").default("v1"), - deploymentId: z.string(), - error: z.object({ - name: z.string(), - message: z.string(), - stack: z.string().optional(), - stderr: z.string().optional(), - }), - overrideCompletion: 
z.boolean().optional(), - }), - }, -}; - -export const PlatformToWorkerMessages = { - RESTORE: { - message: z.object({ - version: z.literal("v1").default("v1"), - type: z.enum(["DOCKER", "KUBERNETES"]), - location: z.string(), - reason: z.string().optional(), - imageRef: z.string(), - attemptNumber: z.number().optional(), - machine: MachinePreset, - // identifiers - checkpointId: z.string(), - envId: z.string(), - envType: EnvironmentType, - orgId: z.string(), - projectId: z.string(), - runId: z.string(), - }), - }, - PRE_PULL_DEPLOYMENT: { - message: z.object({ - version: z.literal("v1").default("v1"), - imageRef: z.string(), - shortCode: z.string(), - // identifiers - envId: z.string(), - envType: EnvironmentType, - orgId: z.string(), - projectId: z.string(), - deploymentId: z.string(), - }), - }, -}; diff --git a/packages/worker/src/events.ts b/packages/worker/src/supervisor/events.ts similarity index 88% rename from packages/worker/src/events.ts rename to packages/worker/src/supervisor/events.ts index f22221d621..10fb8a59c1 100644 --- a/packages/worker/src/events.ts +++ b/packages/worker/src/supervisor/events.ts @@ -41,6 +41,14 @@ export type WorkerEvents = { completion: TaskRunExecutionResult; }, ]; + runNotification: [ + { + time: Date; + run: { + id: string; + }; + }, + ]; }; export type WorkerEventArgs = WorkerEvents[T]; diff --git a/packages/worker/src/client/http.ts b/packages/worker/src/supervisor/http.ts similarity index 72% rename from packages/worker/src/client/http.ts rename to packages/worker/src/supervisor/http.ts index b80c86c4b3..c5f50f4bf2 100644 --- a/packages/worker/src/client/http.ts +++ b/packages/worker/src/supervisor/http.ts @@ -8,22 +8,26 @@ import { WorkerApiHeartbeatResponseBody, WorkerApiRunAttemptCompleteRequestBody, WorkerApiRunAttemptCompleteResponseBody, + WorkerApiRunAttemptStartRequestBody, WorkerApiRunAttemptStartResponseBody, WorkerApiRunHeartbeatRequestBody, WorkerApiRunHeartbeatResponseBody, -} from "../schemas.js"; -import { 
WorkerClientCommonOptions } from "./types.js"; + WorkerApiRunLatestSnapshotResponseBody, + WorkerApiWaitForDurationRequestBody, + WorkerApiWaitForDurationResponseBody, +} from "./schemas.js"; +import { SupervisorClientCommonOptions } from "./types.js"; import { getDefaultWorkerHeaders } from "./util.js"; -type WorkerHttpClientOptions = WorkerClientCommonOptions; +type SupervisorHttpClientOptions = SupervisorClientCommonOptions; -export class WorkerHttpClient { +export class SupervisorHttpClient { private readonly apiUrl: string; private readonly workerToken: string; private readonly instanceName: string; private readonly defaultHeaders: Record; - constructor(opts: WorkerHttpClientOptions) { + constructor(opts: SupervisorHttpClientOptions) { this.apiUrl = opts.apiUrl.replace(/\/$/, ""); this.workerToken = opts.workerToken; this.instanceName = opts.instanceName; @@ -69,6 +73,18 @@ export class WorkerHttpClient { ); } + async dequeueFromVersion(deploymentId: string) { + return wrapZodFetch( + WorkerApiDequeueResponseBody, + `${this.apiUrl}/api/v1/worker-actions/deployments/${deploymentId}/dequeue`, + { + headers: { + ...this.defaultHeaders, + }, + } + ); + } + async heartbeatWorker(body: WorkerApiHeartbeatRequestBody) { return wrapZodFetch( WorkerApiHeartbeatResponseBody, @@ -99,7 +115,11 @@ export class WorkerHttpClient { ); } - async startRunAttempt(runId: string, snapshotId: string) { + async startRunAttempt( + runId: string, + snapshotId: string, + body: WorkerApiRunAttemptStartRequestBody + ) { return wrapZodFetch( WorkerApiRunAttemptStartResponseBody, `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, @@ -108,6 +128,7 @@ export class WorkerHttpClient { headers: { ...this.defaultHeaders, }, + body: JSON.stringify(body), } ); } @@ -129,6 +150,38 @@ export class WorkerHttpClient { } ); } + + async getLatestSnapshot(runId: string) { + return wrapZodFetch( + WorkerApiRunLatestSnapshotResponseBody, + 
`${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/latest`, + { + method: "GET", + headers: { + ...this.defaultHeaders, + }, + } + ); + } + + async waitForDuration( + runId: string, + snapshotId: string, + body: WorkerApiWaitForDurationRequestBody + ) { + return wrapZodFetch( + WorkerApiWaitForDurationResponseBody, + `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/wait/duration`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + "Content-Type": "application/json", + }, + body: JSON.stringify(body), + } + ); + } } type ApiResult = diff --git a/packages/worker/src/queueConsumer.ts b/packages/worker/src/supervisor/queueConsumer.ts similarity index 84% rename from packages/worker/src/queueConsumer.ts rename to packages/worker/src/supervisor/queueConsumer.ts index 342aef0b48..c9cdc8cc58 100644 --- a/packages/worker/src/queueConsumer.ts +++ b/packages/worker/src/supervisor/queueConsumer.ts @@ -1,14 +1,14 @@ -import { WorkerHttpClient } from "./client/http.js"; +import { SupervisorHttpClient } from "./http.js"; import { WorkerApiDequeueResponseBody } from "./schemas.js"; type RunQueueConsumerOptions = { - client: WorkerHttpClient; + client: SupervisorHttpClient; intervalMs?: number; onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; }; export class RunQueueConsumer { - private readonly client: WorkerHttpClient; + private readonly client: SupervisorHttpClient; private readonly onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; private intervalMs: number; @@ -39,7 +39,8 @@ export class RunQueueConsumer { } private async dequeue() { - console.debug("[RunQueueConsumer] dequeue()", { enabled: this.isEnabled }); + // Incredibly verbose logging for debugging purposes + // console.debug("[RunQueueConsumer] dequeue()", { enabled: this.isEnabled }); if (!this.isEnabled) { return; @@ -65,7 +66,6 @@ export class RunQueueConsumer { } scheduleNextDequeue(delay: number = this.intervalMs) { - 
console.debug("[RunQueueConsumer] Scheduling next dequeue", { delay }); setTimeout(this.dequeue.bind(this), delay); } } diff --git a/packages/worker/src/schemas.ts b/packages/worker/src/supervisor/schemas.ts similarity index 68% rename from packages/worker/src/schemas.ts rename to packages/worker/src/supervisor/schemas.ts index a49a5359f0..cfeee07458 100644 --- a/packages/worker/src/schemas.ts +++ b/packages/worker/src/supervisor/schemas.ts @@ -2,11 +2,12 @@ import { z } from "zod"; import { CompleteRunAttemptResult, DequeuedMessage, + RunExecutionData, StartRunAttemptResult, TaskRunExecutionResult, + WaitForDurationResult, } from "@trigger.dev/core/v3"; -// Worker export const WorkerApiHeartbeatRequestBody = z.object({ cpu: z.object({ used: z.number(), @@ -49,6 +50,13 @@ export const WorkerApiRunHeartbeatResponseBody = z.object({ }); export type WorkerApiRunHeartbeatResponseBody = z.infer; +export const WorkerApiRunAttemptStartRequestBody = z.object({ + isWarmStart: z.boolean().optional(), +}); +export type WorkerApiRunAttemptStartRequestBody = z.infer< + typeof WorkerApiRunAttemptStartRequestBody +>; + export const WorkerApiRunAttemptStartResponseBody = StartRunAttemptResult.and( z.object({ envVars: z.record(z.string()), @@ -72,24 +80,26 @@ export type WorkerApiRunAttemptCompleteResponseBody = z.infer< typeof WorkerApiRunAttemptCompleteResponseBody >; -// Workload -export const WorkloadHeartbeatRequestBody = WorkerApiRunHeartbeatRequestBody; -export type WorkloadHeartbeatRequestBody = z.infer; - -export const WorkloadHeartbeatResponseBody = WorkerApiHeartbeatResponseBody; -export type WorkloadHeartbeatResponseBody = z.infer; +export const WorkerApiRunLatestSnapshotResponseBody = z.object({ + execution: RunExecutionData, +}); +export type WorkerApiRunLatestSnapshotResponseBody = z.infer< + typeof WorkerApiRunLatestSnapshotResponseBody +>; -export const WorkloadRunAttemptCompleteRequestBody = WorkerApiRunAttemptCompleteRequestBody; -export type 
WorkloadRunAttemptCompleteRequestBody = z.infer< - typeof WorkloadRunAttemptCompleteRequestBody +export const WorkerApiDequeueFromVersionResponseBody = DequeuedMessage.array(); +export type WorkerApiDequeueFromVersionResponseBody = z.infer< + typeof WorkerApiDequeueFromVersionResponseBody >; -export const WorkloadRunAttemptCompleteResponseBody = WorkerApiRunAttemptCompleteResponseBody; -export type WorkloadRunAttemptCompleteResponseBody = z.infer< - typeof WorkloadRunAttemptCompleteResponseBody +export const WorkerApiWaitForDurationRequestBody = z.object({ + date: z.coerce.date(), +}); +export type WorkerApiWaitForDurationRequestBody = z.infer< + typeof WorkerApiWaitForDurationRequestBody >; -export const WorkloadRunAttemptStartResponseBody = WorkerApiRunAttemptStartResponseBody; -export type WorkloadRunAttemptStartResponseBody = z.infer< - typeof WorkloadRunAttemptStartResponseBody +export const WorkerApiWaitForDurationResponseBody = WaitForDurationResult; +export type WorkerApiWaitForDurationResponseBody = z.infer< + typeof WorkerApiWaitForDurationResponseBody >; diff --git a/packages/worker/src/workerSession.ts b/packages/worker/src/supervisor/session.ts similarity index 51% rename from packages/worker/src/workerSession.ts rename to packages/worker/src/supervisor/session.ts index bff1431bdd..e06b66de24 100644 --- a/packages/worker/src/workerSession.ts +++ b/packages/worker/src/supervisor/session.ts @@ -1,31 +1,33 @@ import { HeartbeatService } from "@trigger.dev/core/v3"; -import { WorkerHttpClient } from "./client/http.js"; -import { WorkerClientCommonOptions } from "./client/types.js"; -import { WorkerWebsocketClient } from "./client/websocket.js"; +import { SupervisorHttpClient } from "./http.js"; +import { SupervisorClientCommonOptions } from "./types.js"; import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./schemas.js"; import { RunQueueConsumer } from "./queueConsumer.js"; -import { WorkerEventArgs, WorkerEvents } from 
"./events.js"; +import { WorkerEvents } from "./events.js"; import EventEmitter from "events"; -import { VERSION } from "./version.js"; +import { VERSION } from "../version.js"; +import { io, Socket } from "socket.io-client"; +import { WorkerClientToServerEvents, WorkerServerToClientEvents } from "../types.js"; +import { getDefaultWorkerHeaders } from "./util.js"; -type WorkerSessionOptions = WorkerClientCommonOptions & { +type SupervisorSessionOptions = SupervisorClientCommonOptions & { heartbeatIntervalSeconds?: number; dequeueIntervalMs?: number; }; -export class WorkerSession extends EventEmitter { - public readonly httpClient: WorkerHttpClient; +export class SupervisorSession extends EventEmitter { + public readonly httpClient: SupervisorHttpClient; + + private socket?: Socket; - private readonly websocketClient: WorkerWebsocketClient; private readonly queueConsumer: RunQueueConsumer; private readonly heartbeatService: HeartbeatService; private readonly heartbeatIntervalSeconds: number; - constructor(opts: WorkerSessionOptions) { + constructor(private opts: SupervisorSessionOptions) { super(); - this.httpClient = new WorkerHttpClient(opts); - this.websocketClient = new WorkerWebsocketClient(opts); + this.httpClient = new SupervisorHttpClient(opts); this.queueConsumer = new RunQueueConsumer({ client: this.httpClient, onDequeue: this.onDequeue.bind(this), @@ -51,14 +53,12 @@ export class WorkerSession extends EventEmitter { console.error("[WorkerSession] Failed to send heartbeat", { error }); }, }); - - this.on("requestRunAttemptStart", this.onRequestRunAttemptStart.bind(this)); - this.on("runAttemptCompleted", this.onRunAttemptCompleted.bind(this)); } private async onDequeue(messages: WorkerApiDequeueResponseBody): Promise { - console.log("[WorkerSession] Dequeued messages", { count: messages.length }); - console.debug("[WorkerSession] Dequeued messages with contents", messages); + // Incredibly verbose logging for debugging purposes + // 
console.log("[WorkerSession] Dequeued messages", { count: messages.length }); + // console.debug("[WorkerSession] Dequeued messages with contents", messages); for (const message of messages) { console.log("[WorkerSession] Emitting message", { message }); @@ -69,46 +69,48 @@ export class WorkerSession extends EventEmitter { } } - private async onRequestRunAttemptStart( - ...[{ time, run, snapshot }]: WorkerEventArgs<"requestRunAttemptStart"> - ): Promise { - console.log("[WorkerSession] onRequestRunAttemptStart", { time, run, snapshot }); + subscribeToRunNotifications(runIds: string[]) { + console.log("[WorkerSession] Subscribing to run notifications", { runIds }); - const start = await this.httpClient.startRunAttempt(run.id, snapshot.id); - - if (!start.success) { - console.error("[WorkerSession] Failed to start run", { error: start.error }); + if (!this.socket) { + console.error("[WorkerSession] Socket not connected"); return; } - console.log("[WorkerSession] Started run", { - runId: start.data.run.id, - snapshot: start.data.snapshot.id, - }); - - this.emit("runAttemptStarted", { - time: new Date(), - ...start.data, - }); + this.socket.emit("run:subscribe", { version: "1", runIds }); } - private async onRunAttemptCompleted( - ...[{ time, run, snapshot, completion }]: WorkerEventArgs<"runAttemptCompleted"> - ): Promise { - console.log("[WorkerSession] onRunAttemptCompleted", { time, run, snapshot, completion }); - - const complete = await this.httpClient.completeRunAttempt(run.id, snapshot.id, { - completion: completion, - }); + unsubscribeFromRunNotifications(runIds: string[]) { + console.log("[WorkerSession] Unsubscribing from run notifications", { runIds }); - if (!complete.success) { - console.error("[WorkerSession] Failed to complete run", { error: complete.error }); + if (!this.socket) { + console.error("[WorkerSession] Socket not connected"); return; } - console.log("[WorkerSession] Completed run", { - runId: run.id, - result: complete.data.result, + 
this.socket.emit("run:unsubscribe", { version: "1", runIds }); + } + + private createSocket() { + const wsUrl = new URL(this.opts.apiUrl); + wsUrl.pathname = "/worker"; + + this.socket = io(wsUrl.href, { + transports: ["websocket"], + extraHeaders: getDefaultWorkerHeaders(this.opts), + }); + this.socket.on("run:notify", ({ version, run }) => { + console.log("[WorkerSession] Received run notification", { version, run }); + this.emit("runNotification", { time: new Date(), run }); + }); + this.socket.on("connect", () => { + console.log("[WorkerSession] Connected to platform"); + }); + this.socket.on("connect_error", (error) => { + console.error("[WorkerSession] Connection error", { error }); + }); + this.socket.on("disconnect", (reason, description) => { + console.log("[WorkerSession] Disconnected from platform", { reason, description }); }); } @@ -126,12 +128,12 @@ export class WorkerSession extends EventEmitter { this.queueConsumer.start(); this.heartbeatService.start(); - this.websocketClient.start(); + this.createSocket(); } async stop() { this.heartbeatService.stop(); - this.websocketClient.stop(); + this.socket?.disconnect(); } private getHeartbeatBody(): WorkerApiHeartbeatRequestBody { diff --git a/packages/worker/src/client/types.ts b/packages/worker/src/supervisor/types.ts similarity index 73% rename from packages/worker/src/client/types.ts rename to packages/worker/src/supervisor/types.ts index 2da3415bb8..dfc3d21ed0 100644 --- a/packages/worker/src/client/types.ts +++ b/packages/worker/src/supervisor/types.ts @@ -1,4 +1,4 @@ -export type WorkerClientCommonOptions = { +export type SupervisorClientCommonOptions = { apiUrl: string; workerToken: string; instanceName: string; diff --git a/packages/worker/src/supervisor/util.ts b/packages/worker/src/supervisor/util.ts new file mode 100644 index 0000000000..2ed2b41c8c --- /dev/null +++ b/packages/worker/src/supervisor/util.ts @@ -0,0 +1,40 @@ +import { HEADER_NAME } from "../consts.js"; +import { createHeaders } 
from "../util.js"; +import { SupervisorClientCommonOptions } from "./types.js"; + +export function getDefaultWorkerHeaders( + options: SupervisorClientCommonOptions +): Record { + return createHeaders({ + Authorization: `Bearer ${options.workerToken}`, + [HEADER_NAME.WORKER_INSTANCE_NAME]: options.instanceName, + [HEADER_NAME.WORKER_DEPLOYMENT_ID]: options.deploymentId, + [HEADER_NAME.WORKER_MANAGED_SECRET]: options.managedWorkerSecret, + }); +} + +function redactString(value: string, end = 10) { + return value.slice(0, end) + "*".repeat(value.length - end); +} + +function redactNumber(value: number, end = 10) { + const str = String(value); + const redacted = redactString(str, end); + return Number(redacted); +} + +export function redactKeys>(obj: T, keys: Array): T { + const redacted = { ...obj }; + for (const key of keys) { + const value = obj[key]; + + if (typeof value === "number") { + redacted[key] = redactNumber(value) as any; + } else if (typeof value === "string") { + redacted[key] = redactString(value) as any; + } else { + continue; + } + } + return redacted; +} diff --git a/packages/worker/src/types.ts b/packages/worker/src/types.ts new file mode 100644 index 0000000000..f9fa1704aa --- /dev/null +++ b/packages/worker/src/types.ts @@ -0,0 +1,22 @@ +export interface WorkerServerToClientEvents { + "run:notify": (message: { version: "1"; run: { id: string } }) => void; +} + +export interface WorkerClientToServerEvents { + "run:subscribe": (message: { version: "1"; runIds: string[] }) => void; + "run:unsubscribe": (message: { version: "1"; runIds: string[] }) => void; +} + +export interface WorkloadServerToClientEvents { + "run:notify": (message: { version: "1"; run: { id: string } }) => void; +} + +export interface WorkloadClientToServerEvents { + "run:start": (message: { version: "1"; run: { id: string }; snapshot: { id: string } }) => void; +} + +export type WorkloadClientSocketData = { + deploymentId: string; + runId?: string; + snapshotId?: string; +}; 
diff --git a/packages/worker/src/workload/http.ts b/packages/worker/src/workload/http.ts index c643b91e30..f64248ba58 100644 --- a/packages/worker/src/workload/http.ts +++ b/packages/worker/src/workload/http.ts @@ -6,7 +6,12 @@ import { WorkloadRunAttemptCompleteRequestBody, WorkloadRunAttemptCompleteResponseBody, WorkloadRunAttemptStartResponseBody, -} from "../schemas.js"; + WorkloadRunLatestSnapshotResponseBody, + WorkloadDequeueFromVersionResponseBody, + WorkloadRunAttemptStartRequestBody, + WorkloadWaitForDurationRequestBody, + WorkloadWaitForDurationResponseBody, +} from "./schemas.js"; import { WorkloadClientCommonOptions } from "./types.js"; import { getDefaultWorkloadHeaders } from "./util.js"; @@ -46,7 +51,11 @@ export class WorkloadHttpClient { ); } - async startRunAttempt(runId: string, snapshotId: string) { + async startRunAttempt( + runId: string, + snapshotId: string, + body: WorkloadRunAttemptStartRequestBody + ) { return wrapZodFetch( WorkloadRunAttemptStartResponseBody, `${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, @@ -55,6 +64,7 @@ export class WorkloadHttpClient { headers: { ...this.defaultHeaders, }, + body: JSON.stringify(body), } ); } @@ -76,6 +86,51 @@ export class WorkloadHttpClient { } ); } + + async getRunExecutionData(runId: string) { + return wrapZodFetch( + WorkloadRunLatestSnapshotResponseBody, + `${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/latest`, + { + method: "GET", + headers: { + ...this.defaultHeaders, + }, + } + ); + } + + async waitForDuration( + runId: string, + snapshotId: string, + body: WorkloadWaitForDurationRequestBody + ) { + return wrapZodFetch( + WorkloadWaitForDurationResponseBody, + `${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/${snapshotId}/wait/duration`, + { + method: "POST", + headers: { + ...this.defaultHeaders, + "Content-Type": "application/json", + }, + body: JSON.stringify(body), + } + ); + } + + async dequeue() { + return 
wrapZodFetch( + WorkloadDequeueFromVersionResponseBody, + `${this.apiUrl}/api/v1/workload-actions/deployments/${this.deploymentId}/dequeue`, + { + method: "GET", + headers: { + ...this.defaultHeaders, + }, + } + ); + } } type ApiResult = diff --git a/packages/worker/src/workload/schemas.ts b/packages/worker/src/workload/schemas.ts new file mode 100644 index 0000000000..83971a1707 --- /dev/null +++ b/packages/worker/src/workload/schemas.ts @@ -0,0 +1,55 @@ +import { z } from "zod"; +import { + WorkerApiRunHeartbeatRequestBody, + WorkerApiHeartbeatResponseBody, + WorkerApiRunAttemptCompleteRequestBody, + WorkerApiRunAttemptCompleteResponseBody, + WorkerApiRunAttemptStartRequestBody, + WorkerApiRunAttemptStartResponseBody, + WorkerApiRunLatestSnapshotResponseBody, + WorkerApiDequeueFromVersionResponseBody, + WorkerApiWaitForDurationRequestBody, + WorkerApiWaitForDurationResponseBody, +} from "../supervisor/schemas.js"; + +export const WorkloadHeartbeatRequestBody = WorkerApiRunHeartbeatRequestBody; +export type WorkloadHeartbeatRequestBody = z.infer; + +export const WorkloadHeartbeatResponseBody = WorkerApiHeartbeatResponseBody; +export type WorkloadHeartbeatResponseBody = z.infer; + +export const WorkloadRunAttemptCompleteRequestBody = WorkerApiRunAttemptCompleteRequestBody; +export type WorkloadRunAttemptCompleteRequestBody = z.infer< + typeof WorkloadRunAttemptCompleteRequestBody +>; + +export const WorkloadRunAttemptCompleteResponseBody = WorkerApiRunAttemptCompleteResponseBody; +export type WorkloadRunAttemptCompleteResponseBody = z.infer< + typeof WorkloadRunAttemptCompleteResponseBody +>; + +export const WorkloadRunAttemptStartRequestBody = WorkerApiRunAttemptStartRequestBody; +export type WorkloadRunAttemptStartRequestBody = z.infer; + +export const WorkloadRunAttemptStartResponseBody = WorkerApiRunAttemptStartResponseBody; +export type WorkloadRunAttemptStartResponseBody = z.infer< + typeof WorkloadRunAttemptStartResponseBody +>; + +export const 
WorkloadRunLatestSnapshotResponseBody = WorkerApiRunLatestSnapshotResponseBody; +export type WorkloadRunLatestSnapshotResponseBody = z.infer< + typeof WorkloadRunLatestSnapshotResponseBody +>; + +export const WorkloadDequeueFromVersionResponseBody = WorkerApiDequeueFromVersionResponseBody; +export type WorkloadDequeueFromVersionResponseBody = z.infer< + typeof WorkloadDequeueFromVersionResponseBody +>; + +export const WorkloadWaitForDurationRequestBody = WorkerApiWaitForDurationRequestBody; +export type WorkloadWaitForDurationRequestBody = z.infer; + +export const WorkloadWaitForDurationResponseBody = WorkerApiWaitForDurationResponseBody; +export type WorkloadWaitForDurationResponseBody = z.infer< + typeof WorkloadWaitForDurationResponseBody +>; diff --git a/packages/worker/src/workload/websocket.ts b/packages/worker/src/workload/websocket.ts deleted file mode 100644 index 12bf1ac736..0000000000 --- a/packages/worker/src/workload/websocket.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { ZodSocketConnection } from "@trigger.dev/core/v3/zodSocket"; -import { PlatformToWorkerMessages, WorkerToPlatformMessages } from "../messages.js"; -import { WorkloadClientCommonOptions } from "./types.js"; -import { getDefaultWorkloadHeaders } from "./util.js"; - -type WorkerWebsocketClientOptions = WorkloadClientCommonOptions; - -export class WorkerWebsocketClient { - private readonly defaultHeaders: Record; - private platformSocket?: ZodSocketConnection< - typeof WorkerToPlatformMessages, - typeof PlatformToWorkerMessages - >; - - constructor(private opts: WorkerWebsocketClientOptions) { - this.defaultHeaders = getDefaultWorkloadHeaders(opts); - } - - start() { - const websocketPort = this.getPort(this.opts.workerApiUrl); - this.platformSocket = new ZodSocketConnection({ - namespace: "worker", - host: this.getHost(this.opts.workerApiUrl), - port: websocketPort, - secure: websocketPort === 443, - extraHeaders: this.defaultHeaders, - clientMessages: WorkerToPlatformMessages, - 
serverMessages: PlatformToWorkerMessages, - handlers: {}, - }); - } - - stop() { - this.platformSocket?.close(); - } - - private getHost(apiUrl: string): string { - const url = new URL(apiUrl); - return url.hostname; - } - - private getPort(apiUrl: string): number { - const url = new URL(apiUrl); - const port = Number(url.port); - - if (!isNaN(port) && port !== 0) { - return port; - } - - return url.protocol === "https" ? 443 : 80; - } -} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 6bcfe58105..59fa3fa6c5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1618,6 +1618,12 @@ importers: '@trigger.dev/core': specifier: workspace:* version: link:../core + socket.io: + specifier: 4.7.4 + version: 4.7.4 + socket.io-client: + specifier: 4.7.5 + version: 4.7.5 zod: specifier: 3.23.8 version: 3.23.8 @@ -28049,7 +28055,7 @@ packages: engines: {node: '>=10.0.0'} dependencies: '@socket.io/component-emitter': 3.1.0 - debug: 4.3.4 + debug: 4.3.7 engine.io-client: 6.5.3 socket.io-parser: 4.2.4 transitivePeerDependencies: @@ -28091,7 +28097,7 @@ packages: accepts: 1.3.8 base64id: 2.0.0 cors: 2.8.5 - debug: 4.3.4 + debug: 4.3.7 engine.io: 6.5.4 socket.io-adapter: 2.5.4 socket.io-parser: 4.2.4 From 93d25016faaff64803f9a173cc8a8fb8127eb3c7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 16 Dec 2024 14:55:27 +0000 Subject: [PATCH 277/485] Fix for Run Engine poll interval env var --- apps/webapp/app/v3/runEngine.server.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 11a91a383c..78004631a5 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -24,7 +24,7 @@ function createRunEngine() { worker: { workers: env.RUN_ENGINE_WORKER_COUNT, tasksPerWorker: env.RUN_ENGINE_TASKS_PER_WORKER, - pollIntervalMs: env.WORKER_POLL_INTERVAL, + pollIntervalMs: env.RUN_ENGINE_WORKER_POLL_INTERVAL, }, machines: { defaultMachine: 
defaultMachine, From f572c0f590641013b1f1c8f891fa918591778f88 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 16 Dec 2024 15:40:32 +0000 Subject: [PATCH 278/485] Expect the waitpoint to be completed quickly --- .../run-engine/src/engine/tests/waitpoints.test.ts | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 0e0ee7abf5..419efacb85 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -84,8 +84,10 @@ describe("RunEngine Waitpoints", () => { }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + const durationMs = 1_000; + //waitForDuration - const date = new Date(Date.now() + 1000); + const date = new Date(Date.now() + durationMs); const result = await engine.waitForDuration({ runId: run.id, snapshotId: attemptResult.snapshot.id, @@ -101,6 +103,14 @@ describe("RunEngine Waitpoints", () => { await setTimeout(1_500); + const waitpoint = await prisma.waitpoint.findFirst({ + where: { + id: result.waitpoint.id, + }, + }); + expect(waitpoint?.status).toBe("COMPLETED"); + expect(waitpoint?.completedAt?.getTime()).toBeLessThanOrEqual(date.getTime() + 200); + const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING"); } finally { @@ -578,7 +588,7 @@ describe("RunEngine Waitpoints", () => { }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - const iterationCount = 50; + const iterationCount = 10; for (let i = 0; i < iterationCount; i++) { const waitpointCount = 5; From fb5693201d2a04bd977094cb1898c644e1e5fe8d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 10:35:14 +0000 Subject: [PATCH 279/485] =?UTF-8?q?If=20a=20run=20is=20locked=20then=20it?= 
=?UTF-8?q?=E2=80=99s=20too=20late=20to=20expire=20it?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal-packages/run-engine/src/engine/index.ts | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 3a8a956043..760389d301 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1941,6 +1941,13 @@ export class RunEngine { return; } + if (run.lockedAt) { + this.logger.debug("Run cannot be expired because it's locked, so will run", { + run, + }); + return; + } + const error: TaskRunError = { type: "STRING_ERROR", raw: `Run expired because the TTL (${run.ttl}) was reached`, From e01288d79821dea4b4695cb11247de15ea418844 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 11:18:09 +0000 Subject: [PATCH 280/485] Added VALKEY_ env vars and plugged them into the run engine --- apps/webapp/app/env.server.ts | 27 ++++++++++++++++++++++++++ apps/webapp/app/v3/runEngine.server.ts | 10 +++++----- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/apps/webapp/app/env.server.ts b/apps/webapp/app/env.server.ts index 6d5c26dd64..358107eff6 100644 --- a/apps/webapp/app/env.server.ts +++ b/apps/webapp/app/env.server.ts @@ -84,6 +84,33 @@ const EnvironmentSchema = z.object({ REDIS_PASSWORD: z.string().optional(), REDIS_TLS_DISABLED: z.string().optional(), + // Valkey options (used in Run Engine 2.0+) + VALKEY_HOST: z + .string() + .nullish() + .default(process.env.REDIS_HOST ?? null), + VALKEY_READER_HOST: z + .string() + .nullish() + .default(process.env.REDIS_READER_HOST ?? null), + VALKEY_READER_PORT: z.coerce + .number() + .nullish() + .default(process.env.REDIS_READER_PORT ? parseInt(process.env.REDIS_READER_PORT) : null), + VALKEY_PORT: z.coerce + .number() + .nullish() + .default(process.env.REDIS_PORT ? 
parseInt(process.env.REDIS_PORT) : null), + VALKEY_USERNAME: z + .string() + .nullish() + .default(process.env.REDIS_USERNAME ?? null), + VALKEY_PASSWORD: z + .string() + .nullish() + .default(process.env.REDIS_PASSWORD ?? null), + VALKEY_TLS_DISABLED: z.string().default(process.env.REDIS_TLS_DISABLED ?? "false"), + DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT: z.coerce.number().int().default(10), DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT: z.coerce.number().int().default(10), DEFAULT_DEV_ENV_EXECUTION_ATTEMPTS: z.coerce.number().int().positive().default(1), diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 78004631a5..5e30fe6321 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -14,12 +14,12 @@ function createRunEngine() { const engine = new RunEngine({ prisma, redis: { - port: env.REDIS_PORT, - host: env.REDIS_HOST, - username: env.REDIS_USERNAME, - password: env.REDIS_PASSWORD, + port: env.VALKEY_PORT ?? undefined, + host: env.VALKEY_HOST ?? undefined, + username: env.VALKEY_USERNAME ?? undefined, + password: env.VALKEY_PASSWORD ?? undefined, enableAutoPipelining: true, - ...(env.REDIS_TLS_DISABLED === "true" ? {} : { tls: {} }), + ...(env.VALKEY_TLS_DISABLED === "true" ? 
{} : { tls: {} }), }, worker: { workers: env.RUN_ENGINE_WORKER_COUNT, From 4c9e0240966df166d4627b554fd7b6d71f32fe2e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 12:16:15 +0000 Subject: [PATCH 281/485] Extracted and updated the guard queue function so it can be used when batching --- .../app/v3/services/triggerTaskV2.server.ts | 40 +++++++++++-------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index efcfebebf3..737a582a97 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -27,6 +27,7 @@ import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask. import { Prisma } from "@trigger.dev/database"; import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; import { clampMaxDuration } from "../utils/maxDuration"; +import { RunEngine } from "@internal/run-engine"; /** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. 
*/ export class TriggerTaskServiceV2 extends WithRunEngine { @@ -117,7 +118,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { } if (!options.skipChecks) { - const queueSizeGuard = await this.#guardQueueSizeLimitsForEnv(environment); + const queueSizeGuard = await guardQueueSizeLimitsForEnv(this._engine, environment); logger.debug("Queue size guard result", { queueSizeGuard, @@ -465,22 +466,6 @@ export class TriggerTaskServiceV2 extends WithRunEngine { return { dataType: payloadType }; } - - async #guardQueueSizeLimitsForEnv(environment: AuthenticatedEnvironment) { - const maximumSize = getMaximumSizeForEnvironment(environment); - - if (typeof maximumSize === "undefined") { - return { isWithinLimits: true }; - } - - const queueSize = await this._engine.lengthOfEnvQueue(environment); - - return { - isWithinLimits: queueSize < maximumSize, - maximumSize, - queueSize, - }; - } } function getMaximumSizeForEnvironment(environment: AuthenticatedEnvironment): number | undefined { @@ -490,3 +475,24 @@ function getMaximumSizeForEnvironment(environment: AuthenticatedEnvironment): nu return environment.organization.maximumDeployedQueueSize ?? 
env.MAXIMUM_DEPLOYED_QUEUE_SIZE; } } + +export async function guardQueueSizeLimitsForEnv( + engine: RunEngine, + environment: AuthenticatedEnvironment, + itemsToAdd: number = 1 +) { + const maximumSize = getMaximumSizeForEnvironment(environment); + + if (typeof maximumSize === "undefined") { + return { isWithinLimits: true }; + } + + const queueSize = await engine.lengthOfEnvQueue(environment); + const projectedSize = queueSize + itemsToAdd; + + return { + isWithinLimits: projectedSize <= maximumSize, + maximumSize, + queueSize, + }; +} From f1e2736d355eaac60fd0dfb47b0526f6e0bc9384 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 12:50:36 +0000 Subject: [PATCH 282/485] Added logging and universal concurrency changes to trigger task v1 --- .../app/v3/services/triggerTaskV1.server.ts | 38 ++++++++++++++++--- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV1.server.ts b/apps/webapp/app/v3/services/triggerTaskV1.server.ts index 81f7e58804..0725122891 100644 --- a/apps/webapp/app/v3/services/triggerTaskV1.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV1.server.ts @@ -29,6 +29,7 @@ import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; import { Prisma } from "@trigger.dev/database"; import { parseDelay } from "~/utils/delays"; import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask.server"; +import { removeQueueConcurrencyLimits, updateQueueConcurrencyLimits } from "../runQueue.server"; export class TriggerTaskServiceV1 extends BaseService { public async call( @@ -421,9 +422,16 @@ export class TriggerTaskServiceV1 extends BaseService { if (body.options?.queue) { const concurrencyLimit = - typeof body.options.queue.concurrencyLimit === "number" - ? Math.max(0, body.options.queue.concurrencyLimit) - : undefined; + typeof body.options.queue?.concurrencyLimit === "number" + ? 
Math.max( + Math.min( + body.options.queue.concurrencyLimit, + environment.maximumConcurrencyLimit, + environment.organization.maximumConcurrencyLimit + ), + 0 + ) + : null; let taskQueue = await tx.taskQueue.findFirst({ where: { @@ -450,13 +458,33 @@ export class TriggerTaskServiceV1 extends BaseService { }); if (typeof taskQueue.concurrencyLimit === "number") { - await marqs?.updateQueueConcurrencyLimits( + logger.debug("TriggerTaskService: updating concurrency limit", { + runId: taskRun.id, + friendlyId: taskRun.friendlyId, + taskQueue, + orgId: environment.organizationId, + projectId: environment.projectId, + existingConcurrencyLimit, + concurrencyLimit, + queueOptions: body.options?.queue, + }); + await updateQueueConcurrencyLimits( environment, taskQueue.name, taskQueue.concurrencyLimit ); } else { - await marqs?.removeQueueConcurrencyLimits(environment, taskQueue.name); + logger.debug("TriggerTaskService: removing concurrency limit", { + runId: taskRun.id, + friendlyId: taskRun.friendlyId, + taskQueue, + orgId: environment.organizationId, + projectId: environment.projectId, + existingConcurrencyLimit, + concurrencyLimit, + queueOptions: body.options?.queue, + }); + await removeQueueConcurrencyLimits(environment, taskQueue.name); } } } else { From fb433de78623ad72bce605c8306bedfe17ce8e3c Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 12:50:56 +0000 Subject: [PATCH 283/485] Added notes back in --- .../app/v3/services/triggerTask.server.ts | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index 072cb2b625..9543460e57 100644 --- a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -52,6 +52,26 @@ export class TriggerTaskService extends WithRunEngine { return await this.callV1(taskId, environment, body, options); } + //todo Additional checks + /* + - If the 
`triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. + - Add an `engine` column to `Project` in the database. + + Add `engine` to the trigger.config file. It would default to "V1" for now, but you can set it to V2. + + You run `npx trigger.dev@latest deploy` with config v2. + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. + + You run `npx trigger.dev@latest dev` with config v2 + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. + + When triggering + - triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. + - No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + */ + return await this.callV2(taskId, environment, body, options); }); } From 1238ef756134357e973c345b2c0a5e83133f4758 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 12:51:38 +0000 Subject: [PATCH 284/485] Bump @trigger.dev/worker to 3.3.7 --- packages/worker/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/worker/package.json b/packages/worker/package.json index 393c6224d6..ed5eb5e6fb 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -1,6 +1,6 @@ { "name": "@trigger.dev/worker", - "version": "3.3.5", + "version": "3.3.7", "description": "trigger.dev worker", "license": "MIT", "publishConfig": { From 24908828a0bcd25bcc4635dc8cb629457dadb131 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 14:08:51 +0000 Subject: [PATCH 285/485] reportInvocationUsage for the runAttemptStarted event --- apps/webapp/app/v3/runEngineHandlers.server.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index 7410ecf9ec..361a3d32de 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ 
b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -220,13 +220,13 @@ export function registerRunEngineEventBusHandlers() { } }); - engine.eventBus.on("runRetryScheduled", async ({ time, run, organization }) => { + engine.eventBus.on("runAttemptStarted", async ({ time, run, organization }) => { try { if (run.attemptNumber === 1 && run.baseCostInCents > 0) { await reportInvocationUsage(organization.id, run.baseCostInCents, { runId: run.id }); } } catch (error) { - logger.error("[runRetryScheduled] Failed to report invocation usage", { + logger.error("[runAttemptStarted] Failed to report invocation usage", { error: error instanceof Error ? error.message : error, runId: run.id, orgId: organization.id, From a24644b2ab02ab33752e993dfc9e9c4fb760f0d4 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:14:33 +0000 Subject: [PATCH 286/485] improve execution snapshot span debug span start times --- apps/webapp/app/v3/runEngineHandlers.server.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index 361a3d32de..ac9089d5e4 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -271,6 +271,7 @@ export function registerRunEngineEventBusHandlers() { }, }, duration: 0, + startTime: BigInt(time.getTime() * 1_000_000), } ); } catch (error) { From 3df81585154d10f94f12388169d09741f2ae78cb Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 17 Dec 2024 14:18:51 +0000 Subject: [PATCH 287/485] Unfriendly IDs --- .../route.tsx | 2 +- .../routes/api.v1.tasks.$taskId.trigger.ts | 1 - ....$snapshotFriendlyId.attempts.complete.ts} | 10 +- ...ots.$snapshotFriendlyId.attempts.start.ts} | 10 +- ...napshots.$snapshotFriendlyId.heartbeat.ts} | 10 +- ...hots.$snapshotFriendlyId.wait.duration.ts} | 10 +- 
...s.runs.$runFriendlyId.snapshots.latest.ts} | 6 +- apps/webapp/app/v3/handleSocketIo.server.ts | 24 +- .../webapp/app/v3/runEngineHandlers.server.ts | 10 +- .../app/v3/services/batchTriggerV2.server.ts | 16 +- .../services/createBackgroundWorker.server.ts | 3 +- .../v3/services/createCheckpoint.server.ts | 4 +- .../createDeployedBackgroundWorker.server.ts | 3 +- ...createDeploymentBackgroundWorker.server.ts | 4 +- .../app/v3/services/replayTaskRun.server.ts | 4 +- .../app/v3/services/triggerTask.server.ts | 2 +- .../app/v3/services/triggerTaskV1.server.ts | 3 +- .../app/v3/services/triggerTaskV2.server.ts | 8 +- .../worker/workerGroupTokenService.server.ts | 62 +++-- .../database/prisma/schema.prisma | 1 + .../src/engine/executionSnapshots.ts | 60 +++-- .../run-engine/src/engine/index.ts | 36 ++- .../src/entryPoints/managed-run-controller.ts | 221 ++++++++++++------ .../entryPoints/unmanaged-run-controller.ts | 12 +- packages/core/package.json | 1 + packages/core/src/v3/apps/friendlyId.ts | 75 ++++++ packages/core/src/v3/runtime/index.ts | 6 +- .../src/v3/runtime/managedRuntimeManager.ts | 15 +- packages/core/src/v3/runtime/manager.ts | 6 +- packages/core/src/v3/schemas/api.ts | 3 +- packages/core/src/v3/schemas/common.ts | 1 - packages/core/src/v3/schemas/runEngine.ts | 12 +- .../react-hooks/src/hooks/useTaskTrigger.ts | 1 - packages/trigger-sdk/src/v3/shared.ts | 3 - packages/worker/src/supervisor/events.ts | 10 +- packages/worker/src/supervisor/session.ts | 12 +- packages/worker/src/types.ts | 16 +- pnpm-lock.yaml | 7 + 38 files changed, 457 insertions(+), 233 deletions(-) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts => api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts} (81%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts => 
api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts} (80%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts => api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts} (75%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts => api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts} (79%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runId.snapshots.latest.ts => api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts} (88%) diff --git a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.test.tasks.$taskParam/route.tsx b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.test.tasks.$taskParam/route.tsx index 711c26742d..5869e6bc74 100644 --- a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.test.tasks.$taskParam/route.tsx +++ b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.test.tasks.$taskParam/route.tsx @@ -121,7 +121,7 @@ export const action: ActionFunction = async ({ request, params }) => { ); } - logger.error("Failed to start a test run", { error: e }); + logger.error("Failed to start a test run", { error: e instanceof Error ? 
e.message : e }); return redirectBackWithErrorMessage( request, diff --git a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts index 8cd4681154..e6e3398e69 100644 --- a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts +++ b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts @@ -96,7 +96,6 @@ const { action, loader } = createActionApiRoute( return json( { id: run.friendlyId, - internalId: run.id, }, { headers: $responseHeaders, diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts similarity index 81% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts rename to apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts index cabd1b37f9..4e33f04fec 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.complete.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts @@ -10,8 +10,8 @@ export const action = createActionWorkerApiRoute( { body: WorkerApiRunAttemptCompleteRequestBody, params: z.object({ - runId: z.string(), - snapshotId: z.string(), + runFriendlyId: z.string(), + snapshotFriendlyId: z.string(), }), }, async ({ @@ -20,11 +20,11 @@ export const action = createActionWorkerApiRoute( params, }): Promise> => { const { completion } = body; - const { runId, snapshotId } = params; + const { runFriendlyId, snapshotFriendlyId } = params; const completeResult = await authenticatedWorker.completeRunAttempt({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, completion, }); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts 
b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts similarity index 80% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts rename to apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts index cfddbe10d3..d8137e9b90 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.attempts.start.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts @@ -10,8 +10,8 @@ export const action = createActionWorkerApiRoute( { body: WorkerApiRunAttemptStartRequestBody, params: z.object({ - runId: z.string(), - snapshotId: z.string(), + runFriendlyId: z.string(), + snapshotFriendlyId: z.string(), }), }, async ({ @@ -19,11 +19,11 @@ export const action = createActionWorkerApiRoute( body, params, }): Promise> => { - const { runId, snapshotId } = params; + const { runFriendlyId, snapshotFriendlyId } = params; const runExecutionData = await authenticatedWorker.startRunAttempt({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, isWarmStart: body.isWarmStart, }); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts similarity index 75% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts rename to apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts index 226ab12373..96cd8c7e45 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.heartbeat.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts @@ -6,19 +6,19 @@ import { 
createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder. export const action = createActionWorkerApiRoute( { params: z.object({ - runId: z.string(), - snapshotId: z.string(), + runFriendlyId: z.string(), + snapshotFriendlyId: z.string(), }), }, async ({ authenticatedWorker, params, }): Promise> => { - const { runId, snapshotId } = params; + const { runFriendlyId, snapshotFriendlyId } = params; await authenticatedWorker.heartbeatRun({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, }); return json({ ok: true }); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts similarity index 79% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts rename to apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts index d2b645c097..f8676f6454 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.$snapshotId.wait.duration.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts @@ -10,8 +10,8 @@ export const action = createActionWorkerApiRoute( { body: WorkerApiWaitForDurationRequestBody, params: z.object({ - runId: z.string(), - snapshotId: z.string(), + runFriendlyId: z.string(), + snapshotFriendlyId: z.string(), }), }, async ({ @@ -19,11 +19,11 @@ export const action = createActionWorkerApiRoute( body, params, }): Promise> => { - const { runId, snapshotId } = params; + const { runFriendlyId, snapshotFriendlyId } = params; const waitResult = await authenticatedWorker.waitForDuration({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, date: body.date, }); diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.latest.ts 
b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts similarity index 88% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.latest.ts rename to apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts index 7e2f9c3abf..37422bf42a 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runId.snapshots.latest.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts @@ -6,17 +6,17 @@ import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder. export const loader = createLoaderWorkerApiRoute( { params: z.object({ - runId: z.string(), + runFriendlyId: z.string(), }), }, async ({ authenticatedWorker, params, }): Promise> => { - const { runId } = params; + const { runFriendlyId } = params; const executionData = await authenticatedWorker.getLatestSnapshot({ - runId, + runFriendlyId, }); if (!executionData) { diff --git a/apps/webapp/app/v3/handleSocketIo.server.ts b/apps/webapp/app/v3/handleSocketIo.server.ts index 3dee6f6ce3..e8aa628a39 100644 --- a/apps/webapp/app/v3/handleSocketIo.server.ts +++ b/apps/webapp/app/v3/handleSocketIo.server.ts @@ -455,12 +455,12 @@ function createWorkerNamespace(io: Server) { clearInterval(interval); }); - socket.on("run:subscribe", async ({ version, runIds }) => { - logger.debug("run:subscribe", { version, runIds }); + socket.on("run:subscribe", async ({ version, runFriendlyIds }) => { + logger.debug("run:subscribe", { version, runFriendlyIds }); const settledResult = await Promise.allSettled( - runIds.map((runId) => { - const room = roomFromRunId(runId); + runFriendlyIds.map((friendlyId) => { + const room = roomFromFriendlyRunId(friendlyId); logger.debug("Joining room", { room }); @@ -472,7 +472,7 @@ function createWorkerNamespace(io: Server) { for (const result of settledResult) { if (result.status === "rejected") { logger.error("Error joining room", { - runIds, + runFriendlyIds, 
error: result.reason instanceof Error ? result.reason.message : result.reason, }); } @@ -484,12 +484,12 @@ function createWorkerNamespace(io: Server) { }); }); - socket.on("run:unsubscribe", async ({ version, runIds }) => { - logger.debug("run:unsubscribe", { version, runIds }); + socket.on("run:unsubscribe", async ({ version, runFriendlyIds }) => { + logger.debug("run:unsubscribe", { version, runFriendlyIds }); const settledResult = await Promise.allSettled( - runIds.map((runId) => { - const room = roomFromRunId(runId); + runFriendlyIds.map((friendlyId) => { + const room = roomFromFriendlyRunId(friendlyId); logger.debug("Leaving room", { room }); @@ -501,7 +501,7 @@ function createWorkerNamespace(io: Server) { for (const result of settledResult) { if (result.status === "rejected") { logger.error("Error leaving room", { - runIds, + runFriendlyIds, error: result.reason instanceof Error ? result.reason.message : result.reason, }); } @@ -517,6 +517,6 @@ function createWorkerNamespace(io: Server) { return worker; } -function roomFromRunId(runId: string) { - return `run:${runId}`; +export function roomFromFriendlyRunId(id: string) { + return `room:${id}`; } diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index ac9089d5e4..952f72847c 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -5,9 +5,10 @@ import { logger } from "~/services/logger.server"; import { safeJsonParse } from "~/utils/json"; import type { Attributes } from "@opentelemetry/api"; import { reportInvocationUsage } from "~/services/platform.v3.server"; -import { socketIo } from "./handleSocketIo.server"; +import { roomFromFriendlyRunId, socketIo } from "./handleSocketIo.server"; import { engine } from "./runEngine.server"; import { PerformTaskRunAlertsService } from "./services/alerts/performTaskRunAlerts.server"; +import { RunId } from "@trigger.dev/core/v3/apps"; export function 
registerRunEngineEventBusHandlers() { engine.eventBus.on("runSucceeded", async ({ time, run }) => { @@ -286,7 +287,12 @@ export function registerRunEngineEventBusHandlers() { logger.debug("[workerNotification] Notifying worker", { time, runId: run.id }); try { - socketIo.workerNamespace.to(`run:${run.id}`).emit("run:notify", { version: "1", run }); + const runFriendlyId = RunId.toFriendlyId(run.id); + const room = roomFromFriendlyRunId(runFriendlyId); + + socketIo.workerNamespace + .to(room) + .emit("run:notify", { version: "1", run: { friendlyId: runFriendlyId } }); } catch (error) { logger.error("[workerNotification] Failed to notify worker", { error: error instanceof Error ? error.message : error, diff --git a/apps/webapp/app/v3/services/batchTriggerV2.server.ts b/apps/webapp/app/v3/services/batchTriggerV2.server.ts index 02a0cb768d..ff39679fc5 100644 --- a/apps/webapp/app/v3/services/batchTriggerV2.server.ts +++ b/apps/webapp/app/v3/services/batchTriggerV2.server.ts @@ -709,18 +709,18 @@ export class BatchTriggerV2Service extends BaseService { | { status: "ERROR"; error: string; workingIndex: number } > { // Grab the next PROCESSING_BATCH_SIZE runIds - const runIds = batch.runIds.slice(currentIndex, currentIndex + batchSize); + const runFriendlyIds = batch.runIds.slice(currentIndex, currentIndex + batchSize); logger.debug("[BatchTriggerV2][processBatchTaskRun] Processing batch items", { batchId: batch.friendlyId, currentIndex, - runIds, + runIds: runFriendlyIds, runCount: batch.runCount, }); // Combine the "window" between currentIndex and currentIndex + PROCESSING_BATCH_SIZE with the runId and the item in the payload which is an array - const itemsToProcess = runIds.map((runId, index) => ({ - runId, + const itemsToProcess = runFriendlyIds.map((runFriendlyId, index) => ({ + runFriendlyId, item: items[index + currentIndex], })); @@ -757,13 +757,13 @@ export class BatchTriggerV2Service extends BaseService { async #processBatchTaskRunItem( batch: BatchTaskRun, 
environment: AuthenticatedEnvironment, - task: { runId: string; item: BatchTriggerTaskV2RequestBody["items"][number] }, + task: { runFriendlyId: string; item: BatchTriggerTaskV2RequestBody["items"][number] }, currentIndex: number, options?: BatchTriggerTaskServiceOptions ) { logger.debug("[BatchTriggerV2][processBatchTaskRunItem] Processing item", { batchId: batch.friendlyId, - runId: task.runId, + runId: task.runFriendlyId, currentIndex, }); @@ -786,12 +786,12 @@ export class BatchTriggerV2Service extends BaseService { spanParentAsLink: options?.spanParentAsLink, batchId: batch.friendlyId, skipChecks: true, - runId: task.runId, + runFriendlyId: task.runFriendlyId, } ); if (!run) { - throw new Error(`Failed to trigger run ${task.runId} for batch ${batch.friendlyId}`); + throw new Error(`Failed to trigger run ${task.runFriendlyId} for batch ${batch.friendlyId}`); } await this._prisma.batchTaskRunItem.create({ diff --git a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts index 30938b54d4..0627fc83f7 100644 --- a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts @@ -21,6 +21,7 @@ import { updateEnvConcurrencyLimits, updateQueueConcurrencyLimits, } from "../runQueue.server"; +import { BackgroundWorkerId } from "@trigger.dev/core/v3/apps"; export class CreateBackgroundWorkerService extends BaseService { public async call( @@ -68,7 +69,7 @@ export class CreateBackgroundWorkerService extends BaseService { const backgroundWorker = await this._prisma.backgroundWorker.create({ data: { - friendlyId: generateFriendlyId("worker"), + ...BackgroundWorkerId.generate(), version: nextVersion, runtimeEnvironmentId: environment.id, projectId: project.id, diff --git a/apps/webapp/app/v3/services/createCheckpoint.server.ts b/apps/webapp/app/v3/services/createCheckpoint.server.ts index 7290424248..f8028fffff 100644 --- 
a/apps/webapp/app/v3/services/createCheckpoint.server.ts +++ b/apps/webapp/app/v3/services/createCheckpoint.server.ts @@ -3,12 +3,12 @@ import type { InferSocketMessageSchema } from "@trigger.dev/core/v3/zodSocket"; import type { Checkpoint, CheckpointRestoreEvent } from "@trigger.dev/database"; import { logger } from "~/services/logger.server"; import { marqs } from "~/v3/marqs/index.server"; -import { generateFriendlyId } from "../friendlyIdentifiers"; import { isFreezableAttemptStatus, isFreezableRunStatus } from "../taskStatus"; import { BaseService } from "./baseService.server"; import { CreateCheckpointRestoreEventService } from "./createCheckpointRestoreEvent.server"; import { ResumeBatchRunService } from "./resumeBatchRun.server"; import { ResumeDependentParentsService } from "./resumeDependentParents.server"; +import { CheckpointId } from "@trigger.dev/core/v3/apps"; export class CreateCheckpointService extends BaseService { public async call( @@ -98,7 +98,7 @@ export class CreateCheckpointService extends BaseService { const checkpoint = await this._prisma.checkpoint.create({ data: { - friendlyId: generateFriendlyId("checkpoint"), + ...CheckpointId.generate(), runtimeEnvironmentId: attempt.taskRun.runtimeEnvironmentId, projectId: attempt.taskRun.projectId, attemptId: attempt.id, diff --git a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts index 3dc3ca20db..da5e5d9507 100644 --- a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts @@ -12,6 +12,7 @@ import { createBackgroundTasks, syncDeclarativeSchedules } from "./createBackgro import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; import { projectPubSub } from "./projectPubSub.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; +import { BackgroundWorkerId } from 
"@trigger.dev/core/v3/apps"; export class CreateDeployedBackgroundWorkerService extends BaseService { public async call( @@ -39,7 +40,7 @@ export class CreateDeployedBackgroundWorkerService extends BaseService { const backgroundWorker = await this._prisma.backgroundWorker.create({ data: { - friendlyId: generateFriendlyId("worker"), + ...BackgroundWorkerId.generate(), version: deployment.version, runtimeEnvironmentId: environment.id, projectId: environment.projectId, diff --git a/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts index 7ace78ae86..67e10d9b0f 100644 --- a/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts @@ -9,7 +9,7 @@ import { syncDeclarativeSchedules, } from "./createBackgroundWorker.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; -import { logger } from "~/services/logger.server"; +import { BackgroundWorkerId } from "@trigger.dev/core/v3/apps"; export class CreateDeploymentBackgroundWorkerService extends BaseService { public async call( @@ -36,7 +36,7 @@ export class CreateDeploymentBackgroundWorkerService extends BaseService { const backgroundWorker = await this._prisma.backgroundWorker.create({ data: { - friendlyId: generateFriendlyId("worker"), + ...BackgroundWorkerId.generate(), version: deployment.version, runtimeEnvironmentId: environment.id, projectId: environment.projectId, diff --git a/apps/webapp/app/v3/services/replayTaskRun.server.ts b/apps/webapp/app/v3/services/replayTaskRun.server.ts index 601bb8a075..96415a270d 100644 --- a/apps/webapp/app/v3/services/replayTaskRun.server.ts +++ b/apps/webapp/app/v3/services/replayTaskRun.server.ts @@ -119,7 +119,9 @@ export class ReplayTaskRunService extends BaseService { return; } - logger.error("Failed to replay a run", { error: error }); + logger.error("Failed to replay a 
run", { + error: error instanceof Error ? error.message : error, + }); return; } diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index 9543460e57..e5a602cdc0 100644 --- a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -14,7 +14,7 @@ export type TriggerTaskServiceOptions = { parentAsLinkType?: "replay" | "trigger"; batchId?: string; customIcon?: string; - runId?: string; + runFriendlyId?: string; skipChecks?: boolean; oneTimeUseToken?: string; }; diff --git a/apps/webapp/app/v3/services/triggerTaskV1.server.ts b/apps/webapp/app/v3/services/triggerTaskV1.server.ts index 0725122891..51c4e4cffc 100644 --- a/apps/webapp/app/v3/services/triggerTaskV1.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV1.server.ts @@ -31,6 +31,7 @@ import { parseDelay } from "~/utils/delays"; import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask.server"; import { removeQueueConcurrencyLimits, updateQueueConcurrencyLimits } from "../runQueue.server"; +/** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. */ export class TriggerTaskServiceV1 extends BaseService { public async call( taskId: string, @@ -126,7 +127,7 @@ export class TriggerTaskServiceV1 extends BaseService { ); } - const runFriendlyId = options?.runId ?? generateFriendlyId("run"); + const runFriendlyId = options?.runFriendlyId ?? 
generateFriendlyId("run"); const payloadPacket = await this.#handlePayloadPacket( body.payload, diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 737a582a97..781c45cec7 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -10,7 +10,6 @@ import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { autoIncrementCounter } from "~/services/autoIncrementCounter.server"; import { sanitizeQueueName } from "~/v3/marqs/index.server"; import { eventRepository } from "../eventRepository.server"; -import { generateFriendlyId } from "../friendlyIdentifiers"; import { uploadPacketToObjectStore } from "../r2.server"; import { startActiveSpan } from "../tracer.server"; import { getEntitlement } from "~/services/platform.v3.server"; @@ -22,7 +21,7 @@ import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.ser import { handleMetadataPacket } from "~/utils/packets"; import { WorkerGroupService } from "./worker/workerGroupService.server"; import { parseDelay } from "~/utils/delays"; -import { stringifyDuration } from "@trigger.dev/core/v3/apps"; +import { RunId, stringifyDuration } from "@trigger.dev/core/v3/apps"; import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask.server"; import { Prisma } from "@trigger.dev/database"; import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; @@ -93,11 +92,10 @@ export class TriggerTaskServiceV2 extends WithRunEngine { if ( existingRun.associatedWaitpoint?.status === "PENDING" && body.options?.resumeParentOnCompletion && - // FIXME: This is currently the friendly ID body.options?.parentRunId ) { await this._engine.blockRunWithWaitpoint({ - runId: body.options.parentRunId, + runId: RunId.fromFriendlyId(body.options.parentRunId), waitpointId: existingRun.associatedWaitpoint.id, environmentId: environment.id, projectId: 
environment.projectId, @@ -147,7 +145,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { ); } - const runFriendlyId = options?.runId ?? generateFriendlyId("run"); + const runFriendlyId = options?.runFriendlyId ?? RunId.generate().friendlyId; const payloadPacket = await this.#handlePayloadPacket( body.payload, diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 32ddfe4594..96e40639e7 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -24,6 +24,7 @@ import { $transaction } from "~/db.server"; import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; import { resolveVariablesForEnvironment } from "~/v3/environmentVariables/environmentVariablesRepository.server"; import { generateJWTTokenForEnvironment } from "~/services/apiAuth.server"; +import { fromFriendlyId } from "@trigger.dev/core/v3/apps"; export class WorkerGroupTokenService extends WithRunEngine { private readonly tokenPrefix = "tr_wgt_"; @@ -615,29 +616,36 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { } async heartbeatRun({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, }: { - runId: string; - snapshotId: string; + runFriendlyId: string; + snapshotFriendlyId: string; }): Promise { - return await this._engine.heartbeatRun({ runId, snapshotId }); + return await this._engine.heartbeatRun({ + runId: fromFriendlyId(runFriendlyId), + snapshotId: fromFriendlyId(snapshotFriendlyId), + }); } async startRunAttempt({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, isWarmStart, }: { - runId: string; - snapshotId: string; + runFriendlyId: string; + snapshotFriendlyId: string; isWarmStart?: boolean; }): Promise< StartRunAttemptResult & { envVars: Record; } > { - const engineResult = await this._engine.startRunAttempt({ runId, snapshotId, 
isWarmStart }); + const engineResult = await this._engine.startRunAttempt({ + runId: fromFriendlyId(runFriendlyId), + snapshotId: fromFriendlyId(snapshotFriendlyId), + isWarmStart, + }); const defaultMachinePreset = { name: "small-1x", @@ -669,31 +677,41 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { } async completeRunAttempt({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, completion, }: { - runId: string; - snapshotId: string; + runFriendlyId: string; + snapshotFriendlyId: string; completion: TaskRunExecutionResult; }): Promise { - return await this._engine.completeRunAttempt({ runId, snapshotId, completion }); + return await this._engine.completeRunAttempt({ + runId: fromFriendlyId(runFriendlyId), + snapshotId: fromFriendlyId(snapshotFriendlyId), + completion, + }); } async waitForDuration({ - runId, - snapshotId, + runFriendlyId, + snapshotFriendlyId, date, }: { - runId: string; - snapshotId: string; + runFriendlyId: string; + snapshotFriendlyId: string; date: Date; }): Promise { - return await this._engine.waitForDuration({ runId, snapshotId, date }); + return await this._engine.waitForDuration({ + runId: fromFriendlyId(runFriendlyId), + snapshotId: fromFriendlyId(snapshotFriendlyId), + date, + }); } - async getLatestSnapshot({ runId }: { runId: string }) { - return await this._engine.getRunExecutionData({ runId }); + async getLatestSnapshot({ runFriendlyId }: { runFriendlyId: string }) { + return await this._engine.getRunExecutionData({ + runId: fromFriendlyId(runFriendlyId), + }); } toJSON(): WorkerGroupTokenAuthenticationResponse { diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index a6f0da2a8b..dc3effda79 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2481,6 +2481,7 @@ model BatchTaskRun { updatedAt DateTime @updatedAt // new columns + /// Friendly IDs runIds String[] @default([]) 
runCount Int @default(0) payload String? diff --git a/internal-packages/run-engine/src/engine/executionSnapshots.ts b/internal-packages/run-engine/src/engine/executionSnapshots.ts index 547e379fea..eb2dfcf42e 100644 --- a/internal-packages/run-engine/src/engine/executionSnapshots.ts +++ b/internal-packages/run-engine/src/engine/executionSnapshots.ts @@ -1,8 +1,23 @@ -import { ExecutionResult } from "@trigger.dev/core/v3"; -import { PrismaClientOrTransaction, TaskRunExecutionSnapshot } from "@trigger.dev/database"; +import { CompletedWaitpoint, ExecutionResult } from "@trigger.dev/core/v3"; +import { RunId, SnapshotId } from "@trigger.dev/core/v3/apps"; +import { + PrismaClientOrTransaction, + TaskRunCheckpoint, + TaskRunExecutionSnapshot, +} from "@trigger.dev/database"; + +interface LatestExecutionSnapshot extends TaskRunExecutionSnapshot { + friendlyId: string; + runFriendlyId: string; + checkpoint: TaskRunCheckpoint | null; + completedWaitpoints: CompletedWaitpoint[]; +} /* Gets the most recent valid snapshot for a run */ -export async function getLatestExecutionSnapshot(prisma: PrismaClientOrTransaction, runId: string) { +export async function getLatestExecutionSnapshot( + prisma: PrismaClientOrTransaction, + runId: string +): Promise { const snapshot = await prisma.taskRunExecutionSnapshot.findFirst({ where: { runId, isValid: true }, include: { @@ -18,18 +33,31 @@ export async function getLatestExecutionSnapshot(prisma: PrismaClientOrTransacti return { ...snapshot, - completedWaitpoints: snapshot.completedWaitpoints.map((w) => ({ - id: w.id, - type: w.type, - completedAt: w.completedAt ?? new Date(), - idempotencyKey: - w.userProvidedIdempotencyKey && !w.inactiveIdempotencyKey ? w.idempotencyKey : undefined, - completedByTaskRunId: w.completedByTaskRunId ?? undefined, - completedAfter: w.completedAfter ?? undefined, - output: w.output ?? 
undefined, - outputType: w.outputType, - outputIsError: w.outputIsError, - })), + friendlyId: SnapshotId.toFriendlyId(snapshot.id), + runFriendlyId: RunId.toFriendlyId(snapshot.runId), + completedWaitpoints: snapshot.completedWaitpoints.map( + (w) => + ({ + id: w.id, + friendlyId: w.friendlyId, + type: w.type, + completedAt: w.completedAt ?? new Date(), + idempotencyKey: + w.userProvidedIdempotencyKey && !w.inactiveIdempotencyKey + ? w.idempotencyKey + : undefined, + completedByTaskRun: w.completedByTaskRunId + ? { + id: w.completedByTaskRunId, + friendlyId: RunId.toFriendlyId(w.completedByTaskRunId), + } + : undefined, + completedAfter: w.completedAfter ?? undefined, + output: w.output ?? undefined, + outputType: w.outputType, + outputIsError: w.outputIsError, + }) satisfies CompletedWaitpoint + ), }; } @@ -62,11 +90,13 @@ export function executionResultFromSnapshot(snapshot: TaskRunExecutionSnapshot): return { snapshot: { id: snapshot.id, + friendlyId: SnapshotId.toFriendlyId(snapshot.id), executionStatus: snapshot.executionStatus, description: snapshot.description, }, run: { id: snapshot.runId, + friendlyId: RunId.toFriendlyId(snapshot.runId), status: snapshot.runStatus, attemptNumber: snapshot.attemptNumber, }, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 760389d301..d4358b7786 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -21,10 +21,13 @@ import { WaitForDurationResult, } from "@trigger.dev/core/v3"; import { - generateFriendlyId, getMaxDuration, parseNaturalLanguageDuration, + QueueId, + RunId, sanitizeQueueName, + SnapshotId, + WaitpointId, } from "@trigger.dev/core/v3/apps"; import { $transaction, @@ -49,11 +52,7 @@ import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; import { getRunWithBackgroundWorkerTasks } from "./db/worker"; import { runStatusFromError } from "./errors"; import { EventBusEvents } from 
"./eventBus"; -import { - executionResultFromSnapshot, - getExecutionSnapshotCompletedWaitpoints, - getLatestExecutionSnapshot, -} from "./executionSnapshots"; +import { executionResultFromSnapshot, getLatestExecutionSnapshot } from "./executionSnapshots"; import { RunLocker } from "./locking"; import { machinePresetFromConfig } from "./machinePresets"; import { @@ -270,6 +269,7 @@ export class RunEngine { //create run const taskRun = await prisma.taskRun.create({ data: { + id: RunId.fromFriendlyId(friendlyId), engine: "V2", status, number, @@ -414,7 +414,7 @@ export class RunEngine { } else { taskQueue = await prisma.taskQueue.create({ data: { - friendlyId: generateFriendlyId("queue"), + ...QueueId.generate(), name: queueName, concurrencyLimit, runtimeEnvironmentId: environment.id, @@ -773,6 +773,7 @@ export class RunEngine { version: "1" as const, snapshot: { id: newSnapshot.id, + friendlyId: newSnapshot.friendlyId, executionStatus: newSnapshot.executionStatus, description: newSnapshot.description, }, @@ -781,6 +782,7 @@ export class RunEngine { completedWaitpoints: snapshot.completedWaitpoints, backgroundWorker: { id: result.worker.id, + friendlyId: result.worker.friendlyId, version: result.worker.version, }, deployment: { @@ -1119,7 +1121,6 @@ export class RunEngine { }, run: { id: run.friendlyId, - internalId: run.id, payload: run.payload, payloadType: run.payloadType, createdAt: run.createdAt, @@ -1527,7 +1528,7 @@ export class RunEngine { return this.prisma.waitpoint.create({ data: { - friendlyId: generateFriendlyId("waitpoint"), + ...WaitpointId.generate(), type: "MANUAL", idempotencyKey: idempotencyKey ?? 
nanoid(24), userProvidedIdempotencyKey: !!idempotencyKey, @@ -1829,17 +1830,20 @@ export class RunEngine { version: "1" as const, snapshot: { id: snapshot.id, + friendlyId: snapshot.friendlyId, executionStatus: snapshot.executionStatus, description: snapshot.description, }, run: { id: snapshot.runId, + friendlyId: snapshot.runFriendlyId, status: snapshot.runStatus, attemptNumber: snapshot.attemptNumber ?? undefined, }, checkpoint: snapshot.checkpoint ? { id: snapshot.checkpoint.id, + friendlyId: snapshot.checkpoint.friendlyId, type: snapshot.checkpoint.type, location: snapshot.checkpoint.location, imageRef: snapshot.checkpoint.imageRef, @@ -1893,6 +1897,7 @@ export class RunEngine { snapshot: latestSnapshot, run: { id: runId, + friendlyId: latestSnapshot.runFriendlyId, status: latestSnapshot.runStatus, attemptNumber: latestSnapshot.attemptNumber, }, @@ -2065,6 +2070,7 @@ export class RunEngine { }, select: { id: true, + friendlyId: true, status: true, attemptNumber: true, spanId: true, @@ -2432,11 +2438,13 @@ export class RunEngine { wasRequeued: true, snapshot: { id: newSnapshot.id, + friendlyId: newSnapshot.friendlyId, executionStatus: newSnapshot.executionStatus, description: newSnapshot.description, }, run: { id: newSnapshot.runId, + friendlyId: newSnapshot.runFriendlyId, status: newSnapshot.runStatus, attemptNumber: newSnapshot.attemptNumber, }, @@ -2616,7 +2624,7 @@ export class RunEngine { ) { return tx.waitpoint.create({ data: { - friendlyId: generateFriendlyId("waitpoint"), + ...WaitpointId.generate(), type: "RUN", status: "PENDING", idempotencyKey: nanoid(24), @@ -2639,7 +2647,7 @@ export class RunEngine { ) { const waitpoint = await tx.waitpoint.create({ data: { - friendlyId: generateFriendlyId("waitpoint"), + ...WaitpointId.generate(), type: "DATETIME", status: "PENDING", idempotencyKey: idempotencyKey ?? 
nanoid(24), @@ -2772,7 +2780,11 @@ export class RunEngine { }, }); - return newSnapshot; + return { + ...newSnapshot, + friendlyId: SnapshotId.toFriendlyId(newSnapshot.id), + runFriendlyId: RunId.toFriendlyId(newSnapshot.runId), + }; } async #setExecutionSnapshotHeartbeat({ diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 4d161047ab..38e9bd6ad7 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -22,11 +22,12 @@ import { assertExhaustive } from "../utilities/assertExhaustive.js"; import { setTimeout as wait } from "timers/promises"; import { io, Socket } from "socket.io-client"; +// All IDs are friendly IDs const Env = z.object({ TRIGGER_API_URL: z.string().url().default(CLOUD_API_URL), TRIGGER_CONTENT_HASH: z.string(), TRIGGER_WORKER_API_URL: z.string().url(), - TRIGGER_WORKLOAD_CONTROLLER_ID: z.string().default(randomUUID()), + TRIGGER_WORKLOAD_CONTROLLER_ID: z.string().default(`controller_${randomUUID()}`), TRIGGER_DEPLOYMENT_ID: z.string(), TRIGGER_DEPLOYMENT_VERSION: z.string(), TRIGGER_ENV_ID: z.string(), @@ -51,6 +52,14 @@ type ManagedRunControllerOptions = { heartbeatIntervalSeconds?: number; }; +type Run = { + friendlyId: string; +}; + +type Snapshot = { + friendlyId: string; +}; + class ManagedRunController { private taskRunProcess?: TaskRunProcess; @@ -66,8 +75,51 @@ class ManagedRunController { private readonly snapshotPollService: HeartbeatService; private readonly snapshotPollIntervalSeconds: number; - private runId?: string; - private snapshotId?: string; + private state: + | { + phase: "RUN"; + run: Run; + snapshot: Snapshot; + } + | { + phase: "IDLE" | "WARM_START"; + }; + + private enterIdlePhase() { + this.state = { phase: "IDLE" }; + } + + private enterRunPhase(run: Run, snapshot: Snapshot) { + this.state = { phase: "RUN", run, snapshot }; + } + + private 
updateSnapshot(snapshot: Snapshot) { + if (this.state.phase !== "RUN") { + throw new Error(`Invalid phase for updating snapshot: ${this.state.phase}`); + } + + this.state.snapshot = snapshot; + } + + private enterWarmStartPhase() { + this.state = { phase: "WARM_START" }; + } + + private get runFriendlyId() { + if (this.state.phase !== "RUN") { + return undefined; + } + + return this.state.run.friendlyId; + } + + private get snapshotFriendlyId() { + if (this.state.phase !== "RUN") { + return undefined; + } + + return this.state.snapshot.friendlyId; + } constructor(opts: ManagedRunControllerOptions) { logger.debug("[ManagedRunController] Creating controller", { env }); @@ -77,8 +129,19 @@ class ManagedRunController { this.heartbeatIntervalSeconds = opts.heartbeatIntervalSeconds || 30; this.snapshotPollIntervalSeconds = 5; - this.runId = env.TRIGGER_RUN_ID; - this.snapshotId = env.TRIGGER_SNAPSHOT_ID; + if (env.TRIGGER_RUN_ID) { + if (!env.TRIGGER_SNAPSHOT_ID) { + throw new Error("Missing snapshot ID"); + } + + this.state = { + phase: "RUN", + run: { friendlyId: env.TRIGGER_RUN_ID }, + snapshot: { friendlyId: env.TRIGGER_SNAPSHOT_ID }, + }; + } else { + this.enterIdlePhase(); + } this.httpClient = new WorkloadHttpClient({ workerApiUrl: env.TRIGGER_WORKER_API_URL, @@ -87,14 +150,14 @@ class ManagedRunController { this.snapshotPollService = new HeartbeatService({ heartbeat: async () => { - if (!this.runId) { + if (!this.runFriendlyId) { logger.debug("[ManagedRunController] Skipping snapshot poll, no run ID"); return; } console.debug("[ManagedRunController] Polling for latest snapshot"); - const response = await this.httpClient.getRunExecutionData(this.runId); + const response = await this.httpClient.getRunExecutionData(this.runFriendlyId); if (!response.success) { console.error("[ManagedRunController] Snapshot poll failed", { error: response.error }); @@ -103,19 +166,19 @@ class ManagedRunController { const { snapshot } = response.data.execution; - if (snapshot.id === 
this.snapshotId) { + if (snapshot.friendlyId === this.snapshotFriendlyId) { console.debug("[ManagedRunController] Snapshot not changed", { - snapshotId: this.snapshotId, + snapshotId: this.snapshotFriendlyId, }); return; } console.log("Snapshot changed", { - oldSnapshotId: this.snapshotId, - newSnapshotId: snapshot.id, + oldSnapshotId: this.snapshotFriendlyId, + newSnapshotId: snapshot.friendlyId, }); - this.snapshotId = snapshot.id; + this.updateSnapshot(snapshot); await this.handleSnapshotChange(response.data.execution); }, @@ -128,17 +191,21 @@ class ManagedRunController { this.heartbeatService = new HeartbeatService({ heartbeat: async () => { - if (!this.runId || !this.snapshotId) { + if (!this.runFriendlyId || !this.snapshotFriendlyId) { logger.debug("[ManagedRunController] Skipping heartbeat, no run ID or snapshot ID"); return; } console.debug("[ManagedRunController] Sending heartbeat"); - const response = await this.httpClient.heartbeatRun(this.runId, this.snapshotId, { - cpu: 0, - memory: 0, - }); + const response = await this.httpClient.heartbeatRun( + this.runFriendlyId, + this.snapshotFriendlyId, + { + cpu: 0, + memory: 0, + } + ); if (!response.success) { console.error("[ManagedRunController] Heartbeat failed", { error: response.error }); @@ -158,14 +225,14 @@ class ManagedRunController { } private async handleSnapshotChange({ run, snapshot, completedWaitpoints }: RunExecutionData) { - console.log("Got latest snapshot", { snapshot, currentSnapshotId: this.snapshotId }); + console.log("Got latest snapshot", { snapshot, currentSnapshotId: this.snapshotFriendlyId }); - this.snapshotId = snapshot.id; + this.updateSnapshot(snapshot); switch (snapshot.executionStatus) { case "PENDING_CANCEL": { try { - await this.cancelAttempt(run.id); + await this.cancelAttempt(run.friendlyId); } catch (error) { console.error("Failed to cancel attempt, shutting down", { error, @@ -194,10 +261,10 @@ class ManagedRunController { } private async 
startAndExecuteRunAttempt(isWarmStart = false) { - if (!this.runId || !this.snapshotId) { + if (!this.runFriendlyId || !this.snapshotFriendlyId) { logger.debug("[ManagedRunController] Missing run ID or snapshot ID", { - runId: this.runId, - snapshotId: this.snapshotId, + runId: this.runFriendlyId, + snapshotId: this.snapshotFriendlyId, }); process.exit(1); } @@ -208,13 +275,17 @@ class ManagedRunController { this.socket?.emit("run:start", { version: "1", - run: { id: this.runId }, - snapshot: { id: this.snapshotId }, + run: { friendlyId: this.runFriendlyId }, + snapshot: { friendlyId: this.snapshotFriendlyId }, }); - const start = await this.httpClient.startRunAttempt(this.runId, this.snapshotId, { - isWarmStart, - }); + const start = await this.httpClient.startRunAttempt( + this.runFriendlyId, + this.snapshotFriendlyId, + { + isWarmStart, + } + ); if (!start.success) { console.error("[ManagedRunController] Failed to start run", { error: start.error }); @@ -224,12 +295,11 @@ class ManagedRunController { const { run, snapshot, execution, envVars } = start.data; logger.debug("[ManagedRunController] Started run", { - runId: run.id, - snapshot: snapshot.id, + runId: run.friendlyId, + snapshot: snapshot.friendlyId, }); - this.runId = run.id; - this.snapshotId = snapshot.id; + this.updateSnapshot(snapshot); const taskRunEnv = { ...gatherProcessEnv(), @@ -244,19 +314,23 @@ class ManagedRunController { }); console.log("Submitting attempt completion", { - runId: run.id, - snapshotId: snapshot.id, - updatedSnapshotId: this.snapshotId, + runId: run.friendlyId, + snapshotId: snapshot.friendlyId, + updatedSnapshotId: this.snapshotFriendlyId, }); - const completionResult = await this.httpClient.completeRunAttempt(run.id, this.snapshotId, { - completion: { - id: execution.run.id, - ok: false, - retry: undefined, - error: TaskRunProcess.parseExecuteError(error), - }, - }); + const completionResult = await this.httpClient.completeRunAttempt( + this.runFriendlyId, + 
this.snapshotFriendlyId, + { + completion: { + id: execution.run.id, + ok: false, + retry: undefined, + error: TaskRunProcess.parseExecuteError(error), + }, + } + ); if (!completionResult.success) { console.error("Failed to submit completion after error", { @@ -265,16 +339,13 @@ class ManagedRunController { process.exit(1); } - logger.log("Attempt completion submitted", completionResult.data.result); - } finally { - this.runId = undefined; - this.snapshotId = undefined; - - this.waitForNextRun(); + logger.log("Attempt completion submitted after error", completionResult.data.result); } } private async waitForNextRun() { + this.enterWarmStartPhase(); + try { const warmStartUrl = new URL( "/warm-start", @@ -308,8 +379,7 @@ class ManagedRunController { console.log("Got next run", { nextRun }); - this.runId = nextRun.run.id; - this.snapshotId = nextRun.snapshot.id; + this.enterRunPhase(nextRun.run, nextRun.snapshot); this.startAndExecuteRunAttempt(true); } catch (error) { @@ -331,16 +401,16 @@ class ManagedRunController { this.socket.on("run:notify", async ({ version, run }) => { console.log("[ManagedRunController] Received run notification", { version, run }); - if (run.id !== this.runId) { + if (run.friendlyId !== this.runFriendlyId) { console.log("[ManagedRunController] Ignoring notification for different run", { - runId: run.id, - currentRunId: this.runId, - currentSnapshotId: this.snapshotId, + runId: run.friendlyId, + currentRunId: this.runFriendlyId, + currentSnapshotId: this.snapshotFriendlyId, }); return; } - const latestSnapshot = await this.httpClient.getRunExecutionData(run.id); + const latestSnapshot = await this.httpClient.getRunExecutionData(this.runFriendlyId); if (!latestSnapshot.success) { console.error("Failed to get latest snapshot data", latestSnapshot.error); @@ -378,7 +448,7 @@ class ManagedRunController { execution, traceContext: execution.run.traceContext ?? 
{}, }, - messageId: run.id, + messageId: run.friendlyId, }); this.taskRunProcess.onWait.attach(this.handleWait.bind(this)); @@ -402,17 +472,21 @@ class ManagedRunController { }); } - if (!this.runId || !this.snapshotId) { + if (!this.runFriendlyId || !this.snapshotFriendlyId) { console.error("Missing run ID or snapshot ID after execution", { - runId: this.runId, - snapshotId: this.snapshotId, + runId: this.runFriendlyId, + snapshotId: this.snapshotFriendlyId, }); process.exit(1); } - const completionResult = await this.httpClient.completeRunAttempt(run.id, this.snapshotId, { - completion, - }); + const completionResult = await this.httpClient.completeRunAttempt( + this.runFriendlyId, + this.snapshotFriendlyId, + { + completion, + } + ); if (!completionResult.success) { console.error("Failed to submit completion", { @@ -421,15 +495,15 @@ class ManagedRunController { process.exit(1); } - logger.log("Completion submitted", completionResult.data.result); + logger.log("Attempt completion submitted", completionResult.data.result); - const { attemptStatus } = completionResult.data.result; + const { attemptStatus, snapshot: completionSnapshot } = completionResult.data.result; - this.runId = completionResult.data.result.run.id; - this.snapshotId = completionResult.data.result.snapshot.id; + this.updateSnapshot(completionSnapshot); if (attemptStatus === "RUN_FINISHED") { logger.debug("Run finished"); + this.waitForNextRun(); return; } @@ -440,6 +514,7 @@ class ManagedRunController { if (attemptStatus === "RETRY_QUEUED") { logger.debug("Retry queued"); + this.waitForNextRun(); return; } @@ -462,7 +537,7 @@ class ManagedRunController { } private async handleWait({ wait }: OnWaitMessage) { - if (!this.runId || !this.snapshotId) { + if (!this.runFriendlyId || !this.snapshotFriendlyId) { logger.debug("[ManagedRunController] Ignoring wait, no run ID or snapshot ID"); return; } @@ -471,9 +546,13 @@ class ManagedRunController { case "DATETIME": { logger.log("Waiting for duration", { 
wait }); - const waitpoint = await this.httpClient.waitForDuration(this.runId, this.snapshotId, { - date: wait.date, - }); + const waitpoint = await this.httpClient.waitForDuration( + this.runFriendlyId, + this.snapshotFriendlyId, + { + date: wait.date, + } + ); if (!waitpoint.success) { console.error("Failed to wait for datetime", { error: waitpoint.error }); diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts index 26345e5b5a..a027c2fe73 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts @@ -44,10 +44,10 @@ class UnmanagedRunController { this.session.emit("requestRunAttemptStart", { time: new Date(), run: { - id: message.run.id, + friendlyId: message.run.id, }, snapshot: { - id: message.snapshot.id, + friendlyId: message.snapshot.id, }, }); }); @@ -96,10 +96,10 @@ class UnmanagedRunController { this.session.emit("runAttemptCompleted", { time: new Date(), run: { - id: run.id, + friendlyId: run.id, }, snapshot: { - id: snapshot.id, + friendlyId: snapshot.id, }, completion, }); @@ -111,10 +111,10 @@ class UnmanagedRunController { this.session.emit("runAttemptCompleted", { time: new Date(), run: { - id: run.id, + friendlyId: run.id, }, snapshot: { - id: snapshot.id, + friendlyId: snapshot.id, }, completion: { id: execution.run.id, diff --git a/packages/core/package.json b/packages/core/package.json index c2452cc6f7..f63f787d4d 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -182,6 +182,7 @@ "check-exports": "attw --pack ." 
}, "dependencies": { + "@bugsnag/cuid": "^3.1.1", "@electric-sql/client": "1.0.0-beta.1", "@google-cloud/precise-date": "^4.0.0", "@jsonhero/path": "^1.0.21", diff --git a/packages/core/src/v3/apps/friendlyId.ts b/packages/core/src/v3/apps/friendlyId.ts index 1036edf297..f982ec280d 100644 --- a/packages/core/src/v3/apps/friendlyId.ts +++ b/packages/core/src/v3/apps/friendlyId.ts @@ -1,7 +1,82 @@ import { customAlphabet } from "nanoid"; +import cuid from "@bugsnag/cuid"; const idGenerator = customAlphabet("123456789abcdefghijkmnopqrstuvwxyz", 21); export function generateFriendlyId(prefix: string, size?: number) { return `${prefix}_${idGenerator(size)}`; } + +export function generateInternalId() { + return cuid(); +} + +/** Convert an internal ID to a friendly ID */ +export function toFriendlyId(entityName: string, internalId: string): string { + if (!entityName) { + throw new Error("Entity name cannot be empty"); + } + + if (!internalId) { + throw new Error("Internal ID cannot be empty"); + } + + return `${entityName}_${internalId}`; +} + +/** Convert a friendly ID to an internal ID */ +export function fromFriendlyId(friendlyId: string, expectedEntityName?: string): string { + if (!friendlyId) { + throw new Error("Friendly ID cannot be empty"); + } + + const parts = friendlyId.split("_"); + + if (parts.length !== 2) { + throw new Error("Invalid friendly ID format"); + } + + const [entityName, internalId] = parts; + + if (!entityName) { + throw new Error("Entity name cannot be empty"); + } + + if (!internalId) { + throw new Error("Internal ID cannot be empty"); + } + + if (expectedEntityName && entityName !== expectedEntityName) { + throw new Error(`Invalid entity name: ${entityName}`); + } + + return internalId; +} + +export class IdUtil { + constructor(private entityName: string) {} + + generate() { + const internalId = generateInternalId(); + + return { + id: internalId, + friendlyId: this.toFriendlyId(internalId), + }; + } + + toFriendlyId(internalId: string) { 
+ return toFriendlyId(this.entityName, internalId); + } + + fromFriendlyId(friendlyId: string) { + return fromFriendlyId(friendlyId); + } +} + +export const BackgroundWorkerId = new IdUtil("worker"); +export const CheckpointId = new IdUtil("checkpoint"); +export const QueueId = new IdUtil("queue"); +export const RunId = new IdUtil("run"); +export const SnapshotId = new IdUtil("snapshot"); +export const WaitpointId = new IdUtil("waitpoint"); diff --git a/packages/core/src/v3/runtime/index.ts b/packages/core/src/v3/runtime/index.ts index 146c508e46..7eecb99296 100644 --- a/packages/core/src/v3/runtime/index.ts +++ b/packages/core/src/v3/runtime/index.ts @@ -33,11 +33,7 @@ export class RuntimeAPI { return usage.pauseAsync(() => this.#getRuntimeManager().waitUntil(date)); } - public waitForTask(params: { - id: string; - internalId: string; - ctx: TaskRunContext; - }): Promise { + public waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { return usage.pauseAsync(() => this.#getRuntimeManager().waitForTask(params)); } diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index c9b559040c..e29e06b5c9 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -32,8 +32,6 @@ export class ManagedRuntimeManager implements RuntimeManager { } async waitForDuration(ms: number): Promise { - console.log("waitForDuration", ms); - const wait = { type: "DATETIME", id: crypto.randomUUID(), @@ -54,15 +52,9 @@ export class ManagedRuntimeManager implements RuntimeManager { return this.waitForDuration(date.getTime() - Date.now()); } - async waitForTask(params: { - id: string; - internalId?: string; - ctx: TaskRunContext; - }): Promise { - console.log("waitForTask", params); - + async waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { const promise = new Promise((resolve) => { - 
this.resolversByWaitId.set(params.internalId ?? params.id, resolve); + this.resolversByWaitId.set(params.id, resolve); }); const waitpoint = await promise; @@ -109,7 +101,8 @@ export class ManagedRuntimeManager implements RuntimeManager { private completeWaitpoint(waitpoint: CompletedWaitpoint): void { console.log("completeWaitpoint", waitpoint); - const waitId = waitpoint.completedByTaskRunId ?? this.resolversByWaitpoint.get(waitpoint.id); + const waitId = + waitpoint.completedByTaskRun?.friendlyId ?? this.resolversByWaitpoint.get(waitpoint.id); if (!waitId) { // TODO: Handle failures better diff --git a/packages/core/src/v3/runtime/manager.ts b/packages/core/src/v3/runtime/manager.ts index 856bcddee1..56acfe3cf2 100644 --- a/packages/core/src/v3/runtime/manager.ts +++ b/packages/core/src/v3/runtime/manager.ts @@ -8,11 +8,7 @@ export interface RuntimeManager { disable(): void; waitUntil(date: Date): Promise; waitForDuration(ms: number): Promise; - waitForTask(params: { - id: string; - internalId?: string; - ctx: TaskRunContext; - }): Promise; + waitForTask(params: { id: string; ctx: TaskRunContext }): Promise; waitForBatch(params: { id: string; runs: string[]; diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 9f86f50e60..2c52f740a0 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -80,7 +80,7 @@ export const TriggerTaskRequestBody = z.object({ parentBatch: z.string().optional(), /** * RunEngine v2 - * If triggered inside another run, the parentRunId is the id of the parent run. + * If triggered inside another run, the parentRunId is the friendly ID of the parent run. 
*/ parentRunId: z.string().optional(), /** @@ -114,7 +114,6 @@ export type TriggerTaskRequestBody = z.infer; export const TriggerTaskResponse = z.object({ id: z.string(), - internalId: z.string(), }); export type TriggerTaskResponse = z.infer; diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index 60157ca293..1f9f18f4c3 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -228,7 +228,6 @@ export const TaskRunExecution = z.object({ run: TaskRun.and( z.object({ traceContext: z.record(z.unknown()).optional(), - internalId: z.string().optional(), }) ), queue: TaskRunExecutionQueue, diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index 02414620d5..2eb5c52680 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -49,11 +49,17 @@ export type WaitpointType = (typeof WaitpointType)[keyof typeof WaitpointType]; export const CompletedWaitpoint = z.object({ id: z.string(), + friendlyId: z.string(), type: z.enum(Object.values(WaitpointType) as [WaitpointType]), completedAt: z.coerce.date(), idempotencyKey: z.string().optional(), /** For type === "RUN" */ - completedByTaskRunId: z.string().optional(), + completedByTaskRun: z + .object({ + id: z.string(), + friendlyId: z.string(), + }) + .optional(), /** For type === "DATETIME" */ completedAfter: z.coerce.date().optional(), output: z.string().optional(), @@ -65,12 +71,14 @@ export type CompletedWaitpoint = z.infer; const ExecutionSnapshot = z.object({ id: z.string(), + friendlyId: z.string(), executionStatus: z.enum(Object.values(TaskRunExecutionStatus) as [TaskRunExecutionStatus]), description: z.string(), }); const BaseRunMetadata = z.object({ id: z.string(), + friendlyId: z.string(), status: z.enum(Object.values(TaskRunStatus) as [TaskRunStatus]), attemptNumber: z.number().nullish(), }); @@ -98,6 +106,7 @@ export const DequeuedMessage = 
z.object({ completedWaitpoints: z.array(CompletedWaitpoint), backgroundWorker: z.object({ id: z.string(), + friendlyId: z.string(), version: z.string(), }), deployment: z.object({ @@ -158,6 +167,7 @@ export const RunExecutionData = z.object({ checkpoint: z .object({ id: z.string(), + friendlyId: z.string(), type: z.string(), location: z.string(), imageRef: z.string(), diff --git a/packages/react-hooks/src/hooks/useTaskTrigger.ts b/packages/react-hooks/src/hooks/useTaskTrigger.ts index ce35b37120..a65c274d4b 100644 --- a/packages/react-hooks/src/hooks/useTaskTrigger.ts +++ b/packages/react-hooks/src/hooks/useTaskTrigger.ts @@ -99,7 +99,6 @@ export function useTaskTrigger( mutation.trigger({ payload, options }); }, isLoading: mutation.isMutating, - // @ts-expect-error handle: mutation.data as RunHandleFromTypes>, error: mutation.error, }; diff --git a/packages/trigger-sdk/src/v3/shared.ts b/packages/trigger-sdk/src/v3/shared.ts index 400ae6758a..9bfca56ce4 100644 --- a/packages/trigger-sdk/src/v3/shared.ts +++ b/packages/trigger-sdk/src/v3/shared.ts @@ -1220,7 +1220,6 @@ async function trigger_internal( } ); - // @ts-expect-error return handle as RunHandleFromTypes; } @@ -1365,7 +1364,6 @@ async function triggerAndWait_internal { } } - subscribeToRunNotifications(runIds: string[]) { - console.log("[WorkerSession] Subscribing to run notifications", { runIds }); + subscribeToRunNotifications(runFriendlyIds: string[]) { + console.log("[WorkerSession] Subscribing to run notifications", { runFriendlyIds }); if (!this.socket) { console.error("[WorkerSession] Socket not connected"); return; } - this.socket.emit("run:subscribe", { version: "1", runIds }); + this.socket.emit("run:subscribe", { version: "1", runFriendlyIds }); } - unsubscribeFromRunNotifications(runIds: string[]) { - console.log("[WorkerSession] Unsubscribing from run notifications", { runIds }); + unsubscribeFromRunNotifications(runFriendlyIds: string[]) { + console.log("[WorkerSession] Unsubscribing from run 
notifications", { runFriendlyIds }); if (!this.socket) { console.error("[WorkerSession] Socket not connected"); return; } - this.socket.emit("run:unsubscribe", { version: "1", runIds }); + this.socket.emit("run:unsubscribe", { version: "1", runFriendlyIds }); } private createSocket() { diff --git a/packages/worker/src/types.ts b/packages/worker/src/types.ts index f9fa1704aa..928b03c86d 100644 --- a/packages/worker/src/types.ts +++ b/packages/worker/src/types.ts @@ -1,22 +1,26 @@ export interface WorkerServerToClientEvents { - "run:notify": (message: { version: "1"; run: { id: string } }) => void; + "run:notify": (message: { version: "1"; run: { friendlyId: string } }) => void; } export interface WorkerClientToServerEvents { - "run:subscribe": (message: { version: "1"; runIds: string[] }) => void; - "run:unsubscribe": (message: { version: "1"; runIds: string[] }) => void; + "run:subscribe": (message: { version: "1"; runFriendlyIds: string[] }) => void; + "run:unsubscribe": (message: { version: "1"; runFriendlyIds: string[] }) => void; } export interface WorkloadServerToClientEvents { - "run:notify": (message: { version: "1"; run: { id: string } }) => void; + "run:notify": (message: { version: "1"; run: { friendlyId: string } }) => void; } export interface WorkloadClientToServerEvents { - "run:start": (message: { version: "1"; run: { id: string }; snapshot: { id: string } }) => void; + "run:start": (message: { + version: "1"; + run: { friendlyId: string }; + snapshot: { friendlyId: string }; + }) => void; } export type WorkloadClientSocketData = { deploymentId: string; - runId?: string; + runFriendlyId?: string; snapshotId?: string; }; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d7be588b5a..719af38529 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1317,6 +1317,9 @@ importers: packages/core: dependencies: + '@bugsnag/cuid': + specifier: ^3.1.1 + version: 3.1.1 '@electric-sql/client': specifier: 1.0.0-beta.1 version: 1.0.0-beta.1 @@ -4554,6 +4557,10 @@ 
packages: resolution: {integrity: sha512-QDdVFLoN93Zjg36NoQPZfsVH9tZew7wKDKyV5qRdj8ntT4wQCOradQjRaTdwMhWUYsgKsvCINKKm87FdEk96Ag==} dev: false + /@bugsnag/cuid@3.1.1: + resolution: {integrity: sha512-d2z4b0rEo3chI07FNN1Xds8v25CNeekecU6FC/2Fs9MxY2EipkZTThVcV2YinMn8dvRUlViKOyC50evoUxg8tw==} + dev: false + /@bundled-es-modules/cookie@2.0.0: resolution: {integrity: sha512-Or6YHg/kamKHpxULAdSqhGqnWFneIXu1NKvvfBBzKGwpVsYuFIQ5aBPHDnnoR3ghW1nvSkALd+EF9iMtY7Vjxw==} dependencies: From 677a2d62dbe1f887a73af2ce6b42271bc0e4e551 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 17 Dec 2024 14:23:24 +0000 Subject: [PATCH 288/485] update lockfile --- pnpm-lock.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 719af38529..0bcdbfb342 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1152,7 +1152,7 @@ importers: specifier: workspace:3.3.7 version: link:../core '@trigger.dev/worker': - specifier: workspace:3.3.5 + specifier: workspace:3.3.7 version: link:../worker c12: specifier: ^1.11.1 @@ -10450,7 +10450,7 @@ packages: react: ^16.8 || ^17.0 || ^18.0 react-dom: ^16.8 || ^17.0 || ^18.0 dependencies: - '@babel/runtime': 7.20.7 + '@babel/runtime': 7.24.5 '@radix-ui/primitive': 1.0.0 '@radix-ui/react-context': 1.0.0(react@18.2.0) '@radix-ui/react-direction': 1.0.0(react@18.2.0) From 62c03bf211aaee3849bc8d95dbb73209de50c77e Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 17 Dec 2024 15:26:12 +0000 Subject: [PATCH 289/485] Created a shared determineEngineVersion function --- apps/webapp/app/v3/engineVersion.server.ts | 53 +++++++++++++++++++ .../app/v3/services/triggerTask.server.ts | 45 ++++------------ 2 files changed, 62 insertions(+), 36 deletions(-) create mode 100644 apps/webapp/app/v3/engineVersion.server.ts diff --git a/apps/webapp/app/v3/engineVersion.server.ts b/apps/webapp/app/v3/engineVersion.server.ts new file mode 100644 index 0000000000..768a6635ca --- /dev/null 
+++ b/apps/webapp/app/v3/engineVersion.server.ts @@ -0,0 +1,53 @@ +import { RunEngineVersion, RuntimeEnvironmentType } from "@trigger.dev/database"; +import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; + +export async function determineEngineVersion({ + environment, + version, +}: { + environment: AuthenticatedEnvironment; + version?: RunEngineVersion; +}): Promise { + if (version) return version; + + // If the project is V1, then none of the background workers are running V2 + if (environment.project.engine === RunEngineVersion.V1) { + return "V1"; + } + + // For now, dev is always V1 + if (environment.type === RuntimeEnvironmentType.DEVELOPMENT) { + return "V1"; + } + + //todo we need to determine the version using the BackgroundWorker + //- triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. + //- No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + // const workerWithTasks = workerId + // ? await getWorkerDeploymentFromWorker(prisma, workerId) + // : run.runtimeEnvironment.type === "DEVELOPMENT" + // ? await getMostRecentWorker(prisma, run.runtimeEnvironmentId) + // : await getWorkerFromCurrentlyPromotedDeployment(prisma, run.runtimeEnvironmentId); + + //todo Additional checks + /* + - If the `triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. + - Add an `engine` column to `Project` in the database. + + Add `engine` to the trigger.config file. It would default to "V1" for now, but you can set it to V2. + + You run `npx trigger.dev@latest deploy` with config v2. + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. + + You run `npx trigger.dev@latest dev` with config v2 + - Create BackgroundWorker with `engine`: `v2`. + - Set the `project` `engine` column to `v2`. + + When triggering + - triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. 
+ - No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW + */ + + return "V2"; +} diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index e5a602cdc0..5c7f1ad753 100644 --- a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -4,6 +4,7 @@ import { WithRunEngine } from "./baseService.server"; import { RunEngineVersion, RuntimeEnvironmentType } from "@trigger.dev/database"; import { TriggerTaskServiceV1 } from "./triggerTaskV1.server"; import { TriggerTaskServiceV2 } from "./triggerTaskV2.server"; +import { determineEngineVersion } from "../engineVersion.server"; export type TriggerTaskServiceOptions = { idempotencyKey?: string; @@ -35,44 +36,16 @@ export class TriggerTaskService extends WithRunEngine { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); - //todo we need to determine the version using the BackgroundWorker - //- triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. - //- No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW - // const workerWithTasks = workerId - // ? await getWorkerDeploymentFromWorker(prisma, workerId) - // : run.runtimeEnvironment.type === "DEVELOPMENT" - // ? 
await getMostRecentWorker(prisma, run.runtimeEnvironmentId) - // : await getWorkerFromCurrentlyPromotedDeployment(prisma, run.runtimeEnvironmentId); + const version = await determineEngineVersion({ environment }); - if (environment.project.engine === RunEngineVersion.V1) { - return await this.callV1(taskId, environment, body, options); + switch (version) { + case "V1": { + return await this.callV1(taskId, environment, body, options); + } + case "V2": { + return await this.callV2(taskId, environment, body, options); + } } - - if (environment.type === RuntimeEnvironmentType.DEVELOPMENT) { - return await this.callV1(taskId, environment, body, options); - } - - //todo Additional checks - /* - - If the `triggerVersion` is 3.2 or higher AND the project has engine V2, we will use the run engine. - - Add an `engine` column to `Project` in the database. - - Add `engine` to the trigger.config file. It would default to "V1" for now, but you can set it to V2. - - You run `npx trigger.dev@latest deploy` with config v2. - - Create BackgroundWorker with `engine`: `v2`. - - Set the `project` `engine` column to `v2`. - - You run `npx trigger.dev@latest dev` with config v2 - - Create BackgroundWorker with `engine`: `v2`. - - Set the `project` `engine` column to `v2`. - - When triggering - - triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. 
- - No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW - */ - - return await this.callV2(taskId, environment, body, options); }); } From 0b375393490d936b622d1f9f6951a93f37b005ec Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 17 Dec 2024 15:25:39 +0000 Subject: [PATCH 290/485] disable unfinished commands --- packages/cli-v3/src/cli/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/cli-v3/src/cli/index.ts b/packages/cli-v3/src/cli/index.ts index 8be6a60028..b061a1cf14 100644 --- a/packages/cli-v3/src/cli/index.ts +++ b/packages/cli-v3/src/cli/index.ts @@ -30,7 +30,7 @@ configureLogoutCommand(program); configureListProfilesCommand(program); configureSwitchProfilesCommand(program); configureUpdateCommand(program); -configureWorkersCommand(program); -configureTriggerTaskCommand(program); +// configureWorkersCommand(program); +// configureTriggerTaskCommand(program); installExitHandler(); From da4172889eece81929828f6d25fe374bece0cbf9 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 17 Dec 2024 15:33:09 +0000 Subject: [PATCH 291/485] save new cli config to different location, misc fixes --- packages/cli-v3/src/utilities/configFiles.ts | 79 +++++++++++--------- 1 file changed, 45 insertions(+), 34 deletions(-) diff --git a/packages/cli-v3/src/utilities/configFiles.ts b/packages/cli-v3/src/utilities/configFiles.ts index 2a06358671..6e1c9052e7 100644 --- a/packages/cli-v3/src/utilities/configFiles.ts +++ b/packages/cli-v3/src/utilities/configFiles.ts @@ -1,4 +1,4 @@ -import { mkdirSync, writeFileSync } from "node:fs"; +import { existsSync, mkdirSync, writeFileSync } from "node:fs"; import path from "node:path"; import { z } from "zod"; import { xdgAppPaths } from "../imports/xdg-app-paths.js"; @@ -13,6 +13,9 @@ function getGlobalConfigFolderPath() { export const DEFFAULT_PROFILE = "default"; +const CONFIG_FILE = 
"config.json"; +const OLD_CONFIG_FILE = "default.json"; + const CliConfigProfileSettings = z.object({ accessToken: z.string().optional(), apiUrl: z.string().optional(), @@ -29,44 +32,60 @@ const CliConfigFile = z.object({ }); type CliConfigFile = z.infer; +function getOldAuthConfigFilePath() { + return path.join(getGlobalConfigFolderPath(), OLD_CONFIG_FILE); +} + function getAuthConfigFilePath() { - return path.join(getGlobalConfigFolderPath(), "default.json"); + return path.join(getGlobalConfigFolderPath(), CONFIG_FILE); } function getAuthConfigFileBackupPath() { // Multiple calls won't overwrite old backups - return path.join(getGlobalConfigFolderPath(), `default.json.bak-${Date.now()}`); + return path.join(getGlobalConfigFolderPath(), `${CONFIG_FILE}.bak-${Date.now()}`); +} + +function getBlankConfig(): CliConfigFile { + return { + version: 2, + currentProfile: DEFFAULT_PROFILE, + profiles: {}, + }; +} + +function getConfig() { + return readAuthConfigFile() ?? getBlankConfig(); } export function writeAuthConfigCurrentProfileName(profile: string) { - const existingConfig = readAuthConfigFile(); + const config = getConfig(); - existingConfig.currentProfile = profile; + config.currentProfile = profile; - writeAuthConfigFile(existingConfig); + writeAuthConfigFile(config); } export function readAuthConfigCurrentProfileName(): string { - const existingConfig = readAuthConfigFile(); - return existingConfig.currentProfile; + const config = getConfig(); + return config.currentProfile; } export function writeAuthConfigProfile( settings: CliConfigProfileSettings, profile: string = DEFFAULT_PROFILE ) { - const existingConfig = readAuthConfigFile(); + const config = getConfig(); - existingConfig.profiles[profile] = settings; + config.profiles[profile] = settings; - writeAuthConfigFile(existingConfig); + writeAuthConfigFile(config); } export function readAuthConfigProfile( profile: string = DEFFAULT_PROFILE ): CliConfigProfileSettings | undefined { try { - const config = 
readAuthConfigFile(); + const config = getConfig(); return config.profiles[profile]; } catch (error) { logger.debug(`Error reading auth config file: ${error}`); @@ -75,20 +94,25 @@ export function readAuthConfigProfile( } export function deleteAuthConfigProfile(profile: string = DEFFAULT_PROFILE) { - const existingConfig = readAuthConfigFile(); + const config = getConfig(); - delete existingConfig.profiles[profile]; + delete config.profiles[profile]; + + if (config.currentProfile === profile) { + config.currentProfile = DEFFAULT_PROFILE; + } - writeAuthConfigFile(existingConfig); + writeAuthConfigFile(config); } -export function readAuthConfigFile(): CliConfigFile { +export function readAuthConfigFile(): CliConfigFile | null { try { - const authConfigFilePath = getAuthConfigFilePath(); + const configFilePath = getAuthConfigFilePath(); + const configFileExists = existsSync(configFilePath); - logger.debug(`Reading auth config file`, { authConfigFilePath }); + logger.debug(`Reading auth config file`, { configFilePath, configFileExists }); - const json = readJSONFileSync(authConfigFilePath); + const json = readJSONFileSync(configFileExists ? 
configFilePath : getOldAuthConfigFilePath()); if ("currentProfile" in json) { // This is the new format @@ -105,16 +129,13 @@ export function readAuthConfigFile(): CliConfigFile { profiles: oldConfigFormat, } satisfies CliConfigFile; - // Save a backup - backupOldConfigFile(oldConfigFormat); - - // Then overwrite the old config with the new format + // Save to new config file location, the old file will remain untouched writeAuthConfigFile(newConfigFormat); return newConfigFormat; } catch (error) { logger.debug(`Error reading auth config file: ${error}`); - throw new Error(`Error reading auth config file: ${error}`); + return null; } } @@ -127,13 +148,3 @@ export function writeAuthConfigFile(config: CliConfigFile) { encoding: "utf-8", }); } - -export function backupOldConfigFile(config: OldCliConfigFile) { - const authConfigFilePath = getAuthConfigFileBackupPath(); - mkdirSync(path.dirname(authConfigFilePath), { - recursive: true, - }); - writeFileSync(path.join(authConfigFilePath), JSON.stringify(config, undefined, 2), { - encoding: "utf-8", - }); -} From d3a3951f2c0250e2adbb5a1b633e876231026695 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 17 Dec 2024 16:11:22 +0000 Subject: [PATCH 292/485] add basic engine version check via current deploy --- apps/webapp/app/v3/engineVersion.server.ts | 15 ++++++++++++- .../app/v3/models/workerDeployment.server.ts | 21 ++++++++++++++++++- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/apps/webapp/app/v3/engineVersion.server.ts b/apps/webapp/app/v3/engineVersion.server.ts index 768a6635ca..26b268fd5c 100644 --- a/apps/webapp/app/v3/engineVersion.server.ts +++ b/apps/webapp/app/v3/engineVersion.server.ts @@ -1,5 +1,6 @@ import { RunEngineVersion, RuntimeEnvironmentType } from "@trigger.dev/database"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; +import { findCurrentWorkerDeploymentWithoutTasks } from "./models/workerDeployment.server"; export 
async function determineEngineVersion({ environment, @@ -8,7 +9,9 @@ export async function determineEngineVersion({ environment: AuthenticatedEnvironment; version?: RunEngineVersion; }): Promise { - if (version) return version; + if (version) { + return version; + } // If the project is V1, then none of the background workers are running V2 if (environment.project.engine === RunEngineVersion.V1) { @@ -20,6 +23,16 @@ export async function determineEngineVersion({ return "V1"; } + /** + * The project has V2 enabled and this isn't dev + */ + + // Check the current deployment for this environment + const currentDeployment = await findCurrentWorkerDeploymentWithoutTasks(environment.id); + if (currentDeployment?.type === "V1") { + return "V1"; + } + //todo we need to determine the version using the BackgroundWorker //- triggerAndWait we can lookup the BackgroundWorker easily, and get the engine. //- No locked version: lookup the BackgroundWorker via the Deployment/latest dev BW diff --git a/apps/webapp/app/v3/models/workerDeployment.server.ts b/apps/webapp/app/v3/models/workerDeployment.server.ts index 0ea330a1a4..71575a32bf 100644 --- a/apps/webapp/app/v3/models/workerDeployment.server.ts +++ b/apps/webapp/app/v3/models/workerDeployment.server.ts @@ -1,5 +1,5 @@ import type { Prettify } from "@trigger.dev/core"; -import { BackgroundWorker } from "@trigger.dev/database"; +import { BackgroundWorker, WorkerDeployment } from "@trigger.dev/database"; import { CURRENT_DEPLOYMENT_LABEL, CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; import { Prisma, prisma } from "~/db.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; @@ -45,6 +45,25 @@ export async function findCurrentWorkerDeployment( return promotion?.deployment; } +export async function findCurrentWorkerDeploymentWithoutTasks( + environmentId: string, + label = CURRENT_DEPLOYMENT_LABEL +): Promise { + const promotion = await prisma.workerDeploymentPromotion.findUnique({ + where: { + 
environmentId_label: { + environmentId, + label, + }, + }, + include: { + deployment: true, + }, + }); + + return promotion?.deployment; +} + export async function findCurrentUnmanagedWorkerDeployment( environmentId: string ): Promise { From 476b20f23e44ebd07156713da6b0b5d1e6d0f849 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 17 Dec 2024 17:19:33 +0000 Subject: [PATCH 293/485] new run engine will default to node 22 runtime --- packages/cli-v3/src/commands/deploy.ts | 6 +++--- packages/cli-v3/src/config.ts | 22 ++++++++++++++++++-- packages/core/src/v3/build/resolvedConfig.ts | 3 ++- packages/core/src/v3/config.ts | 4 ++++ 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/packages/cli-v3/src/commands/deploy.ts b/packages/cli-v3/src/commands/deploy.ts index 91caa8f523..42f6952cdf 100644 --- a/packages/cli-v3/src/commands/deploy.ts +++ b/packages/cli-v3/src/commands/deploy.ts @@ -211,10 +211,10 @@ async function _deployCommand(dir: string, options: DeployCommandOptions) { const forcedExternals = await resolveAlwaysExternal(projectClient.client); - const isRunEngineV2 = resolvedConfig.compatibilityFlags.includes("run_engine_v2"); + const { features } = resolvedConfig; const buildManifest = await buildWorker({ - target: isRunEngineV2 ? "managed" : "deploy", + target: features.run_engine_v2 ? "managed" : "deploy", environment: options.env, destination: destination.path, resolvedConfig, @@ -246,7 +246,7 @@ async function _deployCommand(dir: string, options: DeployCommandOptions) { selfHosted: options.selfHosted, registryHost: options.registry, namespace: options.namespace, - type: isRunEngineV2 ? "MANAGED" : "V1", + type: features.run_engine_v2 ? 
"MANAGED" : "V1", }); if (!deploymentResponse.success) { diff --git a/packages/cli-v3/src/config.ts b/packages/cli-v3/src/config.ts index 1108f70235..3a7235cdc3 100644 --- a/packages/cli-v3/src/config.ts +++ b/packages/cli-v3/src/config.ts @@ -1,4 +1,10 @@ -import { ResolveEnvironmentVariablesFunction, TriggerConfig } from "@trigger.dev/core/v3"; +import { + BuildRuntime, + CompatibilityFlag, + CompatibilityFlagFeatures, + ResolveEnvironmentVariablesFunction, + TriggerConfig, +} from "@trigger.dev/core/v3"; import { DEFAULT_RUNTIME, ResolvedConfig } from "@trigger.dev/core/v3/build"; import * as c12 from "c12"; import { defu } from "defu"; @@ -131,6 +137,12 @@ export function configPlugin(resolvedConfig: ResolvedConfig): esbuild.Plugin | u }; } +function featuresFromCompatibilityFlags(flags: CompatibilityFlag[]): CompatibilityFlagFeatures { + return { + run_engine_v2: flags.includes("run_engine_v2"), + }; +} + async function resolveConfig( cwd: string, result: c12.ResolvedConfig, @@ -157,6 +169,10 @@ async function resolveConfig( dirs = dirs.map((dir) => (isAbsolute(dir) ? relative(workingDir, dir) : dir)); + const features = featuresFromCompatibilityFlags(config.compatibilityFlags ?? []); + + const defaultRuntime: BuildRuntime = features.run_engine_v2 ? "node-22" : DEFAULT_RUNTIME; + const mergedConfig = defu( { workingDir: packageJsonPath ? 
dirname(packageJsonPath) : cwd, @@ -170,7 +186,7 @@ async function resolveConfig( config, { dirs, - runtime: DEFAULT_RUNTIME, + runtime: defaultRuntime, tsconfig: tsconfigPath, build: { jsx: { @@ -183,6 +199,7 @@ async function resolveConfig( conditions: [], }, compatibilityFlags: [], + features, } ) as ResolvedConfig; // TODO: For some reason, without this, there is a weird type error complaining about tsconfigPath being string | nullish, which can't be assigned to string | undefined @@ -190,6 +207,7 @@ async function resolveConfig( ...mergedConfig, dirs: Array.from(new Set(mergedConfig.dirs)), instrumentedPackageNames: getInstrumentedPackageNames(mergedConfig), + runtime: mergedConfig.runtime, }; } diff --git a/packages/core/src/v3/build/resolvedConfig.ts b/packages/core/src/v3/build/resolvedConfig.ts index b2536fdff0..06caa8d256 100644 --- a/packages/core/src/v3/build/resolvedConfig.ts +++ b/packages/core/src/v3/build/resolvedConfig.ts @@ -1,6 +1,6 @@ import { type Defu } from "defu"; import type { Prettify } from "ts-essentials"; -import { CompatibilityFlag, TriggerConfig } from "../config.js"; +import { CompatibilityFlag, CompatibilityFlagFeatures, TriggerConfig } from "../config.js"; import { BuildRuntime } from "../schemas/build.js"; import { ResolveEnvironmentVariablesFunction } from "../types/index.js"; @@ -17,6 +17,7 @@ export type ResolvedConfig = Prettify< jsx: { factory: string; fragment: string; automatic: true }; } & Omit, "jsx">; compatibilityFlags: CompatibilityFlag[]; + features: CompatibilityFlagFeatures; }, ] > & { diff --git a/packages/core/src/v3/config.ts b/packages/core/src/v3/config.ts index 30afdaf91a..a21534df73 100644 --- a/packages/core/src/v3/config.ts +++ b/packages/core/src/v3/config.ts @@ -12,6 +12,10 @@ import type { BuildRuntime, RetryOptions } from "./index.js"; export type CompatibilityFlag = "run_engine_v2"; +export type CompatibilityFlagFeatures = { + [key in CompatibilityFlag]: boolean; +}; + export type TriggerConfig = { /** 
* @default "node" From e155aada667c20197dd31a5f9e12d20851cdfb5b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 09:29:32 +0000 Subject: [PATCH 294/485] block some actions for projects on previous run engine --- apps/webapp/app/routes/api.v1.deployments.ts | 41 ++++++++++++------- apps/webapp/app/routes/api.v1.workers.ts | 19 +++++++-- .../services/initializeDeployment.server.ts | 6 ++- .../v3/services/rollbackDeployment.server.ts | 2 +- .../cli-v3/src/commands/workers/create.ts | 2 +- packages/cli-v3/src/commands/workers/list.ts | 2 +- 6 files changed, 51 insertions(+), 21 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.deployments.ts b/apps/webapp/app/routes/api.v1.deployments.ts index 2f4b9bdd54..c3dcfb13d0 100644 --- a/apps/webapp/app/routes/api.v1.deployments.ts +++ b/apps/webapp/app/routes/api.v1.deployments.ts @@ -6,6 +6,7 @@ import { import { env } from "~/env.server"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; +import { ServiceValidationError } from "~/v3/services/baseService.server"; import { InitializeDeploymentService } from "~/v3/services/initializeDeployment.server"; export async function action({ request, params }: ActionFunctionArgs) { @@ -33,18 +34,30 @@ export async function action({ request, params }: ActionFunctionArgs) { const service = new InitializeDeploymentService(); - const { deployment, imageTag } = await service.call(authenticatedEnv, body.data); - - const responseBody: InitializeDeploymentResponseBody = { - id: deployment.friendlyId, - contentHash: deployment.contentHash, - shortCode: deployment.shortCode, - version: deployment.version, - externalBuildData: - deployment.externalBuildData as InitializeDeploymentResponseBody["externalBuildData"], - imageTag, - registryHost: body.data.registryHost ?? 
env.DEPLOY_REGISTRY_HOST, - }; - - return json(responseBody, { status: 200 }); + try { + const { deployment, imageTag } = await service.call(authenticatedEnv, body.data); + + const responseBody: InitializeDeploymentResponseBody = { + id: deployment.friendlyId, + contentHash: deployment.contentHash, + shortCode: deployment.shortCode, + version: deployment.version, + externalBuildData: + deployment.externalBuildData as InitializeDeploymentResponseBody["externalBuildData"], + imageTag, + registryHost: body.data.registryHost ?? env.DEPLOY_REGISTRY_HOST, + }; + + return json(responseBody, { status: 200 }); + } catch (error) { + if (error instanceof ServiceValidationError) { + return json({ error: error.message }, { status: 400 }); + } else if (error instanceof Error) { + logger.error("Error initializing deployment", { error: error.message }); + return json({ error: `Internal server error: ${error.message}` }, { status: 500 }); + } else { + logger.error("Error initializing deployment", { error: String(error) }); + return json({ error: "Internal server error" }, { status: 500 }); + } + } } diff --git a/apps/webapp/app/routes/api.v1.workers.ts b/apps/webapp/app/routes/api.v1.workers.ts index 8625ae959d..4008d64f1a 100644 --- a/apps/webapp/app/routes/api.v1.workers.ts +++ b/apps/webapp/app/routes/api.v1.workers.ts @@ -15,7 +15,13 @@ export const loader = createLoaderApiRoute( corsStrategy: "all", findResource: async () => 1, // This is a dummy function, we don't need to find a resource }, - async ({ authentication }): Promise> => { + async ({ + authentication, + }): Promise> => { + if (authentication.environment.project.engine !== "V2") { + return json({ error: "Not supported for V1 projects" }, { status: 400 }); + } + const service = new WorkerGroupService(); const workers = await service.listWorkerGroups({ projectId: authentication.environment.projectId, @@ -33,12 +39,19 @@ export const loader = createLoaderApiRoute( } ); -export const action = createActionApiRoute( 
+export const { action } = createActionApiRoute( { corsStrategy: "all", body: WorkersCreateRequestBody, }, - async ({ authentication, body }): Promise> => { + async ({ + authentication, + body, + }): Promise> => { + if (authentication.environment.project.engine !== "V2") { + return json({ error: "Not supported" }, { status: 400 }); + } + const service = new WorkerGroupService(); const { workerGroup, token } = await service.createWorkerGroup({ projectId: authentication.environment.projectId, diff --git a/apps/webapp/app/v3/services/initializeDeployment.server.ts b/apps/webapp/app/v3/services/initializeDeployment.server.ts index 44678addf8..c5a375ba90 100644 --- a/apps/webapp/app/v3/services/initializeDeployment.server.ts +++ b/apps/webapp/app/v3/services/initializeDeployment.server.ts @@ -4,7 +4,7 @@ import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { generateFriendlyId } from "../friendlyIdentifiers"; import { createRemoteImageBuild } from "../remoteImageBuilder.server"; import { calculateNextBuildVersion } from "../utils/calculateNextBuildVersion"; -import { BaseService } from "./baseService.server"; +import { BaseService, ServiceValidationError } from "./baseService.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; import { env } from "~/env.server"; import { WorkerDeploymentType } from "@trigger.dev/database"; @@ -18,6 +18,10 @@ export class InitializeDeploymentService extends BaseService { payload: InitializeDeploymentRequestBody ) { return this.traceWithEnv("call", environment, async (span) => { + if (payload.type !== "V1" && environment.project.engine !== "V2") { + throw new ServiceValidationError("Only V1 deployments are supported for this project"); + } + const latestDeployment = await this._prisma.workerDeployment.findFirst({ where: { environmentId: environment.id, diff --git a/apps/webapp/app/v3/services/rollbackDeployment.server.ts b/apps/webapp/app/v3/services/rollbackDeployment.server.ts index 
b5ee41f1b6..ceeca4fed8 100644 --- a/apps/webapp/app/v3/services/rollbackDeployment.server.ts +++ b/apps/webapp/app/v3/services/rollbackDeployment.server.ts @@ -12,7 +12,7 @@ export class RollbackDeploymentService extends BaseService { } if (deployment.type !== WorkerInstanceGroupType.MANAGED) { - logger.error("Can only roll back shared deployments", { + logger.error("Can only roll back managed deployments", { id: deployment.id, type: deployment.type, }); diff --git a/packages/cli-v3/src/commands/workers/create.ts b/packages/cli-v3/src/commands/workers/create.ts index d2e927bf07..9f93c7ad73 100644 --- a/packages/cli-v3/src/commands/workers/create.ts +++ b/packages/cli-v3/src/commands/workers/create.ts @@ -126,7 +126,7 @@ async function _workersCreateCommand(dir: string, options: WorkersCreateCommandO }); if (!newWorker.success) { - throw new Error("Failed to create worker"); + throw new Error(`Failed to create worker: ${newWorker.error}`); } outro( diff --git a/packages/cli-v3/src/commands/workers/list.ts b/packages/cli-v3/src/commands/workers/list.ts index 47bab8c303..ae4467a107 100644 --- a/packages/cli-v3/src/commands/workers/list.ts +++ b/packages/cli-v3/src/commands/workers/list.ts @@ -104,7 +104,7 @@ async function _workersListCommand(dir: string, options: WorkersListCommandOptio const workers = await projectClient.client.workers.list(); if (!workers.success) { - throw new Error("Failed to list workers"); + throw new Error(`Failed to list workers: ${workers.error}`); } logger.table( From 23e8d37f6ca8f4fe228029b326e043e81c53222a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 09:45:31 +0000 Subject: [PATCH 295/485] fix worker group tests --- apps/webapp/test/workerGroup.test.ts | 11 +++++++++-- internal-packages/testcontainers/src/setup.ts | 5 ++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/apps/webapp/test/workerGroup.test.ts b/apps/webapp/test/workerGroup.test.ts index 
828dfe39a4..1d80bcd873 100644 --- a/apps/webapp/test/workerGroup.test.ts +++ b/apps/webapp/test/workerGroup.test.ts @@ -19,6 +19,7 @@ import { HEADER_NAME } from "@trigger.dev/worker"; import { RunEngine } from "@internal/run-engine"; import { trace } from "@opentelemetry/api"; import { TriggerTaskService } from "~/v3/services/triggerTask.server"; +import { env } from "~/env.server"; describe("worker", () => { const defaultInstanceName = "test_worker"; @@ -69,6 +70,7 @@ describe("worker", () => { headers: { Authorization: `Bearer ${token.plaintext}`, [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, + [HEADER_NAME.WORKER_MANAGED_SECRET]: env.MANAGED_WORKER_SECRET, }, }); @@ -86,6 +88,7 @@ describe("worker", () => { headers: { Authorization: `Bearer ${token.plaintext}`, [HEADER_NAME.WORKER_INSTANCE_NAME]: secondInstanceName, + [HEADER_NAME.WORKER_MANAGED_SECRET]: env.MANAGED_WORKER_SECRET, }, }); const secondAuth = await tokenService.authenticate(secondRequest); @@ -99,7 +102,11 @@ describe("worker", () => { containerTest("dequeue - unmanaged", async ({ prisma, redisContainer }) => { const taskIdentifier = "test-task"; - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + const authenticatedEnvironment = await setupAuthenticatedEnvironment( + prisma, + "PRODUCTION", + "V2" + ); const { deployment } = await setupBackgroundWorker( prisma, @@ -136,7 +143,7 @@ describe("worker", () => { const tokenService = new WorkerGroupTokenService({ prisma, engine }); const authenticatedInstance = await tokenService.authenticate(request); - assert(authenticatedInstance, "authenticationInstance should be defined"); + assert(authenticatedInstance, "authenticatedInstance should be defined"); expect(authenticatedInstance.type).toBe(WorkerInstanceGroupType.UNMANAGED); assert( authenticatedInstance.type === WorkerInstanceGroupType.UNMANAGED, diff --git a/internal-packages/testcontainers/src/setup.ts 
b/internal-packages/testcontainers/src/setup.ts index cc7dada97d..a45d3df1b2 100644 --- a/internal-packages/testcontainers/src/setup.ts +++ b/internal-packages/testcontainers/src/setup.ts @@ -8,6 +8,7 @@ import { BackgroundWorkerTask, Prisma, PrismaClient, + RunEngineVersion, RuntimeEnvironmentType, } from "@trigger.dev/database"; @@ -17,7 +18,8 @@ export type AuthenticatedEnvironment = Prisma.RuntimeEnvironmentGetPayload<{ export async function setupAuthenticatedEnvironment( prisma: PrismaClient, - type: RuntimeEnvironmentType + type: RuntimeEnvironmentType, + engine?: RunEngineVersion ) { // Your database setup logic here const org = await prisma.organization.create({ @@ -33,6 +35,7 @@ export async function setupAuthenticatedEnvironment( slug: "test-project", externalRef: "proj_1234", organizationId: org.id, + engine, }, }); From fb5b2e76d0e9069311cc8e6ffaac3028139ef025 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 11:12:31 +0000 Subject: [PATCH 296/485] fix triggerAndWait test --- .../run-engine/src/engine/tests/triggerAndWait.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 5192084b13..8c8bab2bd0 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -185,7 +185,7 @@ describe("RunEngine triggerAndWait", () => { expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("EXECUTING"); expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); - expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRun?.id).toBe( 
childRun.id ); expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); @@ -426,7 +426,7 @@ describe("RunEngine triggerAndWait", () => { expect(parentExecutionDataAfter.snapshot.executionStatus).toBe("EXECUTING"); expect(parentExecutionDataAfter.completedWaitpoints?.length).toBe(1); expect(parentExecutionDataAfter.completedWaitpoints![0].id).toBe(runWaitpoint.waitpointId); - expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRun?.id).toBe( childRun.id ); expect(parentExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); @@ -441,7 +441,7 @@ describe("RunEngine triggerAndWait", () => { expect(parent2ExecutionDataAfter.completedWaitpoints![0].id).toBe( childRunWithWaitpoint.associatedWaitpoint!.id ); - expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRunId).toBe( + expect(parentExecutionDataAfter.completedWaitpoints![0].completedByTaskRun?.id).toBe( childRun.id ); expect(parent2ExecutionDataAfter.completedWaitpoints![0].output).toBe('{"foo":"bar"}'); From f94154a0122de90d159d5ea7a539a2a8b8c146b1 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 11:31:34 +0000 Subject: [PATCH 297/485] one typescript version to rule them all --- apps/coordinator/package.json | 3 +- apps/docker-provider/package.json | 3 +- apps/kubernetes-provider/package.json | 3 +- apps/proxy/package.json | 1 - apps/webapp/package.json | 1 - internal-packages/database/package.json | 3 +- internal-packages/emails/package.json | 3 +- internal-packages/otlp-importer/package.json | 3 +- internal-packages/redis-worker/package.json | 1 - internal-packages/run-engine/package.json | 1 - internal-packages/testcontainers/package.json | 3 +- internal-packages/zod-worker/package.json | 1 - package.json | 2 +- packages/build/package.json | 1 - packages/cli-v3/package.json | 1 - 
packages/core/package.json | 1 - packages/react-hooks/package.json | 5 +- packages/rsc/package.json | 3 +- packages/trigger-sdk/package.json | 1 - packages/worker/package.json | 3 +- pnpm-lock.yaml | 344 +++++------------- references/bun-catalog/package.json | 3 +- references/hello-world/package.json | 3 +- references/init-shell/package.json | 3 +- references/nextjs-realtime/package.json | 3 +- references/v3-catalog/package.json | 3 +- 26 files changed, 109 insertions(+), 293 deletions(-) diff --git a/apps/coordinator/package.json b/apps/coordinator/package.json index c860adb1c8..20e0e267e9 100644 --- a/apps/coordinator/package.json +++ b/apps/coordinator/package.json @@ -26,7 +26,6 @@ "@types/node": "^18", "dotenv": "^16.4.2", "esbuild": "^0.19.11", - "tsx": "^4.7.0", - "typescript": "^5.3.3" + "tsx": "^4.7.0" } } \ No newline at end of file diff --git a/apps/docker-provider/package.json b/apps/docker-provider/package.json index 56d8f89b7e..e68115451b 100644 --- a/apps/docker-provider/package.json +++ b/apps/docker-provider/package.json @@ -23,7 +23,6 @@ "@types/node": "^18.19.8", "dotenv": "^16.4.2", "esbuild": "^0.19.11", - "tsx": "^4.7.0", - "typescript": "^5.3.3" + "tsx": "^4.7.0" } } \ No newline at end of file diff --git a/apps/kubernetes-provider/package.json b/apps/kubernetes-provider/package.json index 3b62f65449..6cb26e2c70 100644 --- a/apps/kubernetes-provider/package.json +++ b/apps/kubernetes-provider/package.json @@ -23,7 +23,6 @@ "devDependencies": { "dotenv": "^16.4.2", "esbuild": "^0.19.11", - "tsx": "^4.7.0", - "typescript": "^5.3.3" + "tsx": "^4.7.0" } } \ No newline at end of file diff --git a/apps/proxy/package.json b/apps/proxy/package.json index d72311dcf8..80646e60a0 100644 --- a/apps/proxy/package.json +++ b/apps/proxy/package.json @@ -9,7 +9,6 @@ }, "devDependencies": { "@cloudflare/workers-types": "^4.20240512.0", - "typescript": "^5.0.4", "wrangler": "^3.57.1" }, "dependencies": { diff --git a/apps/webapp/package.json 
b/apps/webapp/package.json index 3488e24a93..e9e58cf692 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -250,7 +250,6 @@ "tailwindcss": "3.4.1", "ts-node": "^10.7.0", "tsconfig-paths": "^3.14.1", - "typescript": "^5.1.6", "vite-tsconfig-paths": "^4.0.5", "vitest": "^1.4.0" }, diff --git a/internal-packages/database/package.json b/internal-packages/database/package.json index 41afe0e5ef..427cdf1829 100644 --- a/internal-packages/database/package.json +++ b/internal-packages/database/package.json @@ -5,8 +5,7 @@ "main": "./src/index.ts", "types": "./src/index.ts", "dependencies": { - "@prisma/client": "5.4.1", - "typescript": "^4.8.4" + "@prisma/client": "5.4.1" }, "devDependencies": { "prisma": "5.4.1" diff --git a/internal-packages/emails/package.json b/internal-packages/emails/package.json index b22fb45e4f..5e0bc61925 100644 --- a/internal-packages/emails/package.json +++ b/internal-packages/emails/package.json @@ -19,8 +19,7 @@ }, "devDependencies": { "@types/node": "^18", - "@types/react": "18.2.69", - "typescript": "^4.9.4" + "@types/react": "18.2.69" }, "engines": { "node": ">=18.0.0" diff --git a/internal-packages/otlp-importer/package.json b/internal-packages/otlp-importer/package.json index 49d2fdbae5..72e46c2f9d 100644 --- a/internal-packages/otlp-importer/package.json +++ b/internal-packages/otlp-importer/package.json @@ -29,8 +29,7 @@ "devDependencies": { "@types/node": "^20", "rimraf": "^3.0.2", - "ts-proto": "^1.167.3", - "typescript": "^5.5.0" + "ts-proto": "^1.167.3" }, "engines": { "node": ">=18.0.0" diff --git a/internal-packages/redis-worker/package.json b/internal-packages/redis-worker/package.json index 33e7bbea42..bf44ab71cb 100644 --- a/internal-packages/redis-worker/package.json +++ b/internal-packages/redis-worker/package.json @@ -11,7 +11,6 @@ "ioredis": "^5.3.2", "lodash.omit": "^4.5.0", "nanoid": "^5.0.7", - "typescript": "^5.5.4", "zod": "3.23.8" }, "devDependencies": { diff --git 
a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index 3b1d373c9b..b0a2dc9eb6 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -14,7 +14,6 @@ "ioredis": "^5.3.2", "nanoid": "^3.3.4", "redlock": "5.0.0-beta.2", - "typescript": "^5.5.4", "zod": "3.23.8" }, "devDependencies": { diff --git a/internal-packages/testcontainers/package.json b/internal-packages/testcontainers/package.json index 1b629e7911..5fa73d40cd 100644 --- a/internal-packages/testcontainers/package.json +++ b/internal-packages/testcontainers/package.json @@ -7,8 +7,7 @@ "dependencies": { "@opentelemetry/api": "^1.9.0", "@trigger.dev/database": "workspace:*", - "ioredis": "^5.3.2", - "typescript": "^4.8.4" + "ioredis": "^5.3.2" }, "devDependencies": { "@testcontainers/postgresql": "^10.13.1", diff --git a/internal-packages/zod-worker/package.json b/internal-packages/zod-worker/package.json index 672d668ddc..712a110a9c 100644 --- a/internal-packages/zod-worker/package.json +++ b/internal-packages/zod-worker/package.json @@ -10,7 +10,6 @@ "@trigger.dev/database": "workspace:*", "graphile-worker": "0.16.6", "lodash.omit": "^4.5.0", - "typescript": "^5.5.4", "zod": "3.23.8" }, "devDependencies": { diff --git a/package.json b/package.json index 50ee8dc9e1..d05b84fdc5 100644 --- a/package.json +++ b/package.json @@ -57,7 +57,7 @@ "prettier": "^3.0.0", "tsx": "^3.7.1", "turbo": "^1.10.3", - "typescript": "^5.5.4", + "typescript": "5.5.4", "vite": "^4.1.1", "vite-tsconfig-paths": "^4.0.5", "vitest": "^0.28.4" diff --git a/packages/build/package.json b/packages/build/package.json index 6a3f851a8a..03afa06189 100644 --- a/packages/build/package.json +++ b/packages/build/package.json @@ -74,7 +74,6 @@ "@types/node": "20.14.14", "rimraf": "6.0.1", "tshy": "^3.0.2", - "typescript": "^5.5.4", "tsx": "4.17.0", "esbuild": "^0.23.0", "@arethetypeswrong/cli": "^0.15.4" diff --git a/packages/cli-v3/package.json 
b/packages/cli-v3/package.json index 369f441db8..20b4201edf 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -62,7 +62,6 @@ "ts-essentials": "10.0.1", "tshy": "^3.0.2", "tsx": "4.17.0", - "typescript": "^5.5.4", "vitest": "^2.0.5" }, "scripts": { diff --git a/packages/core/package.json b/packages/core/package.json index f63f787d4d..ba92d8a0db 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -225,7 +225,6 @@ "ts-essentials": "10.0.1", "tshy": "^3.0.2", "tsx": "4.17.0", - "typescript": "^5.5.4", "vitest": "^1.6.0" }, "engines": { diff --git a/packages/react-hooks/package.json b/packages/react-hooks/package.json index 94a8c3bffd..a68d6a75a6 100644 --- a/packages/react-hooks/package.json +++ b/packages/react-hooks/package.json @@ -47,8 +47,7 @@ "@types/react-dom": "*", "rimraf": "^3.0.2", "tshy": "^3.0.2", - "tsx": "4.17.0", - "typescript": "^5.5.4" + "tsx": "4.17.0" }, "peerDependencies": { "react": "^18.0 || ^19.0 || ^19.0.0-rc", @@ -74,4 +73,4 @@ "main": "./dist/commonjs/index.js", "types": "./dist/commonjs/index.d.ts", "module": "./dist/esm/index.js" -} \ No newline at end of file +} diff --git a/packages/rsc/package.json b/packages/rsc/package.json index 15f5910bf5..bf69bb23dc 100644 --- a/packages/rsc/package.json +++ b/packages/rsc/package.json @@ -50,8 +50,7 @@ "@types/react-dom": "*", "rimraf": "^3.0.2", "tshy": "^3.0.2", - "tsx": "4.17.0", - "typescript": "^5.5.4" + "tsx": "4.17.0" }, "engines": { "node": ">=18.20.0" diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index 832d724221..fa8c63eace 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -72,7 +72,6 @@ "tshy": "^3.0.2", "tsx": "4.17.0", "typed-emitter": "^2.1.0", - "typescript": "^5.5.4", "zod": "3.23.8" }, "peerDependencies": { diff --git a/packages/worker/package.json b/packages/worker/package.json index ed5eb5e6fb..34346fea52 100644 --- a/packages/worker/package.json +++ 
b/packages/worker/package.json @@ -47,8 +47,7 @@ "@types/node": "20.14.14", "rimraf": "6.0.1", "tshy": "^3.0.2", - "tsx": "4.17.0", - "typescript": "^5.5.4" + "tsx": "4.17.0" }, "engines": { "node": ">=18.20.0" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 0bcdbfb342..347a9b1620 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -60,7 +60,7 @@ importers: specifier: ^1.10.3 version: 1.10.3 typescript: - specifier: ^5.5.4 + specifier: 5.5.4 version: 5.5.4 vite: specifier: ^4.1.1 @@ -102,9 +102,6 @@ importers: tsx: specifier: ^4.7.0 version: 4.7.1 - typescript: - specifier: ^5.3.3 - version: 5.3.3 apps/docker-provider: dependencies: @@ -127,9 +124,6 @@ importers: tsx: specifier: ^4.7.0 version: 4.7.1 - typescript: - specifier: ^5.3.3 - version: 5.3.3 apps/kubernetes-provider: dependencies: @@ -152,9 +146,6 @@ importers: tsx: specifier: ^4.7.0 version: 4.7.1 - typescript: - specifier: ^5.3.3 - version: 5.3.3 apps/proxy: dependencies: @@ -177,9 +168,6 @@ importers: '@cloudflare/workers-types': specifier: ^4.20240512.0 version: 4.20240512.0 - typescript: - specifier: ^5.0.4 - version: 5.2.2 wrangler: specifier: ^3.57.1 version: 3.57.1(@cloudflare/workers-types@4.20240512.0) @@ -338,22 +326,22 @@ importers: version: 3.7.1(react@18.2.0) '@remix-run/express': specifier: 2.1.0 - version: 2.1.0(express@4.18.2)(typescript@5.2.2) + version: 2.1.0(express@4.18.2)(typescript@5.5.4) '@remix-run/node': specifier: 2.1.0 - version: 2.1.0(typescript@5.2.2) + version: 2.1.0(typescript@5.5.4) '@remix-run/react': specifier: 2.1.0 - version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/router': specifier: ^1.15.3 version: 1.15.3 '@remix-run/serve': specifier: 2.1.0 - version: 2.1.0(typescript@5.2.2) + version: 2.1.0(typescript@5.5.4) '@remix-run/server-runtime': specifier: 2.1.0 - version: 2.1.0(typescript@5.2.2) + version: 2.1.0(typescript@5.5.4) '@remix-run/v1-meta': specifier: ^0.1.3 
version: 0.1.3(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) @@ -428,7 +416,7 @@ importers: version: 1.0.18 class-variance-authority: specifier: ^0.5.2 - version: 0.5.2(typescript@5.2.2) + version: 0.5.2(typescript@5.5.4) clsx: specifier: ^1.2.1 version: 1.2.1 @@ -467,7 +455,7 @@ importers: version: 10.12.11(react-dom@18.2.0)(react@18.2.0) graphile-worker: specifier: 0.16.6 - version: 0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.2.2) + version: 0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.5.4) highlight.run: specifier: ^7.3.4 version: 7.3.4 @@ -660,13 +648,13 @@ importers: version: link:../../internal-packages/testcontainers '@remix-run/dev': specifier: 2.1.0 - version: 2.1.0(@remix-run/serve@2.1.0)(@types/node@20.14.14)(ts-node@10.9.1)(typescript@5.2.2) + version: 2.1.0(@remix-run/serve@2.1.0)(@types/node@20.14.14)(ts-node@10.9.1)(typescript@5.5.4) '@remix-run/eslint-config': specifier: 2.1.0 - version: 2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.2.2) + version: 2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/testing': specifier: ^2.1.0 - version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@swc/core': specifier: ^1.3.4 version: 1.3.26 @@ -759,10 +747,10 @@ importers: version: 8.5.4 '@typescript-eslint/eslint-plugin': specifier: ^5.59.6 - version: 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2) + version: 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4) '@typescript-eslint/parser': specifier: ^5.59.6 - version: 5.59.6(eslint@8.31.0)(typescript@5.2.2) + version: 5.59.6(eslint@8.31.0)(typescript@5.5.4) autoprefixer: specifier: ^10.4.13 version: 10.4.13(postcss@8.4.44) @@ -807,7 +795,7 @@ importers: version: 16.0.1(postcss@8.4.44) postcss-loader: specifier: ^8.1.1 - version: 8.1.1(postcss@8.4.44)(typescript@5.2.2)(webpack@5.88.2) + version: 
8.1.1(postcss@8.4.44)(typescript@5.5.4)(webpack@5.88.2) prettier: specifier: ^2.8.8 version: 2.8.8 @@ -834,16 +822,13 @@ importers: version: 3.4.1(ts-node@10.9.1) ts-node: specifier: ^10.7.0 - version: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2) + version: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.5.4) tsconfig-paths: specifier: ^3.14.1 version: 3.14.1 - typescript: - specifier: ^5.1.6 - version: 5.2.2 vite-tsconfig-paths: specifier: ^4.0.5 - version: 4.0.5(typescript@5.2.2) + version: 4.0.5(typescript@5.5.4) vitest: specifier: ^1.4.0 version: 1.4.0(@types/node@20.14.14) @@ -855,9 +840,6 @@ importers: '@prisma/client': specifier: 5.4.1 version: 5.4.1(prisma@5.4.1) - typescript: - specifier: ^4.8.4 - version: 4.9.5 devDependencies: prisma: specifier: 5.4.1 @@ -893,9 +875,6 @@ importers: '@types/react': specifier: 18.2.69 version: 18.2.69 - typescript: - specifier: ^4.9.4 - version: 4.9.5 internal-packages/otlp-importer: dependencies: @@ -915,9 +894,6 @@ importers: ts-proto: specifier: ^1.167.3 version: 1.167.3 - typescript: - specifier: ^5.5.0 - version: 5.5.4 internal-packages/redis-worker: dependencies: @@ -936,9 +912,6 @@ importers: nanoid: specifier: ^5.0.7 version: 5.0.7 - typescript: - specifier: ^5.5.4 - version: 5.5.4 zod: specifier: 3.23.8 version: 3.23.8 @@ -982,9 +955,6 @@ importers: redlock: specifier: 5.0.0-beta.2 version: 5.0.0-beta.2 - typescript: - specifier: ^5.5.4 - version: 5.5.4 zod: specifier: 3.23.8 version: 3.23.8 @@ -1007,9 +977,6 @@ importers: ioredis: specifier: ^5.3.2 version: 5.3.2 - typescript: - specifier: ^4.8.4 - version: 4.9.5 devDependencies: '@testcontainers/postgresql': specifier: ^10.13.1 @@ -1047,9 +1014,6 @@ importers: lodash.omit: specifier: ^4.5.0 version: 4.5.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 zod: specifier: 3.23.8 version: 3.23.8 @@ -1097,9 +1061,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages/cli-v3: 
dependencies: @@ -1308,9 +1269,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 vitest: specifier: ^2.0.5 version: 2.0.5(@types/node@20.14.14) @@ -1441,9 +1399,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 vitest: specifier: ^1.6.0 version: 1.6.0(@types/node@20.14.14) @@ -1484,9 +1439,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages/rsc: dependencies: @@ -1527,9 +1479,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages/trigger-sdk: dependencies: @@ -1609,9 +1558,6 @@ importers: typed-emitter: specifier: ^2.1.0 version: 2.1.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 zod: specifier: 3.23.8 version: 3.23.8 @@ -1646,9 +1592,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 references/bun-catalog: dependencies: @@ -1662,9 +1605,6 @@ importers: trigger.dev: specifier: workspace:* version: link:../../packages/cli-v3 - typescript: - specifier: ^5.5.4 - version: 5.5.4 references/hello-world: dependencies: @@ -1675,18 +1615,12 @@ importers: trigger.dev: specifier: workspace:* version: link:../../packages/cli-v3 - typescript: - specifier: ^5.5.4 - version: 5.5.4 references/init-shell: devDependencies: trigger.dev: specifier: workspace:* version: link:../../packages/cli-v3 - typescript: - specifier: ^5.5.4 - version: 5.5.4 references/init-shell-js: devDependencies: @@ -1790,9 +1724,6 @@ importers: trigger.dev: specifier: workspace:^3 version: link:../../packages/cli-v3 - typescript: - specifier: ^5 - version: 5.5.4 references/v3-catalog: dependencies: @@ -1989,9 +1920,6 @@ importers: tsconfig-paths: specifier: ^4.2.0 version: 4.2.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages: @@ -14009,7 +13937,7 @@ packages: - encoding dev: false - 
/@remix-run/dev@2.1.0(@remix-run/serve@2.1.0)(@types/node@20.14.14)(ts-node@10.9.1)(typescript@5.2.2): + /@remix-run/dev@2.1.0(@remix-run/serve@2.1.0)(@types/node@20.14.14)(ts-node@10.9.1)(typescript@5.5.4): resolution: {integrity: sha512-Hn5lw46F+a48dp5uHKe68ckaHgdStW4+PmLod+LMFEqrMbkF0j4XD1ousebxlv989o0Uy/OLgfRMgMy4cBOvHg==} engines: {node: '>=18.0.0'} hasBin: true @@ -14031,8 +13959,8 @@ packages: '@babel/traverse': 7.22.17 '@mdx-js/mdx': 2.3.0 '@npmcli/package-json': 4.0.1 - '@remix-run/serve': 2.1.0(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/serve': 2.1.0(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) '@types/mdx': 2.0.5 '@vanilla-extract/integration': 6.2.1(@types/node@20.14.14) arg: 5.0.2 @@ -14070,7 +13998,7 @@ packages: semver: 7.6.3 tar-fs: 2.1.1 tsconfig-paths: 4.2.0 - typescript: 5.2.2 + typescript: 5.5.4 ws: 7.5.9 transitivePeerDependencies: - '@types/node' @@ -14088,7 +14016,7 @@ packages: - utf-8-validate dev: true - /@remix-run/eslint-config@2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.2.2): + /@remix-run/eslint-config@2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.5.4): resolution: {integrity: sha512-yfeUnHpUG+XveujMi6QODKMGhs5CvKWCKzASU397BPXiPWbMv6r2acfODSWK64ZdBMu9hcLbOb42GBFydVQeHA==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14103,28 +14031,28 @@ packages: '@babel/eslint-parser': 7.21.8(@babel/core@7.22.17)(eslint@8.31.0) '@babel/preset-react': 7.18.6(@babel/core@7.22.17) '@rushstack/eslint-patch': 1.2.0 - '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2) - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) eslint: 8.31.0 eslint-import-resolver-node: 0.3.7 eslint-import-resolver-typescript: 
3.5.5(@typescript-eslint/parser@5.59.6)(eslint-import-resolver-node@0.3.7)(eslint-plugin-import@2.29.1)(eslint@8.31.0) eslint-plugin-import: 2.29.1(@typescript-eslint/parser@5.59.6)(eslint-import-resolver-typescript@3.5.5)(eslint@8.31.0) - eslint-plugin-jest: 26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.2.2) + eslint-plugin-jest: 26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.5.4) eslint-plugin-jest-dom: 4.0.3(eslint@8.31.0) eslint-plugin-jsx-a11y: 6.7.1(eslint@8.31.0) eslint-plugin-node: 11.1.0(eslint@8.31.0) eslint-plugin-react: 7.32.2(eslint@8.31.0) eslint-plugin-react-hooks: 4.6.2(eslint@8.31.0) - eslint-plugin-testing-library: 5.11.0(eslint@8.31.0)(typescript@5.2.2) + eslint-plugin-testing-library: 5.11.0(eslint@8.31.0)(typescript@5.5.4) react: 18.2.0 - typescript: 5.2.2 + typescript: 5.5.4 transitivePeerDependencies: - eslint-import-resolver-webpack - jest - supports-color dev: true - /@remix-run/express@2.1.0(express@4.18.2)(typescript@5.2.2): + /@remix-run/express@2.1.0(express@4.18.2)(typescript@5.5.4): resolution: {integrity: sha512-R5myPowQx6LYWY3+EqP42q19MOCT3+ZGwb2f0UKNs9a34R8U3nFpGWL7saXryC+To+EasujEScc8rTQw5Pftog==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14134,11 +14062,11 @@ packages: typescript: optional: true dependencies: - '@remix-run/node': 2.1.0(typescript@5.2.2) + '@remix-run/node': 2.1.0(typescript@5.5.4) express: 4.18.2 - typescript: 5.2.2 + typescript: 5.5.4 - /@remix-run/node@2.1.0(typescript@5.2.2): + /@remix-run/node@2.1.0(typescript@5.5.4): resolution: {integrity: sha512-TeSgjXnZUUlmw5FVpBVnXY7MLpracjdnwFNwoJE5NQkiUEFnGD/Yhvk4F2fOCkszqc2Z25KRclc5noweyiFu6Q==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14147,7 +14075,7 @@ packages: typescript: optional: true dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) '@remix-run/web-fetch': 4.4.1 '@remix-run/web-file': 3.1.0 
'@remix-run/web-stream': 1.1.0 @@ -14155,9 +14083,9 @@ packages: cookie-signature: 1.2.0 source-map-support: 0.5.21 stream-slice: 0.1.2 - typescript: 5.2.2 + typescript: 5.5.4 - /@remix-run/react@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2): + /@remix-run/react@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4): resolution: {integrity: sha512-DeYgfsvNxHqNn29sGA3XsZCciMKo2EFTQ9hHkuVPTsJXC4ipHr6Dja1j6UzZYPe/ZuKppiuTjueWCQlE2jOe1w==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14169,11 +14097,11 @@ packages: optional: true dependencies: '@remix-run/router': 1.10.0 - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) react-router-dom: 6.17.0(react-dom@18.2.0)(react@18.2.0) - typescript: 5.2.2 + typescript: 5.5.4 /@remix-run/router@1.10.0: resolution: {integrity: sha512-Lm+fYpMfZoEucJ7cMxgt4dYt8jLfbpwRCzAjm9UgSLOkmlqo9gupxt6YX3DY0Fk155NT9l17d/ydi+964uS9Lw==} @@ -14184,13 +14112,13 @@ packages: engines: {node: '>=14.0.0'} dev: false - /@remix-run/serve@2.1.0(typescript@5.2.2): + /@remix-run/serve@2.1.0(typescript@5.5.4): resolution: {integrity: sha512-XHI+vPYz217qrg1QcV38TTPlEBTzMJzAt0SImPutyF0S2IBrZGZIFMEsspI0i0wNvdcdQz1IqmSx+mTghzW8eQ==} engines: {node: '>=18.0.0'} hasBin: true dependencies: - '@remix-run/express': 2.1.0(express@4.18.2)(typescript@5.2.2) - '@remix-run/node': 2.1.0(typescript@5.2.2) + '@remix-run/express': 2.1.0(express@4.18.2)(typescript@5.5.4) + '@remix-run/node': 2.1.0(typescript@5.5.4) chokidar: 3.5.3 compression: 1.7.4 express: 4.18.2 @@ -14201,7 +14129,7 @@ packages: - supports-color - typescript - /@remix-run/server-runtime@2.1.0(typescript@5.2.2): + /@remix-run/server-runtime@2.1.0(typescript@5.5.4): resolution: {integrity: sha512-Uz69yF4Gu6F3VYQub3JgDo9godN8eDMeZclkadBTAWN7bYLonu0ChR/GlFxS35OLeF7BDgudxOSZob0nE1WHNg==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14216,9 +14144,9 @@ packages: cookie: 0.4.2 
set-cookie-parser: 2.6.0 source-map: 0.7.4 - typescript: 5.2.2 + typescript: 5.5.4 - /@remix-run/testing@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2): + /@remix-run/testing@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4): resolution: {integrity: sha512-eLPx4Bmjt243kyRpQTong1eFo6nkvSfCr65bb5PfoF172DKnsSSCYWAmBmB72VwtAPESHxBm3g6AUbhwphkU6A==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14228,12 +14156,12 @@ packages: typescript: optional: true dependencies: - '@remix-run/node': 2.1.0(typescript@5.2.2) - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + '@remix-run/node': 2.1.0(typescript@5.5.4) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/router': 1.10.0 react: 18.2.0 react-router-dom: 6.17.0(react-dom@18.2.0)(react@18.2.0) - typescript: 5.2.2 + typescript: 5.5.4 transitivePeerDependencies: - react-dom dev: true @@ -14244,8 +14172,8 @@ packages: '@remix-run/react': ^1.15.0 || ^2.0.0 '@remix-run/server-runtime': ^1.15.0 || ^2.0.0 dependencies: - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) dev: false /@remix-run/web-blob@3.1.0: @@ -16146,7 +16074,7 @@ packages: - '@types/json-schema' dev: false - /@typescript-eslint/eslint-plugin@5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/eslint-plugin@5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-sXtOgJNEuRU5RLwPUb1jxtToZbgvq3M6FPpY4QENxoOggK+UpTxUBpj6tD8+Qh2g46Pi9We87E+eHnUw8YcGsw==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -16158,23 +16086,23 @@ packages: optional: true dependencies: '@eslint-community/regexpp': 4.5.1 - '@typescript-eslint/parser': 
5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) '@typescript-eslint/scope-manager': 5.59.6 - '@typescript-eslint/type-utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/type-utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 4.3.4 eslint: 8.31.0 grapheme-splitter: 1.0.4 ignore: 5.2.4 natural-compare-lite: 1.4.0 semver: 7.6.3 - tsutils: 3.21.0(typescript@5.2.2) - typescript: 5.2.2 + tsutils: 3.21.0(typescript@5.5.4) + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/parser@5.59.6(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/parser@5.59.6(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-7pCa6al03Pv1yf/dUg/s1pXz/yGMUBAw5EeWqNTFiSueKvRNonze3hma3lhdsOrQcaOXhbk5gKu2Fludiho9VA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -16186,10 +16114,10 @@ packages: dependencies: '@typescript-eslint/scope-manager': 5.59.6 '@typescript-eslint/types': 5.59.6 - '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.2.2) + '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) debug: 4.3.4 eslint: 8.31.0 - typescript: 5.2.2 + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true @@ -16202,7 +16130,7 @@ packages: '@typescript-eslint/visitor-keys': 5.59.6 dev: true - /@typescript-eslint/type-utils@5.59.6(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/type-utils@5.59.6(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-A4tms2Mp5yNvLDlySF+kAThV9VTBPCvGf0Rp8nl/eoDX9Okun8byTKoj3fJ52IJitjWOk0fKPNQhXEB++eNozQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -16212,12 +16140,12 @@ packages: typescript: optional: true dependencies: - '@typescript-eslint/typescript-estree': 
5.59.6(typescript@5.2.2) - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 4.3.7 eslint: 8.31.0 - tsutils: 3.21.0(typescript@5.2.2) - typescript: 5.2.2 + tsutils: 3.21.0(typescript@5.5.4) + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true @@ -16227,7 +16155,7 @@ packages: engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true - /@typescript-eslint/typescript-estree@5.59.6(typescript@5.2.2): + /@typescript-eslint/typescript-estree@5.59.6(typescript@5.5.4): resolution: {integrity: sha512-vW6JP3lMAs/Tq4KjdI/RiHaaJSO7IUsbkz17it/Rl9Q+WkQ77EOuOnlbaU8kKfVIOJxMhnRiBG+olE7f3M16DA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -16242,13 +16170,13 @@ packages: globby: 11.1.0 is-glob: 4.0.3 semver: 7.6.3 - tsutils: 3.21.0(typescript@5.2.2) - typescript: 5.2.2 + tsutils: 3.21.0(typescript@5.5.4) + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/utils@5.59.6(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/utils@5.59.6(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-vzaaD6EXbTS29cVH0JjXBdzMt6VBlv+hE31XktDRMX1j3462wZCJa7VzO2AxXEXcIl8GQqZPcOPuW/Z1tZVogg==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -16259,7 +16187,7 @@ packages: '@types/semver': 7.5.1 '@typescript-eslint/scope-manager': 5.59.6 '@typescript-eslint/types': 5.59.6 - '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.2.2) + '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) eslint: 8.31.0 eslint-scope: 5.1.1 semver: 7.6.3 @@ -18311,7 +18239,7 @@ packages: /cjs-module-lexer@1.2.3: resolution: {integrity: sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==} - /class-variance-authority@0.5.2(typescript@5.2.2): + 
/class-variance-authority@0.5.2(typescript@5.5.4): resolution: {integrity: sha512-j7Qqw3NPbs4IpO80gvdACWmVvHiLLo5MECacUBLnJG17CrLpWaQ7/4OaWX6P0IO1j2nvZ7AuSfBS/ImtEUZJGA==} peerDependencies: typescript: '>= 4.5.5 < 6' @@ -18319,7 +18247,7 @@ packages: typescript: optional: true dependencies: - typescript: 5.2.2 + typescript: 5.5.4 dev: false /class-variance-authority@0.7.0: @@ -18722,22 +18650,6 @@ packages: yaml: 1.10.2 dev: true - /cosmiconfig@8.3.6(typescript@5.2.2): - resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} - engines: {node: '>=14'} - peerDependencies: - typescript: '>=4.9.5' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - import-fresh: 3.3.0 - js-yaml: 4.1.0 - parse-json: 5.2.0 - path-type: 4.0.0 - typescript: 5.2.2 - dev: false - /cosmiconfig@8.3.6(typescript@5.5.4): resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} engines: {node: '>=14'} @@ -18754,22 +18666,6 @@ packages: typescript: 5.5.4 dev: false - /cosmiconfig@9.0.0(typescript@5.2.2): - resolution: {integrity: sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==} - engines: {node: '>=14'} - peerDependencies: - typescript: '>=4.9.5' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - env-paths: 2.2.1 - import-fresh: 3.3.0 - js-yaml: 4.1.0 - parse-json: 5.2.0 - typescript: 5.2.2 - dev: true - /cosmiconfig@9.0.0(typescript@5.5.4): resolution: {integrity: sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==} engines: {node: '>=14'} @@ -18784,7 +18680,6 @@ packages: js-yaml: 4.1.0 parse-json: 5.2.0 typescript: 5.5.4 - dev: false /cp-file@10.0.0: resolution: {integrity: sha512-vy2Vi1r2epK5WqxOLnskeKeZkdZvTKfFZQCplE3XWsP+SUJyd5XAUFC9lFgTjjXJF2GMne/UML14iEmkAaDfFg==} @@ -20437,7 +20332,7 @@ packages: eslint-import-resolver-webpack: 
optional: true dependencies: - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 3.2.7 eslint: 8.31.0 eslint-import-resolver-node: 0.3.7 @@ -20467,7 +20362,7 @@ packages: eslint-import-resolver-webpack: optional: true dependencies: - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 3.2.7 eslint: 8.31.0 eslint-import-resolver-node: 0.3.9 @@ -20497,7 +20392,7 @@ packages: '@typescript-eslint/parser': optional: true dependencies: - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) array-includes: 3.1.8 array.prototype.findlastindex: 1.2.5 array.prototype.flat: 1.3.2 @@ -20534,7 +20429,7 @@ packages: requireindex: 1.2.0 dev: true - /eslint-plugin-jest@26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.2.2): + /eslint-plugin-jest@26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-TWJxWGp1J628gxh2KhaH1H1paEdgE2J61BBF1I59c6xWeL5+D1BzMxGDN/nXAfX+aSkR5u80K+XhskK6Gwq9ng==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -20547,8 +20442,8 @@ packages: jest: optional: true dependencies: - '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2) - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) eslint: 8.31.0 transitivePeerDependencies: - supports-color @@ -20628,13 +20523,13 @@ packages: string.prototype.matchall: 4.0.8 dev: true - /eslint-plugin-testing-library@5.11.0(eslint@8.31.0)(typescript@5.2.2): + 
/eslint-plugin-testing-library@5.11.0(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-ELY7Gefo+61OfXKlQeXNIDVVLPcvKTeiQOoMZG9TeuWa7Ln4dUNRv8JdRWBQI9Mbb427XGlVB1aa1QPZxBJM8Q==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0, npm: '>=6'} peerDependencies: eslint: ^7.5.0 || ^8.0.0 dependencies: - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) eslint: 8.31.0 transitivePeerDependencies: - supports-color @@ -21915,27 +21810,6 @@ packages: - supports-color dev: false - /graphile-worker@0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.2.2): - resolution: {integrity: sha512-e7gGYDmGqzju2l83MpzX8vNG/lOtVJiSzI3eZpAFubSxh/cxs7sRrRGBGjzBP1kNG0H+c95etPpNRNlH65PYhw==} - engines: {node: '>=14.0.0'} - hasBin: true - dependencies: - '@graphile/logger': 0.2.0 - '@types/debug': 4.1.12 - '@types/pg': 8.11.6 - cosmiconfig: 8.3.6(typescript@5.2.2) - graphile-config: 0.0.1-beta.8 - json5: 2.2.3 - pg: 8.11.5 - tslib: 2.6.2 - yargs: 17.7.2 - transitivePeerDependencies: - - pg-native - - supports-color - - typescript - dev: false - patched: true - /graphile-worker@0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.5.4): resolution: {integrity: sha512-e7gGYDmGqzju2l83MpzX8vNG/lOtVJiSzI3eZpAFubSxh/cxs7sRrRGBGjzBP1kNG0H+c95etPpNRNlH65PYhw==} engines: {node: '>=14.0.0'} @@ -25799,7 +25673,7 @@ packages: dependencies: lilconfig: 2.1.0 postcss: 8.4.29 - ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2) + ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.5.4) yaml: 2.3.1 dev: true @@ -25817,10 +25691,10 @@ packages: dependencies: lilconfig: 2.1.0 postcss: 8.4.44 - ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2) + ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.5.4) yaml: 2.3.1 - /postcss-loader@8.1.1(postcss@8.4.44)(typescript@5.2.2)(webpack@5.88.2): + 
/postcss-loader@8.1.1(postcss@8.4.44)(typescript@5.5.4)(webpack@5.88.2): resolution: {integrity: sha512-0IeqyAsG6tYiDRCYKQJLAmgQr47DX6N7sFSWvQxt6AcupX8DIdmykuk/o/tx0Lze3ErGHJEp5OSRxrelC6+NdQ==} engines: {node: '>= 18.12.0'} peerDependencies: @@ -25833,7 +25707,7 @@ packages: webpack: optional: true dependencies: - cosmiconfig: 9.0.0(typescript@5.2.2) + cosmiconfig: 9.0.0(typescript@5.5.4) jiti: 1.21.0 postcss: 8.4.44 semver: 7.6.3 @@ -27459,7 +27333,7 @@ packages: '@remix-run/server-runtime': ^1.1.1 remix-auth: ^3.2.1 dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) crypto-js: 4.1.1 remix-auth: 3.6.0(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) dev: false @@ -27470,7 +27344,7 @@ packages: '@remix-run/server-runtime': ^1.0.0 remix-auth: ^3.4.0 dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) remix-auth: 3.6.0(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) remix-auth-oauth2: 1.11.0(@remix-run/server-runtime@2.1.0)(remix-auth@3.6.0) transitivePeerDependencies: @@ -27483,7 +27357,7 @@ packages: '@remix-run/server-runtime': ^1.0.0 || ^2.0.0 remix-auth: ^3.6.0 dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) debug: 4.3.7 remix-auth: 3.6.0(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) transitivePeerDependencies: @@ -27496,8 +27370,8 @@ packages: '@remix-run/react': ^1.0.0 || ^2.0.0 '@remix-run/server-runtime': ^1.0.0 || ^2.0.0 dependencies: - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) uuid: 8.3.2 dev: false @@ -27508,8 +27382,8 @@ packages: '@remix-run/server-runtime': ^1.16.0 || ^2.0 react: ^17.0.2 || 
^18.0.0 dependencies: - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) react: 18.2.0 dev: false @@ -27549,8 +27423,8 @@ packages: zod: optional: true dependencies: - '@remix-run/node': 2.1.0(typescript@5.2.2) - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + '@remix-run/node': 2.1.0(typescript@5.5.4) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/router': 1.15.3 intl-parse-accept-language: 1.0.0 react: 18.2.0 @@ -29549,7 +29423,7 @@ packages: /ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} - /ts-node@10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.2.2): + /ts-node@10.9.1(@swc/core@1.3.26)(@types/node@20.14.14)(typescript@5.5.4): resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} hasBin: true peerDependencies: @@ -29576,7 +29450,7 @@ packages: create-require: 1.1.1 diff: 4.0.2 make-error: 1.3.6 - typescript: 5.2.2 + typescript: 5.5.4 v8-compile-cache-lib: 3.0.1 yn: 3.1.1 @@ -29641,19 +29515,6 @@ packages: resolution: {integrity: sha512-3IDBalvf6SyvHFS14UiwCWzqdSdo+Q0k2J7DZyJYaHW/iraW9DJpaBKDJpry3yQs3o/t/A+oGaRW3iVt2lKxzA==} dev: false - /tsconfck@2.1.2(typescript@5.2.2): - resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==} - engines: {node: ^14.13.1 || ^16 || >=18} - hasBin: true - peerDependencies: - typescript: ^4.3.5 || ^5.0.0 - peerDependenciesMeta: - typescript: - optional: true - dependencies: - typescript: 5.2.2 - dev: true - /tsconfck@2.1.2(typescript@5.5.4): resolution: {integrity: 
sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==} engines: {node: ^14.13.1 || ^16 || >=18} @@ -29738,14 +29599,14 @@ packages: /tslib@2.6.2: resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} - /tsutils@3.21.0(typescript@5.2.2): + /tsutils@3.21.0(typescript@5.5.4): resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} engines: {node: '>= 6'} peerDependencies: typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta' dependencies: tslib: 1.14.1 - typescript: 5.2.2 + typescript: 5.5.4 dev: true /tsx@3.12.2: @@ -30074,22 +29935,12 @@ packages: - supports-color dev: false - /typescript@4.9.5: - resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} - engines: {node: '>=4.2.0'} - hasBin: true - /typescript@5.1.6: resolution: {integrity: sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==} engines: {node: '>=14.17'} hasBin: true dev: false - /typescript@5.2.2: - resolution: {integrity: sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==} - engines: {node: '>=14.17'} - hasBin: true - /typescript@5.3.3: resolution: {integrity: sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==} engines: {node: '>=14.17'} @@ -30700,17 +30551,6 @@ packages: - terser dev: true - /vite-tsconfig-paths@4.0.5(typescript@5.2.2): - resolution: {integrity: sha512-/L/eHwySFYjwxoYt1WRJniuK/jPv+WGwgRGBYx3leciR5wBeqntQpUE6Js6+TJemChc+ter7fDBKieyEWDx4yQ==} - dependencies: - debug: 4.3.7 - globrex: 0.1.2 - tsconfck: 2.1.2(typescript@5.2.2) - transitivePeerDependencies: - - supports-color - - typescript - dev: true - 
/vite-tsconfig-paths@4.0.5(typescript@5.5.4): resolution: {integrity: sha512-/L/eHwySFYjwxoYt1WRJniuK/jPv+WGwgRGBYx3leciR5wBeqntQpUE6Js6+TJemChc+ter7fDBKieyEWDx4yQ==} dependencies: diff --git a/references/bun-catalog/package.json b/references/bun-catalog/package.json index 483cfe2c54..519296c6ca 100644 --- a/references/bun-catalog/package.json +++ b/references/bun-catalog/package.json @@ -11,7 +11,6 @@ }, "devDependencies": { "@types/bun": "^1.1.6", - "trigger.dev": "workspace:*", - "typescript": "^5.5.4" + "trigger.dev": "workspace:*" } } \ No newline at end of file diff --git a/references/hello-world/package.json b/references/hello-world/package.json index c38f17fa7e..b8c3a8ad1b 100644 --- a/references/hello-world/package.json +++ b/references/hello-world/package.json @@ -3,8 +3,7 @@ "private": true, "type": "module", "devDependencies": { - "trigger.dev": "workspace:*", - "typescript": "^5.5.4" + "trigger.dev": "workspace:*" }, "dependencies": { "@trigger.dev/sdk": "workspace:*" diff --git a/references/init-shell/package.json b/references/init-shell/package.json index 89c828db71..bf3ef58089 100644 --- a/references/init-shell/package.json +++ b/references/init-shell/package.json @@ -3,7 +3,6 @@ "private": true, "type": "module", "devDependencies": { - "trigger.dev": "workspace:*", - "typescript": "^5.5.4" + "trigger.dev": "workspace:*" } } \ No newline at end of file diff --git a/references/nextjs-realtime/package.json b/references/nextjs-realtime/package.json index 8985fc2a32..12e2a696d7 100644 --- a/references/nextjs-realtime/package.json +++ b/references/nextjs-realtime/package.json @@ -43,7 +43,6 @@ "@types/react-dom": "^18", "postcss": "^8", "tailwindcss": "^3.4.1", - "trigger.dev": "workspace:^3", - "typescript": "^5" + "trigger.dev": "workspace:^3" } } \ No newline at end of file diff --git a/references/v3-catalog/package.json b/references/v3-catalog/package.json index c9de9cf913..a8087a96b6 100644 --- a/references/v3-catalog/package.json +++ 
b/references/v3-catalog/package.json @@ -82,7 +82,6 @@ "prisma-kysely": "^1.8.0", "trigger.dev": "workspace:*", "ts-node": "^10.9.2", - "tsconfig-paths": "^4.2.0", - "typescript": "^5.5.4" + "tsconfig-paths": "^4.2.0" } } \ No newline at end of file From 23f3400508367b1a83c6d9803c947db1419fadc9 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 11:59:26 +0000 Subject: [PATCH 298/485] redlock type patch --- package.json | 3 ++- pnpm-lock.yaml | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/package.json b/package.json index d05b84fdc5..c38925e9c3 100644 --- a/package.json +++ b/package.json @@ -72,7 +72,8 @@ "patchedDependencies": { "@changesets/assemble-release-plan@5.2.4": "patches/@changesets__assemble-release-plan@5.2.4.patch", "engine.io-parser@5.2.2": "patches/engine.io-parser@5.2.2.patch", - "graphile-worker@0.16.6": "patches/graphile-worker@0.16.6.patch" + "graphile-worker@0.16.6": "patches/graphile-worker@0.16.6.patch", + "redlock@5.0.0-beta.2": "patches/redlock@5.0.0-beta.2.patch" } } } \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 347a9b1620..a4e54f8703 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -14,6 +14,9 @@ patchedDependencies: graphile-worker@0.16.6: hash: hdpetta7btqcc7xb5wfkcnanoa path: patches/graphile-worker@0.16.6.patch + redlock@5.0.0-beta.2: + hash: rwyegdki7iserrd7fgjwxkhnlu + path: patches/redlock@5.0.0-beta.2.patch importers: @@ -954,7 +957,7 @@ importers: version: 3.3.7 redlock: specifier: 5.0.0-beta.2 - version: 5.0.0-beta.2 + version: 5.0.0-beta.2(patch_hash=rwyegdki7iserrd7fgjwxkhnlu) zod: specifier: 3.23.8 version: 3.23.8 @@ -27154,12 +27157,13 @@ packages: redis-errors: 1.2.0 dev: false - /redlock@5.0.0-beta.2: + /redlock@5.0.0-beta.2(patch_hash=rwyegdki7iserrd7fgjwxkhnlu): resolution: {integrity: sha512-2RDWXg5jgRptDrB1w9O/JgSZC0j7y4SlaXnor93H/UJm/QyDiFgBKNtrh0TI6oCXqYSaSoXxFh6Sd3VtYfhRXw==} engines: {node: 
'>=12'} dependencies: node-abort-controller: 3.1.1 dev: false + patched: true /reduce-css-calc@2.1.8: resolution: {integrity: sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg==} From 5f07ff297d0af7bcb214801a1d20fbbbf0b23f26 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:07:45 +0000 Subject: [PATCH 299/485] fix type issues caused by ts-reset --- internal-packages/redis-worker/src/queue.ts | 2 +- internal-packages/redis-worker/src/worker.ts | 2 +- internal-packages/run-engine/src/run-queue/index.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal-packages/redis-worker/src/queue.ts b/internal-packages/redis-worker/src/queue.ts index 04c08a30d2..4c7fa7e25e 100644 --- a/internal-packages/redis-worker/src/queue.ts +++ b/internal-packages/redis-worker/src/queue.ts @@ -128,7 +128,7 @@ export class SimpleQueue { const dequeuedItems = []; for (const [id, serializedItem] of results) { - const parsedItem = JSON.parse(serializedItem); + const parsedItem = JSON.parse(serializedItem) as any; if (typeof parsedItem.job !== "string") { this.logger.error(`Invalid item in queue`, { queue: this.name, id, item: parsedItem }); continue; diff --git a/internal-packages/redis-worker/src/worker.ts b/internal-packages/redis-worker/src/worker.ts index 04cc9c64b5..b9408d1224 100644 --- a/internal-packages/redis-worker/src/worker.ts +++ b/internal-packages/redis-worker/src/worker.ts @@ -297,7 +297,7 @@ class Worker { private async handleRedriveMessage(channel: string, message: string) { try { - const { id } = JSON.parse(message); + const { id } = JSON.parse(message) as any; if (typeof id !== "string") { throw new Error("Invalid message format: id must be a string"); } diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index c81a4cdc82..b5cb643355 100644 --- 
a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -653,7 +653,7 @@ export class RunQueue { private async handleRedriveMessage(channel: string, message: string) { try { - const { runId, orgId } = JSON.parse(message); + const { runId, orgId } = JSON.parse(message) as any; if (typeof orgId !== "string" || typeof runId !== "string") { this.logger.error( "handleRedriveMessage: invalid message format: runId and orgId must be strings", From 46c79e04f65f13df66654d79cbb16b9f4ca91446 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:55:35 +0000 Subject: [PATCH 300/485] improve cleanup scripts --- packages/build/package.json | 2 +- packages/cli-v3/package.json | 1 + packages/core/package.json | 2 +- packages/react-hooks/package.json | 2 +- packages/rsc/package.json | 2 +- packages/trigger-sdk/package.json | 2 +- packages/worker/package.json | 2 +- 7 files changed, 7 insertions(+), 6 deletions(-) diff --git a/packages/build/package.json b/packages/build/package.json index 03afa06189..e128521117 100644 --- a/packages/build/package.json +++ b/packages/build/package.json @@ -57,7 +57,7 @@ } }, "scripts": { - "clean": "rimraf dist", + "clean": "rimraf dist .tshy .tshy-build .turbo", "build": "tshy && pnpm run update-version", "dev": "tshy --watch", "typecheck": "tsc --noEmit -p tsconfig.src.json", diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index 20b4201edf..4c4fce1560 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -65,6 +65,7 @@ "vitest": "^2.0.5" }, "scripts": { + "clean": "rimraf dist .tshy .tshy-build .turbo", "typecheck": "tsc -p tsconfig.src.json --noEmit", "build": "tshy && pnpm run update-version", "dev": "tshy --watch", diff --git a/packages/core/package.json b/packages/core/package.json index ba92d8a0db..887adfce71 100644 --- a/packages/core/package.json +++ b/packages/core/package.json 
@@ -173,7 +173,7 @@ }, "sideEffects": false, "scripts": { - "clean": "rimraf dist", + "clean": "rimraf dist .tshy .tshy-build .turbo", "update-version": "tsx ../../scripts/updateVersion.ts", "build": "tshy && pnpm run update-version", "dev": "tshy --watch", diff --git a/packages/react-hooks/package.json b/packages/react-hooks/package.json index a68d6a75a6..8834122301 100644 --- a/packages/react-hooks/package.json +++ b/packages/react-hooks/package.json @@ -29,7 +29,7 @@ ] }, "scripts": { - "clean": "rimraf dist", + "clean": "rimraf dist .tshy .tshy-build .turbo", "build": "tshy && pnpm run update-version", "dev": "tshy --watch", "typecheck": "tsc --noEmit", diff --git a/packages/rsc/package.json b/packages/rsc/package.json index bf69bb23dc..46fdf10d8e 100644 --- a/packages/rsc/package.json +++ b/packages/rsc/package.json @@ -29,7 +29,7 @@ ] }, "scripts": { - "clean": "rimraf dist", + "clean": "rimraf dist .tshy .tshy-build .turbo", "build": "tshy && pnpm run update-version", "dev": "tshy --watch", "typecheck": "tsc --noEmit", diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index fa8c63eace..938c9e1d9a 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -37,7 +37,7 @@ } }, "scripts": { - "clean": "rimraf dist", + "clean": "rimraf dist .tshy .tshy-build .turbo", "build": "tshy && pnpm run update-version", "dev": "tshy --watch", "typecheck": "tsc --noEmit", diff --git a/packages/worker/package.json b/packages/worker/package.json index 34346fea52..6ccfffb7c8 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -29,7 +29,7 @@ ] }, "scripts": { - "clean": "rimraf dist", + "clean": "rimraf dist .tshy .tshy-build .turbo", "build": "tshy && pnpm run update-version", "dev": "tshy --watch", "typecheck": "tsc --noEmit -p tsconfig.src.json", From accf9f7c0377ee41416d01d6bd86226dcd73b05f Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 
18 Dec 2024 13:56:08 +0000 Subject: [PATCH 301/485] add missing socket.io dep --- packages/cli-v3/package.json | 1 + pnpm-lock.yaml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index 4c4fce1560..854ca711e8 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -115,6 +115,7 @@ "resolve": "^1.22.8", "semver": "^7.5.0", "signal-exit": "^4.1.0", + "socket.io-client": "4.7.5", "source-map-support": "0.5.21", "std-env": "^3.7.0", "terminal-link": "^3.0.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a4e54f8703..9b75634999 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1193,6 +1193,9 @@ importers: signal-exit: specifier: ^4.1.0 version: 4.1.0 + socket.io-client: + specifier: 4.7.5 + version: 4.7.5 source-map-support: specifier: 0.5.21 version: 0.5.21 From 0b0b7da7c581e837a5fe92ae7e887fbbd3027b11 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:56:44 +0000 Subject: [PATCH 302/485] fix run notification handler type --- .../cli-v3/src/entryPoints/managed-run-controller.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 38e9bd6ad7..e52361c7dd 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -401,6 +401,15 @@ class ManagedRunController { this.socket.on("run:notify", async ({ version, run }) => { console.log("[ManagedRunController] Received run notification", { version, run }); + if (!this.runFriendlyId) { + logger.debug("[ManagedRunController] Ignoring notification, no local run ID", { + runId: run.friendlyId, + currentRunId: this.runFriendlyId, + currentSnapshotId: this.snapshotFriendlyId, + }); + return; + } + if (run.friendlyId !== this.runFriendlyId) { console.log("[ManagedRunController] 
Ignoring notification for different run", { runId: run.friendlyId, From fa8bfc9b5625d4d7d74871b688a5d4569d95f7c4 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 13:57:24 +0000 Subject: [PATCH 303/485] fix worker group test again --- apps/webapp/test/workerGroup.test.ts | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/apps/webapp/test/workerGroup.test.ts b/apps/webapp/test/workerGroup.test.ts index 1d80bcd873..b5876faf8e 100644 --- a/apps/webapp/test/workerGroup.test.ts +++ b/apps/webapp/test/workerGroup.test.ts @@ -18,8 +18,6 @@ import { import { HEADER_NAME } from "@trigger.dev/worker"; import { RunEngine } from "@internal/run-engine"; import { trace } from "@opentelemetry/api"; -import { TriggerTaskService } from "~/v3/services/triggerTask.server"; -import { env } from "~/env.server"; describe("worker", () => { const defaultInstanceName = "test_worker"; @@ -70,7 +68,7 @@ describe("worker", () => { headers: { Authorization: `Bearer ${token.plaintext}`, [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, - [HEADER_NAME.WORKER_MANAGED_SECRET]: env.MANAGED_WORKER_SECRET, + [HEADER_NAME.WORKER_MANAGED_SECRET]: "managed-secret", }, }); @@ -88,7 +86,7 @@ describe("worker", () => { headers: { Authorization: `Bearer ${token.plaintext}`, [HEADER_NAME.WORKER_INSTANCE_NAME]: secondInstanceName, - [HEADER_NAME.WORKER_MANAGED_SECRET]: env.MANAGED_WORKER_SECRET, + [HEADER_NAME.WORKER_MANAGED_SECRET]: "managed-secret", }, }); const secondAuth = await tokenService.authenticate(secondRequest); @@ -117,7 +115,6 @@ describe("worker", () => { assert(deployment, "deployment should be defined"); const engine = setupRunEngine(prisma, redisContainer); - const triggerService = new TriggerTaskService({ prisma, engine }); const { token, workerGroupService, workerGroup } = await setupWorkerGroup({ prisma, @@ -151,7 +148,25 @@ describe("worker", () => { ); // Trigger - const run = 
await triggerService.call(taskIdentifier, authenticatedEnvironment, {}); + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); assert(run, "run should be defined"); // Check this is a V2 run From 958c4399cccd669630fef0bf46384da73bec9975 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 14:21:13 +0000 Subject: [PATCH 304/485] generate prisma client for e2e tests --- .github/workflows/e2e.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 27c8e97dd2..e74f6a080b 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -38,6 +38,9 @@ jobs: - name: 📥 Download deps run: pnpm install --frozen-lockfile --filter trigger.dev... + - name: 📀 Generate Prisma Client + run: pnpm run generate + - name: 🔧 Build v3 cli monorepo dependencies run: pnpm run build --filter trigger.dev^... 
From c8f460e71a9258b58149292334b22051f7b1a387 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 14:54:38 +0000 Subject: [PATCH 305/485] remove worker group tests for now --- apps/webapp/test/workerGroup.test.ts | 250 --------------------------- 1 file changed, 250 deletions(-) delete mode 100644 apps/webapp/test/workerGroup.test.ts diff --git a/apps/webapp/test/workerGroup.test.ts b/apps/webapp/test/workerGroup.test.ts deleted file mode 100644 index b5876faf8e..0000000000 --- a/apps/webapp/test/workerGroup.test.ts +++ /dev/null @@ -1,250 +0,0 @@ -import { describe, expect, assert } from "vitest"; -import { - AuthenticatedEnvironment, - containerTest, - setupAuthenticatedEnvironment, - setupBackgroundWorker, - StartedRedisContainer, -} from "@internal/testcontainers"; -import { WorkerGroupTokenService } from "~/v3/services/worker/workerGroupTokenService.server"; -import { WorkerGroupService } from "~/v3/services/worker/workerGroupService.server"; -import { - PrismaClient, - PrismaClientOrTransaction, - RunEngineVersion, - TaskRunStatus, - WorkerInstanceGroupType, -} from "@trigger.dev/database"; -import { HEADER_NAME } from "@trigger.dev/worker"; -import { RunEngine } from "@internal/run-engine"; -import { trace } from "@opentelemetry/api"; - -describe("worker", () => { - const defaultInstanceName = "test_worker"; - - describe("auth", { concurrent: true, timeout: 10000 }, () => { - containerTest("should fail", async ({ prisma }) => { - const { workerGroup, token } = await setupWorkerGroup({ prisma }); - expect(workerGroup.type).toBe(WorkerInstanceGroupType.MANAGED); - - const missingToken = new Request("https://example.com", { - headers: { - [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, - }, - }); - - const badToken = new Request("https://example.com", { - headers: { - Authorization: `Bearer foo`, - [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, - }, - }); - - const emptyToken = new 
Request("https://example.com", { - headers: { - Authorization: `Bearer `, - [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, - }, - }); - - const missingInstanceName = new Request("https://example.com", { - headers: { - Authorization: `Bearer ${token.plaintext}`, - }, - }); - - const tokenService = new WorkerGroupTokenService({ prisma }); - expect(await tokenService.authenticate(missingToken)).toBeUndefined(); - expect(await tokenService.authenticate(badToken)).toBeUndefined(); - expect(await tokenService.authenticate(emptyToken)).toBeUndefined(); - expect(await tokenService.authenticate(missingInstanceName)).toBeUndefined(); - }); - - containerTest("should succeed", async ({ prisma }) => { - const { workerGroup, token } = await setupWorkerGroup({ prisma }); - expect(workerGroup.type).toBe(WorkerInstanceGroupType.MANAGED); - - const request = new Request("https://example.com", { - headers: { - Authorization: `Bearer ${token.plaintext}`, - [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, - [HEADER_NAME.WORKER_MANAGED_SECRET]: "managed-secret", - }, - }); - - const tokenService = new WorkerGroupTokenService({ prisma }); - const authentication = await tokenService.authenticate(request); - - expect(authentication).toBeDefined(); - expect(authentication?.workerGroupId).toBe(workerGroup.id); - - const identicalAuth = await tokenService.authenticate(request); - expect(identicalAuth).toEqual(authentication); - - const secondInstanceName = "test_worker_2"; - const secondRequest = new Request("https://example.com", { - headers: { - Authorization: `Bearer ${token.plaintext}`, - [HEADER_NAME.WORKER_INSTANCE_NAME]: secondInstanceName, - [HEADER_NAME.WORKER_MANAGED_SECRET]: "managed-secret", - }, - }); - const secondAuth = await tokenService.authenticate(secondRequest); - expect(secondAuth).toBeDefined(); - expect(secondAuth?.workerGroupId).toBe(workerGroup.id); - expect(secondAuth?.workerInstanceId).not.toBe(authentication?.workerInstanceId); - }); - }); - - 
describe("trigger", { timeout: 10000 }, () => { - containerTest("dequeue - unmanaged", async ({ prisma, redisContainer }) => { - const taskIdentifier = "test-task"; - - const authenticatedEnvironment = await setupAuthenticatedEnvironment( - prisma, - "PRODUCTION", - "V2" - ); - - const { deployment } = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - assert(deployment, "deployment should be defined"); - - const engine = setupRunEngine(prisma, redisContainer); - - const { token, workerGroupService, workerGroup } = await setupWorkerGroup({ - prisma, - engine, - authenticatedEnvironment, - }); - - // Promote worker group to project default - await workerGroupService.setDefaultWorkerGroupForProject({ - projectId: authenticatedEnvironment.projectId, - workerGroupId: workerGroup.id, - }); - - const request = new Request("https://example.com", { - headers: { - Authorization: `Bearer ${token.plaintext}`, - [HEADER_NAME.WORKER_INSTANCE_NAME]: defaultInstanceName, - [HEADER_NAME.WORKER_DEPLOYMENT_ID]: deployment.id, - }, - }); - - try { - const tokenService = new WorkerGroupTokenService({ prisma, engine }); - const authenticatedInstance = await tokenService.authenticate(request); - - assert(authenticatedInstance, "authenticatedInstance should be defined"); - expect(authenticatedInstance.type).toBe(WorkerInstanceGroupType.UNMANAGED); - assert( - authenticatedInstance.type === WorkerInstanceGroupType.UNMANAGED, - "type should be unmanaged" - ); - - // Trigger - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - assert(run, "run should be defined"); - - // Check this is a V2 run - 
expect(run.engine).toBe(RunEngineVersion.V2); - - const queueLengthBefore = await engine.runQueue.lengthOfQueue( - authenticatedEnvironment, - run.queue - ); - expect(queueLengthBefore).toBe(1); - - const runBeforeDequeue = await prisma.taskRun.findUnique({ - where: { - id: run.id, - }, - }); - expect(runBeforeDequeue?.status).toBe(TaskRunStatus.PENDING); - - // Dequeue - const dequeued = await authenticatedInstance.dequeue(); - expect(dequeued.length).toBe(1); - expect(dequeued[0].run.id).toBe(run.id); - expect(dequeued[0].run.attemptNumber).toBe(1); - } finally { - engine.quit(); - } - }); - }); -}); - -async function setupWorkerGroup({ - prisma, - engine, - authenticatedEnvironment, -}: { - prisma: PrismaClientOrTransaction; - engine?: RunEngine; - authenticatedEnvironment?: AuthenticatedEnvironment; -}) { - const workerGroupService = new WorkerGroupService({ prisma, engine }); - const { workerGroup, token } = await workerGroupService.createWorkerGroup({ - projectId: authenticatedEnvironment?.projectId, - organizationId: authenticatedEnvironment?.organizationId, - }); - - return { - workerGroupService, - workerGroup, - token, - }; -} - -function setupRunEngine(prisma: PrismaClient, redisContainer: StartedRedisContainer) { - return new RunEngine({ - prisma, - redis: { - host: redisContainer.getHost(), - port: redisContainer.getPort(), - password: redisContainer.getPassword(), - enableAutoPipelining: true, - }, - worker: { - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); -} From 082644ed36092d6f2bfcce0c33afade366785278 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:36:18 +0000 Subject: [PATCH 306/485] prevent image pull rate limits during unit tests 
--- .github/workflows/unit-tests.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 5d19bc4e92..99019a0e1b 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -24,6 +24,13 @@ jobs: node-version: 20.11.1 cache: "pnpm" + # ..to avoid rate limits when pulling images + - name: 🐳 Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: 📥 Download deps run: pnpm install --frozen-lockfile From 95a7155f01f761dc35f5caf42cae899a420c894b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:37:28 +0000 Subject: [PATCH 307/485] increase timeout for queue concurrency limit test --- internal-packages/run-engine/src/run-queue/index.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index f914555d97..c2eee409f1 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -69,7 +69,7 @@ const messageDev: InputPayload = { describe("RunQueue", () => { redisTest( "Get/set Queue concurrency limit", - { timeout: 5_000 }, + { timeout: 15_000 }, async ({ redisContainer, redis }) => { const queue = new RunQueue({ ...testOptions, From 610c95f0b94234d732cf3149ccb577081395599e Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:52:43 +0000 Subject: [PATCH 308/485] generate prisma client for preview release --- .github/workflows/pr_checks.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/pr_checks.yml b/.github/workflows/pr_checks.yml index 55bf20ad51..d5b53a5bee 100644 --- a/.github/workflows/pr_checks.yml +++ 
b/.github/workflows/pr_checks.yml @@ -54,6 +54,9 @@ jobs: - name: 📥 Download deps run: pnpm install --frozen-lockfile + - name: 📀 Generate Prisma Client + run: pnpm run generate + - name: 🏗️ Build run: pnpm run build --filter "@trigger.dev/*" --filter "trigger.dev" From b1604fb52a9ff1b484010e27690ef6cebfd196fe Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 18 Dec 2024 18:46:30 +0000 Subject: [PATCH 309/485] same node types everywhere --- apps/coordinator/package.json | 1 - apps/coordinator/src/checkpointer.ts | 2 +- apps/docker-provider/package.json | 1 - apps/webapp/package.json | 1 - internal-packages/emails/package.json | 1 - packages/build/package.json | 1 - packages/cli-v3/package.json | 1 - packages/core/package.json | 1 - packages/react-hooks/package.json | 1 - packages/rsc/package.json | 1 - packages/trigger-sdk/package.json | 1 - packages/worker/package.json | 1 - pnpm-lock.yaml | 43 ------------------------- references/nextjs-realtime/package.json | 1 - references/v3-catalog/package.json | 1 - 15 files changed, 1 insertion(+), 57 deletions(-) diff --git a/apps/coordinator/package.json b/apps/coordinator/package.json index 20e0e267e9..3b4240bd37 100644 --- a/apps/coordinator/package.json +++ b/apps/coordinator/package.json @@ -23,7 +23,6 @@ "tinyexec": "^0.3.0" }, "devDependencies": { - "@types/node": "^18", "dotenv": "^16.4.2", "esbuild": "^0.19.11", "tsx": "^4.7.0" diff --git a/apps/coordinator/src/checkpointer.ts b/apps/coordinator/src/checkpointer.ts index bf82a6702c..f6468e5c2e 100644 --- a/apps/coordinator/src/checkpointer.ts +++ b/apps/coordinator/src/checkpointer.ts @@ -193,7 +193,7 @@ export class Checkpointer { const start = performance.now(); this.#logger.log(`checkpointAndPush() start`, { start, opts }); - let interval: NodeJS.Timer | undefined; + let interval: NodeJS.Timeout | undefined; if (opts.shouldHeartbeat) { interval = setInterval(() => { diff --git a/apps/docker-provider/package.json 
b/apps/docker-provider/package.json index e68115451b..f3e4015ef0 100644 --- a/apps/docker-provider/package.json +++ b/apps/docker-provider/package.json @@ -20,7 +20,6 @@ "execa": "^8.0.1" }, "devDependencies": { - "@types/node": "^18.19.8", "dotenv": "^16.4.2", "esbuild": "^0.19.11", "tsx": "^4.7.0" diff --git a/apps/webapp/package.json b/apps/webapp/package.json index e9e58cf692..986ae25e31 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -208,7 +208,6 @@ "@types/lodash.omit": "^4.5.7", "@types/marked": "^4.0.3", "@types/morgan": "^1.9.3", - "@types/node": "20.14.14", "@types/node-fetch": "^2.6.2", "@types/prismjs": "^1.26.0", "@types/qs": "^6.9.7", diff --git a/internal-packages/emails/package.json b/internal-packages/emails/package.json index 5e0bc61925..c49fd4b035 100644 --- a/internal-packages/emails/package.json +++ b/internal-packages/emails/package.json @@ -18,7 +18,6 @@ "zod": "3.23.8" }, "devDependencies": { - "@types/node": "^18", "@types/react": "18.2.69" }, "engines": { diff --git a/packages/build/package.json b/packages/build/package.json index e128521117..323904be7c 100644 --- a/packages/build/package.json +++ b/packages/build/package.json @@ -71,7 +71,6 @@ "tsconfck": "3.1.3" }, "devDependencies": { - "@types/node": "20.14.14", "rimraf": "6.0.1", "tshy": "^3.0.2", "tsx": "4.17.0", diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index 854ca711e8..4a53ae2a74 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -47,7 +47,6 @@ "devDependencies": { "@epic-web/test-server": "^0.1.0", "@types/gradient-string": "^1.1.2", - "@types/node": "20.14.14", "@types/object-hash": "3.0.6", "@types/react": "^18.2.48", "@types/resolve": "^1.20.6", diff --git a/packages/core/package.json b/packages/core/package.json index 887adfce71..4d21f2914b 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -215,7 +215,6 @@ "@epic-web/test-server": "^0.1.0", "@trigger.dev/database": 
"workspace:*", "@types/humanize-duration": "^3.27.1", - "@types/node": "20.14.14", "@types/readable-stream": "^4.0.14", "ai": "^3.4.33", "defu": "^6.1.4", diff --git a/packages/react-hooks/package.json b/packages/react-hooks/package.json index 8834122301..b1296c2fe5 100644 --- a/packages/react-hooks/package.json +++ b/packages/react-hooks/package.json @@ -42,7 +42,6 @@ }, "devDependencies": { "@arethetypeswrong/cli": "^0.15.4", - "@types/node": "^20.14.14", "@types/react": "*", "@types/react-dom": "*", "rimraf": "^3.0.2", diff --git a/packages/rsc/package.json b/packages/rsc/package.json index 46fdf10d8e..68731484da 100644 --- a/packages/rsc/package.json +++ b/packages/rsc/package.json @@ -45,7 +45,6 @@ "devDependencies": { "@arethetypeswrong/cli": "^0.15.4", "@trigger.dev/build": "workspace:^3.3.7", - "@types/node": "^20.14.14", "@types/react": "*", "@types/react-dom": "*", "rimraf": "^3.0.2", diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index 938c9e1d9a..b33d0e95c8 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -62,7 +62,6 @@ "devDependencies": { "@arethetypeswrong/cli": "^0.15.4", "@types/debug": "^4.1.7", - "@types/node": "20.14.14", "@types/slug": "^5.0.3", "@types/uuid": "^9.0.0", "@types/ws": "^8.5.3", diff --git a/packages/worker/package.json b/packages/worker/package.json index 6ccfffb7c8..56304df679 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -44,7 +44,6 @@ }, "devDependencies": { "@arethetypeswrong/cli": "^0.15.4", - "@types/node": "20.14.14", "rimraf": "6.0.1", "tshy": "^3.0.2", "tsx": "4.17.0" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9b75634999..9b6a890dd9 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -93,9 +93,6 @@ importers: specifier: ^0.3.0 version: 0.3.0 devDependencies: - '@types/node': - specifier: ^18 - version: 18.17.1 dotenv: specifier: ^16.4.2 version: 16.4.4 @@ -115,9 +112,6 @@ importers: specifier: ^8.0.1 
version: 8.0.1 devDependencies: - '@types/node': - specifier: ^18.19.8 - version: 18.19.20 dotenv: specifier: ^16.4.2 version: 16.4.4 @@ -703,9 +697,6 @@ importers: '@types/morgan': specifier: ^1.9.3 version: 1.9.4 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 '@types/node-fetch': specifier: ^2.6.2 version: 2.6.2 @@ -872,9 +863,6 @@ importers: specifier: 3.23.8 version: 3.23.8 devDependencies: - '@types/node': - specifier: ^18 - version: 18.19.20 '@types/react': specifier: 18.2.69 version: 18.2.69 @@ -1049,9 +1037,6 @@ importers: '@arethetypeswrong/cli': specifier: ^0.15.4 version: 0.15.4 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 esbuild: specifier: ^0.23.0 version: 0.23.0 @@ -1230,9 +1215,6 @@ importers: '@types/gradient-string': specifier: ^1.1.2 version: 1.1.2 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 '@types/object-hash': specifier: 3.0.6 version: 3.0.6 @@ -1375,9 +1357,6 @@ importers: '@types/humanize-duration': specifier: ^3.27.1 version: 3.27.1 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 '@types/readable-stream': specifier: ^4.0.14 version: 4.0.14 @@ -1427,9 +1406,6 @@ importers: '@arethetypeswrong/cli': specifier: ^0.15.4 version: 0.15.4 - '@types/node': - specifier: ^20.14.14 - version: 20.14.14 '@types/react': specifier: '*' version: 18.3.1 @@ -1467,9 +1443,6 @@ importers: '@trigger.dev/build': specifier: workspace:^3.3.7 version: link:../build - '@types/node': - specifier: ^20.14.14 - version: 20.14.14 '@types/react': specifier: '*' version: 18.3.1 @@ -1534,9 +1507,6 @@ importers: '@types/debug': specifier: ^4.1.7 version: 4.1.7 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 '@types/slug': specifier: ^5.0.3 version: 5.0.3 @@ -1586,9 +1556,6 @@ importers: '@arethetypeswrong/cli': specifier: ^0.15.4 version: 0.15.4 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 rimraf: specifier: 6.0.1 version: 6.0.1 @@ -1712,9 +1679,6 @@ importers: '@trigger.dev/rsc': specifier: 
workspace:^3 version: link:../../packages/rsc - '@types/node': - specifier: ^20 - version: 20.14.14 '@types/react': specifier: ^18 version: 18.3.1 @@ -1902,9 +1866,6 @@ importers: '@types/fluent-ffmpeg': specifier: ^2.1.26 version: 2.1.26 - '@types/node': - specifier: 20.4.2 - version: 20.4.2 '@types/react': specifier: ^18.3.1 version: 18.3.1 @@ -15771,10 +15732,6 @@ packages: /@types/node@12.20.55: resolution: {integrity: sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ==} - /@types/node@18.17.1: - resolution: {integrity: sha512-xlR1jahfizdplZYRU59JlUx9uzF1ARa8jbhM11ccpCJya8kvos5jwdm2ZAgxSCwOl0fq21svP18EVwPBXMQudw==} - dev: true - /@types/node@18.19.20: resolution: {integrity: sha512-SKXZvI375jkpvAj8o+5U2518XQv76mAsixqfXiVyWyXZbVWQK25RurFovYpVIxVzul0rZoH58V/3SkEnm7s3qA==} dependencies: diff --git a/references/nextjs-realtime/package.json b/references/nextjs-realtime/package.json index 12e2a696d7..cb88e054e9 100644 --- a/references/nextjs-realtime/package.json +++ b/references/nextjs-realtime/package.json @@ -38,7 +38,6 @@ "devDependencies": { "@next/bundle-analyzer": "^15.0.2", "@trigger.dev/rsc": "workspace:^3", - "@types/node": "^20", "@types/react": "^18", "@types/react-dom": "^18", "postcss": "^8", diff --git a/references/v3-catalog/package.json b/references/v3-catalog/package.json index a8087a96b6..90977430bc 100644 --- a/references/v3-catalog/package.json +++ b/references/v3-catalog/package.json @@ -75,7 +75,6 @@ "@trigger.dev/build": "workspace:*", "@types/email-reply-parser": "^1.4.2", "@types/fluent-ffmpeg": "^2.1.26", - "@types/node": "20.4.2", "@types/react": "^18.3.1", "esbuild": "^0.19.11", "prisma": "5.19.0", From 14250d57b48ce8a2ad1da423396fc9a5cd523dce Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 19 Dec 2024 17:19:14 +0000 Subject: [PATCH 310/485] Updated engine readme, removed legacy system notes --- internal-packages/run-engine/README.md | 109 +------------------------ 1 file changed, 1 
insertion(+), 108 deletions(-) diff --git a/internal-packages/run-engine/README.md b/internal-packages/run-engine/README.md index 1c676c4cb9..a2ca8fda22 100644 --- a/internal-packages/run-engine/README.md +++ b/internal-packages/run-engine/README.md @@ -15,8 +15,7 @@ It is responsible for: - **Worker group**: A group of workers that all pull from the same queue, e.g. "us-east-1", "my-self-hosted-workers". - **Worker**: A worker is a 'server' that connects to the platform and receives runs. - **Supervisor**: Pulls new runs from the queue, communicates with the platform, spins up new Deploy executors. - - **Checkpointer**: Responsible for checkpointing runs. - - **Deploy executor**: Container that comes from a specific deploy from a user's project. + - **Deploy container**: Container that comes from a specific deploy from a user's project. - **Run controller**: The code that manages running the task. - **Run executor**: The actual task running. @@ -188,109 +187,3 @@ This is useful: ## Emitting events The Run Engine emits events using its `eventBus`. This is used for runs completing, failing, or things that any workers should be aware of. - -# Legacy system - -These are all the TaskRun mutations happening right now: - -## 1. TriggerTaskService - -This is called from: - -- trigger task API -- `BatchTriggerTaskService` for each item -- `ReplayTaskRunService` -- `TestTaskService` -- `TriggerScheduledTaskService` when the CRON fires - -Directly creates a run if it doesn't exist, either in the `PENDING` or `DELAYED` states. -Enqueues the run. - -[TriggerTaskService.call()](/apps//webapp/app/v3/services/triggerTask.server.ts#246) - -## 2. Batch trigger - -## 3. DevQueueConsumer executing a run - -### a. Lock run and set status to `EXECUTING` - -[DevQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts#371) - -### b. 
If an error is thrown, unlock the run and set status to `PENDING` - -[DevQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts#477) - -## 4. SharedQueueConsumer executing a run - -### a. `EXECUTE`, lock the run - -We lock the run and update some basic metadata (but not status). -[SharedQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#394) - -### b. `EXECUTE`, if an error is thrown, unlock the run - -We unlock the run, but don't change the status. -[SharedQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#552) - -### c. `EXECUTE`, if the run has no deployment set the status to `WAITING_FOR_DEPLOY` - -[SharedQueueConsumer.#doWorkInternal()](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#876) - -## 5. CompleteAttemptService retrying a run - -### a. When an attempt has failed, we set the status to `RETRYING_AFTER_FAILURE` - -[CompleteAttemptService.#completeAttemptFailed()](/apps/webapp/app/v3/services/completeAttempt.server.ts#239) - -## 6. CreateTaskRunAttemptService creating a new attempt, setting the run to `EXECUTING` - -We call this when: - -- [Executing a DEV run from the CLI.](/packages/cli-v3//src/dev/workerRuntime.ts#305) -- [Deprecated: directly from the SharedQueueCOnsumer when we don't support lazy attempts](/apps/webapp/app/v3/marqs/sharedQueueConsumer.server.ts#501) -- [When we receive a `CREATE_TASK_RUN_ATTEMPT` message from the coordinator](/apps/webapp//app/v3//handleSocketIo.server.ts#187) - -This is the actual very simple TaskRun update: -[CreateTaskRunAttemptService.call()](/apps/webapp/app/v3/services/createTaskRunAttempt.server.ts#134) - -## 7. EnqueueDelayedRunService set a run to `PENDING` when the `delay` has elapsed - -When the run attempt gets created it will be marked as `EXECUTING`. - -[EnqueueDelayedRunService.#call()](/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts#41) - -## 8. 
FinalizeTaskRunService finalizing a run - -This service is called from many places, when a run is in a "final" state. This means the run can't be acted on anymore. - -We set the status, expiredAt and completedAt fields. - -[FinalizeTaskRunService.#call()](/apps/webapp/app/v3/services/finalizeTaskRun.server.ts#63) - -This function is called from: - -- [`FailedTaskRunService` when a run has SYSTEM_FAILURE](/apps/webapp/app/v3/failedTaskRun.server.ts#41) -- [`CancelAttemptService` when an attempt is canceled](/apps/webapp/app/v3/services/cancelAttempt.server.ts#66) -- [`CancelTaskRunService` when a run is canceled](/apps/webapp/app/v3/services/cancelTaskRun.server.ts#51) -- `CompleteAttemptService` when a SYSTEM_FAILURE happens - - [No attempt](/apps/webapp/app/v3/services/completeAttempt.server.ts#74) - - [`completeAttemptFailed` and there's no checkpoint](/apps/webapp/app/v3/services/completeAttempt.server.ts#280) - - [`completeAttemptFailed` and the error is internal and a graceful exit timeout](/apps/webapp/app/v3/services/completeAttempt.server.ts#321) -- `CompleteTaskRunService` when a run has failed (this isn't a bug) - - [`completeAttemptFailed`](/apps/webapp/app/v3/services/completeAttempt.server.ts#352) -- `CompleteTaskRunService` when a run is completed successfully - - [`completeAttemptSuccessfully`](/apps/webapp/app/v3/services/completeAttempt.server.ts#135) -- `CrashTaskRunService` when a run has crashed - - [`call`](/apps/webapp/app/v3/services/crashTaskRun.server.ts#47) -- `ExpireEnqueuedRunService` when a run has expired - - [`call`](/apps/webapp/app/v3/services/expireEnqueuedRun.server.ts#42) - -## 9. RescheduleTaskRunService (when further delaying a delayed run) - -[RescheduleTaskRunService.#call()](/apps/webapp/app/v3/services/rescheduleTaskRun.server.ts#21) - -## 10. Triggering a scheduled run - -Graphile Worker calls this function based on the schedule. We add the schedule data onto the run, and call `TriggerTaskService.call()`. 
- -[TriggerScheduledRunService.#call()](/apps/webapp/app/v3/services/triggerScheduledTask.server.ts#131) From 56979e66e39ca1a8764f5e247450e836ee2b9b6c Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:17:40 +0000 Subject: [PATCH 311/485] use default machine preset from platform package --- apps/webapp/app/v3/runEngine.server.ts | 2 +- .../v3/services/worker/workerGroupTokenService.server.ts | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 5e30fe6321..cdf1a9fa72 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -27,7 +27,7 @@ function createRunEngine() { pollIntervalMs: env.RUN_ENGINE_WORKER_POLL_INTERVAL, }, machines: { - defaultMachine: defaultMachine, + defaultMachine, machines: allMachines(), baseCostInCents: env.CENTS_PER_RUN, }, diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 96e40639e7..9ed932a49d 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -25,6 +25,8 @@ import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; import { resolveVariablesForEnvironment } from "~/v3/environmentVariables/environmentVariablesRepository.server"; import { generateJWTTokenForEnvironment } from "~/services/apiAuth.server"; import { fromFriendlyId } from "@trigger.dev/core/v3/apps"; +import { machinePresetFromName } from "~/v3/machinePresets.server"; +import { defaultMachine } from "@trigger.dev/platform/v3"; export class WorkerGroupTokenService extends WithRunEngine { private readonly tokenPrefix = "tr_wgt_"; @@ -647,12 +649,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { isWarmStart, }); - const defaultMachinePreset 
= { - name: "small-1x", - cpu: 1, - memory: 1, - centsPerMs: 0, - } satisfies MachinePreset; + const defaultMachinePreset = machinePresetFromName(defaultMachine); const environment = this.environment ?? From ae6a47350e96e7d6b57a1d27cc13f4aa18a4a2a3 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:35:51 +0000 Subject: [PATCH 312/485] worker instances plural in schema --- internal-packages/database/prisma/schema.prisma | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index dc3effda79..b538cb7c21 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -423,7 +423,7 @@ model RuntimeEnvironment { taskRunNumberCounter TaskRunNumberCounter[] taskRunCheckpoints TaskRunCheckpoint[] waitpoints Waitpoint[] - workerInstance WorkerInstance[] + workerInstances WorkerInstance[] @@unique([projectId, slug, orgMemberId]) @@unique([projectId, shortcode]) From 482a98d8d01e20a00f63ca7cf54ac9b21f5cdc52 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 20 Dec 2024 09:44:30 +0000 Subject: [PATCH 313/485] disable pnpm update notifications --- .npmrc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.npmrc b/.npmrc index 8dbd39f189..fac274c900 100644 --- a/.npmrc +++ b/.npmrc @@ -1,3 +1,4 @@ link-workspace-packages=false public-hoist-pattern[]=*prisma* -prefer-workspace-packages=true \ No newline at end of file +prefer-workspace-packages=true +update-notifier=false \ No newline at end of file From d3386e223a6f6d12ca98abc9b383612663af5767 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 20 Dec 2024 12:30:23 +0000 Subject: [PATCH 314/485] return worker group details from connect call --- .../routes/api.v1.worker-actions.connect.ts | 13 ++++++++----- 
.../worker/workerGroupTokenService.server.ts | 11 ++++++++++- packages/worker/src/supervisor/schemas.ts | 4 ++++ packages/worker/src/supervisor/session.ts | 19 +++++++++++++------ 4 files changed, 35 insertions(+), 12 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts index 5b74c0033a..024526a147 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts @@ -1,8 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { - WorkerApiConnectRequestBody, - WorkerApiConnectResponseBody, -} from "@trigger.dev/worker"; +import { WorkerApiConnectRequestBody, WorkerApiConnectResponseBody } from "@trigger.dev/worker"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( @@ -11,6 +8,12 @@ export const action = createActionWorkerApiRoute( }, async ({ authenticatedWorker, body }): Promise> => { await authenticatedWorker.connect(body.metadata); - return json({ ok: true }); + return json({ + ok: true, + workerGroup: { + type: authenticatedWorker.type, + name: authenticatedWorker.name, + }, + }); } ); diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 9ed932a49d..020b4cfa49 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -207,6 +207,7 @@ export class WorkerGroupTokenService extends WithRunEngine { prisma: this._prisma, engine: this._engine, type: WorkerInstanceGroupType.MANAGED, + name: workerGroup.name, workerGroupId: workerGroup.id, workerInstanceId: workerInstance.id, masterQueue: workerGroup.masterQueue, @@ -242,6 +243,7 @@ export class WorkerGroupTokenService extends WithRunEngine { prisma: 
this._prisma, engine: this._engine, type: WorkerInstanceGroupType.UNMANAGED, + name: workerGroup.name, workerGroupId: workerGroup.id, workerInstanceId: workerInstance.id, masterQueue: workerGroup.masterQueue, @@ -481,6 +483,7 @@ export type WorkerInstanceEnv = z.infer; export type AuthenticatedWorkerInstanceOptions = WithRunEngineOptions<{ type: WorkerInstanceGroupType; + name: string; workerGroupId: string; workerInstanceId: string; masterQueue: string; @@ -492,6 +495,7 @@ export type AuthenticatedWorkerInstanceOptions = WithRunEngineOptions<{ export class AuthenticatedWorkerInstance extends WithRunEngine { readonly type: WorkerInstanceGroupType; + readonly name: string; readonly workerGroupId: string; readonly workerInstanceId: string; readonly masterQueue: string; @@ -499,13 +503,14 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { readonly deploymentId?: string; readonly backgroundWorkerId?: string; - // FIXME + // FIXME: Required for unmanaged workers readonly isLatestDeployment = true; constructor(opts: AuthenticatedWorkerInstanceOptions) { super({ prisma: opts.prisma, engine: opts.engine }); this.type = opts.type; + this.name = opts.name; this.workerGroupId = opts.workerGroupId; this.workerInstanceId = opts.workerInstanceId; this.masterQueue = opts.masterQueue; @@ -715,6 +720,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { if (this.type === WorkerInstanceGroupType.MANAGED) { return { type: WorkerInstanceGroupType.MANAGED, + name: this.name, workerGroupId: this.workerGroupId, workerInstanceId: this.workerInstanceId, masterQueue: this.masterQueue, @@ -723,6 +729,7 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { return { type: WorkerInstanceGroupType.UNMANAGED, + name: this.name, workerGroupId: this.workerGroupId, workerInstanceId: this.workerInstanceId, masterQueue: this.masterQueue, @@ -761,12 +768,14 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { export type 
WorkerGroupTokenAuthenticationResponse = | { type: typeof WorkerInstanceGroupType.MANAGED; + name: string; workerGroupId: string; workerInstanceId: string; masterQueue: string; } | { type: typeof WorkerInstanceGroupType.UNMANAGED; + name: string; workerGroupId: string; workerInstanceId: string; masterQueue: string; diff --git a/packages/worker/src/supervisor/schemas.ts b/packages/worker/src/supervisor/schemas.ts index cfeee07458..6fc646576e 100644 --- a/packages/worker/src/supervisor/schemas.ts +++ b/packages/worker/src/supervisor/schemas.ts @@ -33,6 +33,10 @@ export type WorkerApiConnectRequestBody = z.infer; diff --git a/packages/worker/src/supervisor/session.ts b/packages/worker/src/supervisor/session.ts index 9cb4b872c0..c21ebff139 100644 --- a/packages/worker/src/supervisor/session.ts +++ b/packages/worker/src/supervisor/session.ts @@ -100,17 +100,17 @@ export class SupervisorSession extends EventEmitter { extraHeaders: getDefaultWorkerHeaders(this.opts), }); this.socket.on("run:notify", ({ version, run }) => { - console.log("[WorkerSession] Received run notification", { version, run }); + console.log("[WorkerSession][WS] Received run notification", { version, run }); this.emit("runNotification", { time: new Date(), run }); }); this.socket.on("connect", () => { - console.log("[WorkerSession] Connected to platform"); + console.log("[WorkerSession][WS] Connected to platform"); }); this.socket.on("connect_error", (error) => { - console.error("[WorkerSession] Connection error", { error }); + console.error("[WorkerSession][WS] Connection error", { error }); }); this.socket.on("disconnect", (reason, description) => { - console.log("[WorkerSession] Disconnected from platform", { reason, description }); + console.log("[WorkerSession][WS] Disconnected from platform", { reason, description }); }); } @@ -122,10 +122,17 @@ export class SupervisorSession extends EventEmitter { }); if (!connect.success) { - console.error("[WorkerSession] Failed to connect via HTTP client", 
{ error: connect.error }); - throw new Error("[WorkerSession] Failed to connect via HTTP client"); + console.error("[WorkerSession][HTTP] Failed to connect", { error: connect.error }); + throw new Error("[WorkerSession][HTTP] Failed to connect"); } + const { workerGroup } = connect.data; + + console.log("[WorkerSession][HTTP] Connected to platform", { + type: workerGroup.type, + name: workerGroup.name, + }); + this.queueConsumer.start(); this.heartbeatService.start(); this.createSocket(); From 7d382ac0bf379086f86f5bdeea8033d4807b36ba Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 20 Dec 2024 13:14:49 +0000 Subject: [PATCH 315/485] add workers admin route --- .../webapp/app/routes/admin.api.v1.workers.ts | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 apps/webapp/app/routes/admin.api.v1.workers.ts diff --git a/apps/webapp/app/routes/admin.api.v1.workers.ts b/apps/webapp/app/routes/admin.api.v1.workers.ts new file mode 100644 index 0000000000..185c9cc4d0 --- /dev/null +++ b/apps/webapp/app/routes/admin.api.v1.workers.ts @@ -0,0 +1,65 @@ +import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; +import { z } from "zod"; +import { prisma } from "~/db.server"; +import { authenticateApiRequestWithPersonalAccessToken } from "~/services/personalAccessToken.server"; +import { WorkerGroupService } from "~/v3/services/worker/workerGroupService.server"; + +const RequestBodySchema = z.object({ + name: z.string().optional(), + description: z.string().optional(), + projectId: z.string().optional(), + makeDefault: z.boolean().optional(), +}); + +export async function action({ request }: ActionFunctionArgs) { + // Next authenticate the request + const authenticationResult = await authenticateApiRequestWithPersonalAccessToken(request); + + if (!authenticationResult) { + return json({ error: "Invalid or Missing API key" }, { status: 401 }); + } + + const user = await prisma.user.findUnique({ + 
where: { + id: authenticationResult.userId, + }, + }); + + if (!user) { + return json({ error: "Invalid or Missing API key" }, { status: 401 }); + } + + if (!user.admin) { + return json({ error: "You must be an admin to perform this action" }, { status: 403 }); + } + + try { + const rawBody = await request.json(); + const { name, description, projectId, makeDefault } = RequestBodySchema.parse(rawBody ?? {}); + + const service = new WorkerGroupService(); + const { workerGroup, token } = await service.createWorkerGroup({ + name, + description, + }); + + if (makeDefault && projectId) { + await prisma.project.update({ + where: { + id: projectId, + }, + data: { + defaultWorkerGroupId: workerGroup.id, + engine: "V2", + }, + }); + } + + return json({ + token, + workerGroup, + }); + } catch (error) { + return json({ error: error instanceof Error ? error.message : error }, { status: 400 }); + } +} From a759bf700bcc0a8e0139725f6e2f55d04e7f7e44 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 20 Dec 2024 13:22:29 +0000 Subject: [PATCH 316/485] fix heartbeat route return type --- apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts index 332e5396ff..babe12d5ea 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts @@ -1,15 +1,12 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { - WorkerApiConnectResponseBody, - WorkerApiHeartbeatRequestBody, -} from "@trigger.dev/worker"; +import { WorkerApiHeartbeatResponseBody, WorkerApiHeartbeatRequestBody } from "@trigger.dev/worker"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( { body: 
WorkerApiHeartbeatRequestBody, }, - async ({ authenticatedWorker }): Promise> => { + async ({ authenticatedWorker }): Promise> => { await authenticatedWorker.heartbeatWorkerInstance(); return json({ ok: true }); } From 927edea53211d2fc004ff36892fc5d775b4c162b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 20 Dec 2024 15:57:12 +0000 Subject: [PATCH 317/485] move deployment labels to core apps --- apps/webapp/app/consts.ts | 2 -- apps/webapp/app/presenters/v3/TaskListPresenter.server.ts | 2 +- ...rker-actions.deployments.$deploymentFriendlyId.dequeue.ts | 2 +- apps/webapp/app/v3/models/workerDeployment.server.ts | 5 ++++- .../app/v3/services/createDeployedBackgroundWorker.server.ts | 3 +-- apps/webapp/app/v3/services/finalizeDeployment.server.ts | 2 +- apps/webapp/app/v3/services/rollbackDeployment.server.ts | 2 +- .../app/v3/services/worker/workerGroupTokenService.server.ts | 3 +-- internal-packages/run-engine/src/engine/consts.ts | 1 - internal-packages/run-engine/src/engine/db/worker.ts | 2 +- packages/core/src/v3/apps/consts.ts | 1 + 11 files changed, 12 insertions(+), 13 deletions(-) diff --git a/apps/webapp/app/consts.ts b/apps/webapp/app/consts.ts index 84f56d1bd4..e349bc086b 100644 --- a/apps/webapp/app/consts.ts +++ b/apps/webapp/app/consts.ts @@ -1,7 +1,5 @@ export const LIVE_ENVIRONMENT = "live"; export const DEV_ENVIRONMENT = "development"; -export const CURRENT_DEPLOYMENT_LABEL = "current"; -export const CURRENT_UNMANAGED_DEPLOYMENT_LABEL = "current-unmanaged"; export const MAX_LIVE_PROJECTS = 1; export const DEFAULT_MAX_CONCURRENT_RUNS = 10; export const MAX_CONCURRENT_RUNS_LIMIT = 20; diff --git a/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts b/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts index cb1aa77b04..c4260bf312 100644 --- a/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts @@ -19,8 +19,8 @@ import { 
import { logger } from "~/services/logger.server"; import { BasePresenter } from "./basePresenter.server"; import { TaskRunStatus } from "~/database-types"; -import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; import { concurrencyTracker } from "~/v3/services/taskRunConcurrencyTracker.server"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; export type Task = { slug: string; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts index 2e48ee75be..fa56f895b2 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts @@ -1,7 +1,7 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; import { WorkerApiDequeueResponseBody } from "@trigger.dev/worker"; import { z } from "zod"; -import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; import { $replica, prisma } from "~/db.server"; import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/v3/models/workerDeployment.server.ts b/apps/webapp/app/v3/models/workerDeployment.server.ts index 71575a32bf..fa98dba818 100644 --- a/apps/webapp/app/v3/models/workerDeployment.server.ts +++ b/apps/webapp/app/v3/models/workerDeployment.server.ts @@ -1,6 +1,9 @@ import type { Prettify } from "@trigger.dev/core"; import { BackgroundWorker, WorkerDeployment } from "@trigger.dev/database"; -import { CURRENT_DEPLOYMENT_LABEL, CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; +import { + CURRENT_DEPLOYMENT_LABEL, + CURRENT_UNMANAGED_DEPLOYMENT_LABEL, +} from "@trigger.dev/core/v3/apps"; import { Prisma, prisma } from "~/db.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; 
diff --git a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts index da5e5d9507..1840a9c1e3 100644 --- a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts @@ -1,9 +1,8 @@ import { CreateBackgroundWorkerRequestBody } from "@trigger.dev/core/v3"; import type { BackgroundWorker } from "@trigger.dev/database"; -import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; -import { generateFriendlyId } from "../friendlyIdentifiers"; import { socketIo } from "../handleSocketIo.server"; import { updateEnvConcurrencyLimits } from "../runQueue.server"; import { PerformDeploymentAlertsService } from "./alerts/performDeploymentAlerts.server"; diff --git a/apps/webapp/app/v3/services/finalizeDeployment.server.ts b/apps/webapp/app/v3/services/finalizeDeployment.server.ts index 13418fd156..baeca927b2 100644 --- a/apps/webapp/app/v3/services/finalizeDeployment.server.ts +++ b/apps/webapp/app/v3/services/finalizeDeployment.server.ts @@ -1,5 +1,5 @@ import { FinalizeDeploymentRequestBody } from "@trigger.dev/core/v3/schemas"; -import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; import { socketIo } from "../handleSocketIo.server"; diff --git a/apps/webapp/app/v3/services/rollbackDeployment.server.ts b/apps/webapp/app/v3/services/rollbackDeployment.server.ts index ceeca4fed8..128797cb06 100644 --- a/apps/webapp/app/v3/services/rollbackDeployment.server.ts +++ b/apps/webapp/app/v3/services/rollbackDeployment.server.ts @@ -1,7 +1,7 @@ 
import { logger } from "~/services/logger.server"; import { BaseService } from "./baseService.server"; import { WorkerDeployment, WorkerInstanceGroupType } from "@trigger.dev/database"; -import { CURRENT_DEPLOYMENT_LABEL } from "~/consts"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; export class RollbackDeploymentService extends BaseService { diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 020b4cfa49..66e5752f83 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -21,10 +21,9 @@ import { } from "@trigger.dev/core/v3"; import { env } from "~/env.server"; import { $transaction } from "~/db.server"; -import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL } from "~/consts"; import { resolveVariablesForEnvironment } from "~/v3/environmentVariables/environmentVariablesRepository.server"; import { generateJWTTokenForEnvironment } from "~/services/apiAuth.server"; -import { fromFriendlyId } from "@trigger.dev/core/v3/apps"; +import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL, fromFriendlyId } from "@trigger.dev/core/v3/apps"; import { machinePresetFromName } from "~/v3/machinePresets.server"; import { defaultMachine } from "@trigger.dev/platform/v3"; diff --git a/internal-packages/run-engine/src/engine/consts.ts b/internal-packages/run-engine/src/engine/consts.ts index ee183d1c5c..6ea6f54c38 100644 --- a/internal-packages/run-engine/src/engine/consts.ts +++ b/internal-packages/run-engine/src/engine/consts.ts @@ -1,2 +1 @@ export const MAX_TASK_RUN_ATTEMPTS = 250; -export const CURRENT_DEPLOYMENT_LABEL = "current"; diff --git a/internal-packages/run-engine/src/engine/db/worker.ts b/internal-packages/run-engine/src/engine/db/worker.ts index 046779251a..684bad6f50 
100644 --- a/internal-packages/run-engine/src/engine/db/worker.ts +++ b/internal-packages/run-engine/src/engine/db/worker.ts @@ -5,7 +5,7 @@ import { PrismaClientOrTransaction, WorkerDeployment, } from "@trigger.dev/database"; -import { CURRENT_DEPLOYMENT_LABEL } from "../consts"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; type RunWithMininimalEnvironment = Prisma.TaskRunGetPayload<{ include: { diff --git a/packages/core/src/v3/apps/consts.ts b/packages/core/src/v3/apps/consts.ts index 6f62cf064a..6789d897a9 100644 --- a/packages/core/src/v3/apps/consts.ts +++ b/packages/core/src/v3/apps/consts.ts @@ -1 +1,2 @@ export const CURRENT_DEPLOYMENT_LABEL = "current"; +export const CURRENT_UNMANAGED_DEPLOYMENT_LABEL = "current-unmanaged"; From 3d21147293a2c0f8ba2dbfedd8bacbe7969729c5 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 31 Dec 2024 15:13:49 +0000 Subject: [PATCH 318/485] refactor run controller env schema --- .../src/entryPoints/managed-run-controller.ts | 21 ++++++++++--------- .../entryPoints/unmanaged-run-controller.ts | 4 ++-- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index e52361c7dd..5ae9f37939 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -2,7 +2,6 @@ import { logger } from "../utilities/logger.js"; import { OnWaitMessage, TaskRunProcess } from "../executions/taskRunProcess.js"; import { env as stdEnv } from "std-env"; import { z } from "zod"; -import { CLOUD_API_URL } from "../consts.js"; import { randomUUID } from "crypto"; import { readJSONFile } from "../utilities/fileSystem.js"; import { @@ -24,20 +23,22 @@ import { io, Socket } from "socket.io-client"; // All IDs are friendly IDs const Env = z.object({ - TRIGGER_API_URL: 
z.string().url().default(CLOUD_API_URL), + // Set at build time TRIGGER_CONTENT_HASH: z.string(), - TRIGGER_WORKER_API_URL: z.string().url(), - TRIGGER_WORKLOAD_CONTROLLER_ID: z.string().default(`controller_${randomUUID()}`), TRIGGER_DEPLOYMENT_ID: z.string(), TRIGGER_DEPLOYMENT_VERSION: z.string(), - TRIGGER_ENV_ID: z.string(), - // This is only useful for cold starts - TRIGGER_RUN_ID: z.string().optional(), - // This is only useful for cold starts - TRIGGER_SNAPSHOT_ID: z.string().optional(), + TRIGGER_PROJECT_ID: z.string(), + TRIGGER_PROJECT_REF: z.string(), NODE_ENV: z.string().default("production"), NODE_EXTRA_CA_CERTS: z.string().optional(), - OTEL_EXPORTER_OTLP_ENDPOINT: z.string().default("http://0.0.0.0:3030/otel"), + + // Set at runtime + TRIGGER_WORKER_API_URL: z.string().url(), + TRIGGER_WORKLOAD_CONTROLLER_ID: z.string().default(`controller_${randomUUID()}`), + TRIGGER_ENV_ID: z.string(), + TRIGGER_RUN_ID: z.string().optional(), // This is only useful for cold starts + TRIGGER_SNAPSHOT_ID: z.string().optional(), // This is only useful for cold starts + OTEL_EXPORTER_OTLP_ENDPOINT: z.string().url(), TRIGGER_WARM_START_URL: z.string().optional(), TRIGGER_MACHINE_CPU: z.string().default("0"), TRIGGER_MACHINE_MEMORY: z.string().default("0"), diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts index a027c2fe73..20b026ff91 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts @@ -9,7 +9,7 @@ import { WorkerManifest } from "@trigger.dev/core/v3"; import { SupervisorSession } from "@trigger.dev/worker"; const Env = z.object({ - TRIGGER_API_URL: z.string().default(CLOUD_API_URL), + TRIGGER_API_URL: z.string().url().default(CLOUD_API_URL), TRIGGER_CONTENT_HASH: z.string(), TRIGGER_WORKER_TOKEN: z.string(), TRIGGER_WORKER_INSTANCE_NAME: z.string().default(randomUUID()), @@ -17,7 +17,7 @@ const Env 
= z.object({ TRIGGER_DEPLOYMENT_VERSION: z.string(), NODE_ENV: z.string().default("production"), NODE_EXTRA_CA_CERTS: z.string().optional(), - OTEL_EXPORTER_OTLP_ENDPOINT: z.string().default("http://0.0.0.0:3030/otel"), + OTEL_EXPORTER_OTLP_ENDPOINT: z.string().url(), }); const env = Env.parse(stdEnv); From 4f954b26d0b9823fcfb40626968ea382d0897029 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 31 Dec 2024 15:54:50 +0000 Subject: [PATCH 319/485] Add firstAttemptStartedAt to TaskRun --- internal-packages/database/prisma/schema.prisma | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index b538cb7c21..5de8bd3728 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1731,6 +1731,9 @@ model TaskRun { completedAt DateTime? machinePreset String? + /// Run Engine 2.0+ + firstAttemptStartedAt DateTime? + usageDurationMs Int @default(0) costInCents Float @default(0) baseCostInCents Float @default(0) From 3d4068a4dad61816c76b92a207698a904eaf97f6 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 2 Jan 2025 17:10:19 +0000 Subject: [PATCH 320/485] RunEngine 2.0 batch trigger support (#1581) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make it clear when BatchTriggerV2Service is used * Copy of BatchTriggerV2Service * WIP batch triggering * Allow blocking a run with multiple waitpoints at once. 
Made it atomic * Removed unused param * New batch service * Pass through the parentRunId and resumeParentOnCompletion * Use the new batch service, and correct trigger task version * Force V1 engine if using BatchTriggerV2Service, we’ve already done the check at this point * Removed the $transaction and early exit if nothing changed * Adedd a simple batch task to the hello world reference catalog * Fix for batch waits not working * Added parentRunId in a couple more places * Removed waitForBatch log * Added another parentRunId * Expanded the example to include all the different triggers --- apps/webapp/app/routes/api.v1.tasks.batch.ts | 18 +- apps/webapp/app/services/worker.server.ts | 14 + .../app/v3/services/batchTriggerV2.server.ts | 6 +- .../app/v3/services/batchTriggerV3.server.ts | 914 ++++++++++++++++++ .../app/v3/services/triggerTask.server.ts | 7 +- .../app/v3/services/triggerTaskV2.server.ts | 5 +- .../run-engine/src/engine/index.ts | 59 +- packages/core/src/v3/apps/friendlyId.ts | 1 + .../src/v3/runtime/managedRuntimeManager.ts | 2 - packages/core/src/v3/schemas/api.ts | 11 + packages/trigger-sdk/src/v3/shared.ts | 14 +- references/hello-world/src/trigger/example.ts | 50 +- 12 files changed, 1046 insertions(+), 55 deletions(-) create mode 100644 apps/webapp/app/v3/services/batchTriggerV3.server.ts diff --git a/apps/webapp/app/routes/api.v1.tasks.batch.ts b/apps/webapp/app/routes/api.v1.tasks.batch.ts index 5b1b89d2d9..c7534acee4 100644 --- a/apps/webapp/app/routes/api.v1.tasks.batch.ts +++ b/apps/webapp/app/routes/api.v1.tasks.batch.ts @@ -1,23 +1,23 @@ import { json } from "@remix-run/server-runtime"; import { - BatchTriggerTaskResponse, BatchTriggerTaskV2RequestBody, BatchTriggerTaskV2Response, generateJWT, } from "@trigger.dev/core/v3"; import { env } from "~/env.server"; +import { AuthenticatedEnvironment, getOneTimeUseToken } from "~/services/apiAuth.server"; +import { logger } from "~/services/logger.server"; import { createActionApiRoute } from 
"~/services/routeBuilders/apiBuilder.server"; -import { HeadersSchema } from "./api.v1.tasks.$taskId.trigger"; import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; +import { determineEngineVersion } from "~/v3/engineVersion.server"; +import { ServiceValidationError } from "~/v3/services/baseService.server"; import { BatchProcessingStrategy, BatchTriggerV2Service, } from "~/v3/services/batchTriggerV2.server"; -import { ServiceValidationError } from "~/v3/services/baseService.server"; +import { BatchTriggerV3Service } from "~/v3/services/batchTriggerV3.server"; import { OutOfEntitlementError } from "~/v3/services/triggerTask.server"; -import { AuthenticatedEnvironment, getOneTimeUseToken } from "~/services/apiAuth.server"; -import { logger } from "~/services/logger.server"; -import { z } from "zod"; +import { HeadersSchema } from "./api.v1.tasks.$taskId.trigger"; const { action, loader } = createActionApiRoute( { @@ -87,7 +87,11 @@ const { action, loader } = createActionApiRoute( resolveIdempotencyKeyTTL(idempotencyKeyTTL) ?? new Date(Date.now() + 24 * 60 * 60 * 1000 * 30); - const service = new BatchTriggerV2Service(batchProcessingStrategy ?? undefined); + const version = await determineEngineVersion({ environment: authentication.environment }); + const service = + version === "V1" + ? new BatchTriggerV2Service(batchProcessingStrategy ?? undefined) + : new BatchTriggerV3Service(batchProcessingStrategy ?? 
undefined); try { const batch = await service.call(authentication.environment, body, { diff --git a/apps/webapp/app/services/worker.server.ts b/apps/webapp/app/services/worker.server.ts index c2409cf8c5..3eea668a6e 100644 --- a/apps/webapp/app/services/worker.server.ts +++ b/apps/webapp/app/services/worker.server.ts @@ -56,6 +56,10 @@ import { } from "~/v3/services/cancelDevSessionRuns.server"; import { logger } from "./logger.server"; import { BatchProcessingOptions, BatchTriggerV2Service } from "~/v3/services/batchTriggerV2.server"; +import { + BatchProcessingOptions as BatchProcessingOptionsV3, + BatchTriggerV3Service, +} from "~/v3/services/batchTriggerV3.server"; const workerCatalog = { indexEndpoint: z.object({ @@ -199,6 +203,7 @@ const workerCatalog = { }), "v3.cancelDevSessionRuns": CancelDevSessionRunsServiceOptions, "v3.processBatchTaskRun": BatchProcessingOptions, + "v3.processBatchTaskRunV3": BatchProcessingOptionsV3, }; const executionWorkerCatalog = { @@ -735,6 +740,15 @@ function getWorkerQueue() { handler: async (payload, job) => { const service = new BatchTriggerV2Service(payload.strategy); + await service.processBatchTaskRun(payload); + }, + }, + "v3.processBatchTaskRunV3": { + priority: 0, + maxAttempts: 5, + handler: async (payload, job) => { + const service = new BatchTriggerV3Service(payload.strategy); + await service.processBatchTaskRun(payload); }, }, diff --git a/apps/webapp/app/v3/services/batchTriggerV2.server.ts b/apps/webapp/app/v3/services/batchTriggerV2.server.ts index ff39679fc5..8c7bbf411f 100644 --- a/apps/webapp/app/v3/services/batchTriggerV2.server.ts +++ b/apps/webapp/app/v3/services/batchTriggerV2.server.ts @@ -49,6 +49,9 @@ export type BatchTriggerTaskServiceOptions = { oneTimeUseToken?: string; }; +/** + * Larger batches, used in Run Engine v1 + */ export class BatchTriggerV2Service extends BaseService { private _batchProcessingStrategy: BatchProcessingStrategy; @@ -787,7 +790,8 @@ export class BatchTriggerV2Service extends 
BaseService { batchId: batch.friendlyId, skipChecks: true, runFriendlyId: task.runFriendlyId, - } + }, + "V1" ); if (!run) { diff --git a/apps/webapp/app/v3/services/batchTriggerV3.server.ts b/apps/webapp/app/v3/services/batchTriggerV3.server.ts new file mode 100644 index 0000000000..66c259f83a --- /dev/null +++ b/apps/webapp/app/v3/services/batchTriggerV3.server.ts @@ -0,0 +1,914 @@ +import { + BatchTriggerTaskV2RequestBody, + BatchTriggerTaskV2Response, + IOPacket, + packetRequiresOffloading, + parsePacket, +} from "@trigger.dev/core/v3"; +import { BatchId, RunId } from "@trigger.dev/core/v3/apps"; +import { BatchTaskRun, Prisma } from "@trigger.dev/database"; +import { z } from "zod"; +import { $transaction, prisma, PrismaClientOrTransaction } from "~/db.server"; +import { env } from "~/env.server"; +import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; +import { logger } from "~/services/logger.server"; +import { getEntitlement } from "~/services/platform.v3.server"; +import { workerQueue } from "~/services/worker.server"; +import { downloadPacketFromObjectStore, uploadPacketToObjectStore } from "../r2.server"; +import { startActiveSpan } from "../tracer.server"; +import { ServiceValidationError, WithRunEngine } from "./baseService.server"; +import { OutOfEntitlementError, TriggerTaskService } from "./triggerTask.server"; +import { guardQueueSizeLimitsForEnv } from "./triggerTaskV2.server"; + +const PROCESSING_BATCH_SIZE = 50; +const ASYNC_BATCH_PROCESS_SIZE_THRESHOLD = 20; +const MAX_ATTEMPTS = 10; + +export const BatchProcessingStrategy = z.enum(["sequential", "parallel"]); +export type BatchProcessingStrategy = z.infer; + +export const BatchProcessingOptions = z.object({ + batchId: z.string(), + processingId: z.string(), + range: z.object({ start: z.number().int(), count: z.number().int() }), + attemptCount: z.number().int(), + strategy: BatchProcessingStrategy, + parentRunId: z.string().optional(), + resumeParentOnCompletion: 
z.boolean().optional(), +}); + +export type BatchProcessingOptions = z.infer; + +export type BatchTriggerTaskServiceOptions = { + idempotencyKey?: string; + idempotencyKeyExpiresAt?: Date; + triggerVersion?: string; + traceContext?: Record; + spanParentAsLink?: boolean; + oneTimeUseToken?: string; +}; + +/** + * Larger batches, used in Run Engine v2 + */ +export class BatchTriggerV3Service extends WithRunEngine { + private _batchProcessingStrategy: BatchProcessingStrategy; + + constructor( + batchProcessingStrategy?: BatchProcessingStrategy, + protected readonly _prisma: PrismaClientOrTransaction = prisma + ) { + super({ prisma }); + + this._batchProcessingStrategy = batchProcessingStrategy ?? "parallel"; + } + + public async call( + environment: AuthenticatedEnvironment, + body: BatchTriggerTaskV2RequestBody, + options: BatchTriggerTaskServiceOptions = {} + ): Promise { + try { + return await this.traceWithEnv( + "call()", + environment, + async (span) => { + const existingBatch = options.idempotencyKey + ? 
await this._prisma.batchTaskRun.findUnique({ + where: { + runtimeEnvironmentId_idempotencyKey: { + runtimeEnvironmentId: environment.id, + idempotencyKey: options.idempotencyKey, + }, + }, + }) + : undefined; + + if (existingBatch) { + if ( + existingBatch.idempotencyKeyExpiresAt && + existingBatch.idempotencyKeyExpiresAt < new Date() + ) { + logger.debug("[BatchTriggerV3][call] Idempotency key has expired", { + idempotencyKey: options.idempotencyKey, + batch: { + id: existingBatch.id, + friendlyId: existingBatch.friendlyId, + runCount: existingBatch.runCount, + idempotencyKeyExpiresAt: existingBatch.idempotencyKeyExpiresAt, + idempotencyKey: existingBatch.idempotencyKey, + }, + }); + + // Update the existing batch to remove the idempotency key + await this._prisma.batchTaskRun.update({ + where: { id: existingBatch.id }, + data: { idempotencyKey: null }, + }); + + // Don't return, just continue with the batch trigger + } else { + span.setAttribute("batchId", existingBatch.friendlyId); + + return this.#respondWithExistingBatch( + existingBatch, + environment, + body.resumeParentOnCompletion ? body.parentRunId : undefined + ); + } + } + + const { id, friendlyId } = BatchId.generate(); + + span.setAttribute("batchId", friendlyId); + + if (environment.type !== "DEVELOPMENT") { + const result = await getEntitlement(environment.organizationId); + if (result && result.hasAccess === false) { + throw new OutOfEntitlementError(); + } + } + + const idempotencyKeys = body.items.map((i) => i.options?.idempotencyKey).filter(Boolean); + + const cachedRuns = + idempotencyKeys.length > 0 + ? 
await this._prisma.taskRun.findMany({ + where: { + runtimeEnvironmentId: environment.id, + idempotencyKey: { + in: body.items.map((i) => i.options?.idempotencyKey).filter(Boolean), + }, + }, + select: { + friendlyId: true, + idempotencyKey: true, + idempotencyKeyExpiresAt: true, + }, + }) + : []; + + if (cachedRuns.length) { + logger.debug("[BatchTriggerV3][call] Found cached runs", { + cachedRuns, + batchId: friendlyId, + }); + } + + // Now we need to create an array of all the run IDs, in order + // If we have a cached run, that isn't expired, we should use that run ID + // If we have a cached run, that is expired, we should generate a new run ID and save that cached run ID to a set of expired run IDs + // If we don't have a cached run, we should generate a new run ID + const expiredRunIds = new Set(); + let cachedRunCount = 0; + + const runs = body.items.map((item) => { + const cachedRun = cachedRuns.find( + (r) => r.idempotencyKey === item.options?.idempotencyKey + ); + + const runId = RunId.generate(); + + if (cachedRun) { + if ( + cachedRun.idempotencyKeyExpiresAt && + cachedRun.idempotencyKeyExpiresAt < new Date() + ) { + expiredRunIds.add(cachedRun.friendlyId); + + return { + id: runId.friendlyId, + isCached: false, + idempotencyKey: item.options?.idempotencyKey ?? undefined, + taskIdentifier: item.task, + }; + } + + cachedRunCount++; + + return { + id: cachedRun.friendlyId, + isCached: true, + idempotencyKey: item.options?.idempotencyKey ?? undefined, + taskIdentifier: item.task, + }; + } + + return { + id: runId.friendlyId, + isCached: false, + idempotencyKey: item.options?.idempotencyKey ?? undefined, + taskIdentifier: item.task, + }; + }); + + //block the parent with any existing children + if (body.resumeParentOnCompletion && body.parentRunId) { + const existingChildFriendlyIds = runs.flatMap((r) => (r.isCached ? 
[r.id] : [])); + + if (existingChildFriendlyIds.length > 0) { + await this.#blockParentRun({ + parentRunId: body.parentRunId, + childFriendlyIds: existingChildFriendlyIds, + environment, + }); + } + } + + // Calculate how many new runs we need to create + const newRunCount = body.items.length - cachedRunCount; + + if (newRunCount === 0) { + logger.debug("[BatchTriggerV3][call] All runs are cached", { + batchId: friendlyId, + }); + + await this._prisma.batchTaskRun.create({ + data: { + friendlyId, + runtimeEnvironmentId: environment.id, + idempotencyKey: options.idempotencyKey, + idempotencyKeyExpiresAt: options.idempotencyKeyExpiresAt, + runCount: body.items.length, + runIds: runs.map((r) => r.id), + //todo is this correct? Surely some of the runs could still be in progress? + status: "COMPLETED", + batchVersion: "v2", + oneTimeUseToken: options.oneTimeUseToken, + }, + }); + + return { + id: friendlyId, + isCached: false, + idempotencyKey: options.idempotencyKey ?? undefined, + runs, + }; + } + + const queueSizeGuard = await guardQueueSizeLimitsForEnv( + this._engine, + environment, + newRunCount + ); + + logger.debug("Queue size guard result", { + newRunCount, + queueSizeGuard, + environment: { + id: environment.id, + type: environment.type, + organization: environment.organization, + project: environment.project, + }, + }); + + if (!queueSizeGuard.isWithinLimits) { + throw new ServiceValidationError( + `Cannot trigger ${newRunCount} tasks as the queue size limit for this environment has been reached. The maximum size is ${queueSizeGuard.maximumSize}` + ); + } + + // Expire the cached runs that are no longer valid + if (expiredRunIds.size) { + logger.debug("Expiring cached runs", { + expiredRunIds: Array.from(expiredRunIds), + batchId: friendlyId, + }); + + // TODO: is there a limit to the number of items we can update in a single query? 
+ await this._prisma.taskRun.updateMany({ + where: { friendlyId: { in: Array.from(expiredRunIds) } }, + data: { idempotencyKey: null }, + }); + } + + // Upload to object store + const payloadPacket = await this.#handlePayloadPacket( + body.items, + `batch/${friendlyId}`, + environment + ); + + const batch = await this.#createAndProcessBatchTaskRun( + friendlyId, + runs, + payloadPacket, + newRunCount, + environment, + body, + options + ); + + if (!batch) { + throw new Error("Failed to create batch"); + } + + return { + id: batch.friendlyId, + isCached: false, + idempotencyKey: batch.idempotencyKey ?? undefined, + runs, + }; + } + ); + } catch (error) { + // Detect a prisma transaction Unique constraint violation + if (error instanceof Prisma.PrismaClientKnownRequestError) { + logger.debug("BatchTriggerV3: Prisma transaction error", { + code: error.code, + message: error.message, + meta: error.meta, + }); + + if (error.code === "P2002") { + const target = error.meta?.target; + + if ( + Array.isArray(target) && + target.length > 0 && + typeof target[0] === "string" && + target[0].includes("oneTimeUseToken") + ) { + throw new ServiceValidationError( + "Cannot batch trigger with a one-time use token as it has already been used." + ); + } else { + throw new ServiceValidationError( + "Cannot batch trigger as it has already been triggered with the same idempotency key." 
+ ); + } + } + } + + throw error; + } + } + + async #createAndProcessBatchTaskRun( + batchId: string, + runs: Array<{ + id: string; + isCached: boolean; + idempotencyKey: string | undefined; + taskIdentifier: string; + }>, + payloadPacket: IOPacket, + newRunCount: number, + environment: AuthenticatedEnvironment, + body: BatchTriggerTaskV2RequestBody, + options: BatchTriggerTaskServiceOptions = {} + ) { + if (newRunCount <= ASYNC_BATCH_PROCESS_SIZE_THRESHOLD) { + const batch = await this._prisma.batchTaskRun.create({ + data: { + friendlyId: batchId, + runtimeEnvironmentId: environment.id, + idempotencyKey: options.idempotencyKey, + idempotencyKeyExpiresAt: options.idempotencyKeyExpiresAt, + runCount: newRunCount, + runIds: runs.map((r) => r.id), + payload: payloadPacket.data, + payloadType: payloadPacket.dataType, + options, + batchVersion: "v2", + oneTimeUseToken: options.oneTimeUseToken, + }, + }); + + const result = await this.#processBatchTaskRunItems({ + batch, + environment, + currentIndex: 0, + batchSize: PROCESSING_BATCH_SIZE, + items: body.items, + options, + parentRunId: body.parentRunId, + resumeParentOnCompletion: body.resumeParentOnCompletion, + }); + + switch (result.status) { + case "COMPLETE": { + logger.debug("[BatchTriggerV3][call] Batch inline processing complete", { + batchId: batch.friendlyId, + currentIndex: 0, + }); + + return batch; + } + case "INCOMPLETE": { + logger.debug("[BatchTriggerV3][call] Batch inline processing incomplete", { + batchId: batch.friendlyId, + currentIndex: result.workingIndex, + }); + + // If processing inline does not finish for some reason, enqueue processing the rest of the batch + await this.#enqueueBatchTaskRun({ + batchId: batch.id, + processingId: "0", + range: { + start: result.workingIndex, + count: PROCESSING_BATCH_SIZE, + }, + attemptCount: 0, + strategy: "sequential", + parentRunId: body.parentRunId, + resumeParentOnCompletion: body.resumeParentOnCompletion, + }); + + return batch; + } + case "ERROR": { + 
logger.error("[BatchTriggerV3][call] Batch inline processing error", { + batchId: batch.friendlyId, + currentIndex: result.workingIndex, + error: result.error, + }); + + await this.#enqueueBatchTaskRun({ + batchId: batch.id, + processingId: "0", + range: { + start: result.workingIndex, + count: PROCESSING_BATCH_SIZE, + }, + attemptCount: 0, + strategy: "sequential", + parentRunId: body.parentRunId, + resumeParentOnCompletion: body.resumeParentOnCompletion, + }); + + return batch; + } + } + } else { + return await $transaction(this._prisma, async (tx) => { + const batch = await tx.batchTaskRun.create({ + data: { + friendlyId: batchId, + runtimeEnvironmentId: environment.id, + idempotencyKey: options.idempotencyKey, + idempotencyKeyExpiresAt: options.idempotencyKeyExpiresAt, + runCount: body.items.length, + runIds: runs.map((r) => r.id), + payload: payloadPacket.data, + payloadType: payloadPacket.dataType, + options, + batchVersion: "v2", + oneTimeUseToken: options.oneTimeUseToken, + }, + }); + + switch (this._batchProcessingStrategy) { + case "sequential": { + await this.#enqueueBatchTaskRun({ + batchId: batch.id, + processingId: batchId, + range: { start: 0, count: PROCESSING_BATCH_SIZE }, + attemptCount: 0, + strategy: this._batchProcessingStrategy, + parentRunId: body.parentRunId, + resumeParentOnCompletion: body.resumeParentOnCompletion, + }); + + break; + } + case "parallel": { + const ranges = Array.from({ + length: Math.ceil(newRunCount / PROCESSING_BATCH_SIZE), + }).map((_, index) => ({ + start: index * PROCESSING_BATCH_SIZE, + count: PROCESSING_BATCH_SIZE, + })); + + await Promise.all( + ranges.map((range, index) => + this.#enqueueBatchTaskRun( + { + batchId: batch.id, + processingId: `${index}`, + range, + attemptCount: 0, + strategy: this._batchProcessingStrategy, + parentRunId: body.parentRunId, + resumeParentOnCompletion: body.resumeParentOnCompletion, + }, + tx + ) + ) + ); + + break; + } + } + + return batch; + }); + } + } + + async 
#respondWithExistingBatch( + batch: BatchTaskRun, + environment: AuthenticatedEnvironment, + blockParentRunId: string | undefined + ): Promise { + // Resolve the payload + const payloadPacket = await downloadPacketFromObjectStore( + { + data: batch.payload ?? undefined, + dataType: batch.payloadType, + }, + environment + ); + + const payload = await parsePacket(payloadPacket).then( + (p) => p as BatchTriggerTaskV2RequestBody["items"] + ); + + const runs = batch.runIds.map((id, index) => { + const item = payload[index]; + + return { + id, + taskIdentifier: item.task, + isCached: true, + idempotencyKey: item.options?.idempotencyKey ?? undefined, + }; + }); + + //block the parent with all of the children + if (blockParentRunId) { + await this.#blockParentRun({ + parentRunId: blockParentRunId, + childFriendlyIds: batch.runIds, + environment, + }); + } + + return { + id: batch.friendlyId, + idempotencyKey: batch.idempotencyKey ?? undefined, + isCached: true, + runs, + }; + } + + async processBatchTaskRun(options: BatchProcessingOptions) { + logger.debug("[BatchTriggerV3][processBatchTaskRun] Processing batch", { + options, + }); + + const $attemptCount = options.attemptCount + 1; + + // Add early return if max attempts reached + if ($attemptCount > MAX_ATTEMPTS) { + logger.error("[BatchTriggerV3][processBatchTaskRun] Max attempts reached", { + options, + attemptCount: $attemptCount, + }); + // You might want to update the batch status to failed here + return; + } + + const batch = await this._prisma.batchTaskRun.findFirst({ + where: { id: options.batchId }, + include: { + runtimeEnvironment: { + include: { + project: true, + organization: true, + }, + }, + }, + }); + + if (!batch) { + return; + } + + // Check to make sure the currentIndex is not greater than the runCount + if (options.range.start >= batch.runCount) { + logger.debug("[BatchTriggerV3][processBatchTaskRun] currentIndex is greater than runCount", { + options, + batchId: batch.friendlyId, + runCount: 
batch.runCount, + attemptCount: $attemptCount, + }); + + return; + } + + // Resolve the payload + const payloadPacket = await downloadPacketFromObjectStore( + { + data: batch.payload ?? undefined, + dataType: batch.payloadType, + }, + batch.runtimeEnvironment + ); + + const payload = await parsePacket(payloadPacket); + + if (!payload) { + logger.debug("[BatchTriggerV3][processBatchTaskRun] Failed to parse payload", { + options, + batchId: batch.friendlyId, + attemptCount: $attemptCount, + }); + + throw new Error("Failed to parse payload"); + } + + // Skip zod parsing + const $payload = payload as BatchTriggerTaskV2RequestBody["items"]; + const $options = batch.options as BatchTriggerTaskServiceOptions; + + const result = await this.#processBatchTaskRunItems({ + batch, + environment: batch.runtimeEnvironment, + currentIndex: options.range.start, + batchSize: options.range.count, + items: $payload, + options: $options, + }); + + switch (result.status) { + case "COMPLETE": { + logger.debug("[BatchTriggerV3][processBatchTaskRun] Batch processing complete", { + options, + batchId: batch.friendlyId, + attemptCount: $attemptCount, + }); + + return; + } + case "INCOMPLETE": { + logger.debug("[BatchTriggerV3][processBatchTaskRun] Batch processing incomplete", { + batchId: batch.friendlyId, + currentIndex: result.workingIndex, + attemptCount: $attemptCount, + }); + + // Only enqueue the next batch task run if the strategy is sequential + // if the strategy is parallel, we will already have enqueued the next batch task run + if (options.strategy === "sequential") { + await this.#enqueueBatchTaskRun({ + batchId: batch.id, + processingId: options.processingId, + range: { + start: result.workingIndex, + count: options.range.count, + }, + attemptCount: 0, + strategy: options.strategy, + parentRunId: options.parentRunId, + resumeParentOnCompletion: options.resumeParentOnCompletion, + }); + } + + return; + } + case "ERROR": { + logger.error("[BatchTriggerV3][processBatchTaskRun] 
Batch processing error", { + batchId: batch.friendlyId, + currentIndex: result.workingIndex, + error: result.error, + attemptCount: $attemptCount, + }); + + // if the strategy is sequential, we will requeue processing with a count of the PROCESSING_BATCH_SIZE + // if the strategy is parallel, we will requeue processing with a range starting at the workingIndex and a count that is the remainder of this "slice" of the batch + if (options.strategy === "sequential") { + await this.#enqueueBatchTaskRun({ + batchId: batch.id, + processingId: options.processingId, + range: { + start: result.workingIndex, + count: options.range.count, // This will be the same as the original count + }, + attemptCount: $attemptCount, + strategy: options.strategy, + parentRunId: options.parentRunId, + resumeParentOnCompletion: options.resumeParentOnCompletion, + }); + } else { + await this.#enqueueBatchTaskRun({ + batchId: batch.id, + processingId: options.processingId, + range: { + start: result.workingIndex, + // This will be the remainder of the slice + // for example if the original range was 0-50 and the workingIndex is 25, the new range will be 25-25 + // if the original range was 51-100 and the workingIndex is 75, the new range will be 75-25 + count: options.range.count - result.workingIndex - options.range.start, + }, + attemptCount: $attemptCount, + strategy: options.strategy, + parentRunId: options.parentRunId, + resumeParentOnCompletion: options.resumeParentOnCompletion, + }); + } + + return; + } + } + } + + async #processBatchTaskRunItems({ + batch, + environment, + currentIndex, + batchSize, + items, + options, + parentRunId, + resumeParentOnCompletion, + }: { + batch: BatchTaskRun; + environment: AuthenticatedEnvironment; + currentIndex: number; + batchSize: number; + items: BatchTriggerTaskV2RequestBody["items"]; + options?: BatchTriggerTaskServiceOptions; + parentRunId?: string | undefined; + resumeParentOnCompletion?: boolean | undefined; + }): Promise< + | { status: 
"COMPLETE" } + | { status: "INCOMPLETE"; workingIndex: number } + | { status: "ERROR"; error: string; workingIndex: number } + > { + // Grab the next PROCESSING_BATCH_SIZE runIds + const runFriendlyIds = batch.runIds.slice(currentIndex, currentIndex + batchSize); + + logger.debug("[BatchTriggerV3][processBatchTaskRun] Processing batch items", { + batchId: batch.friendlyId, + currentIndex, + runIds: runFriendlyIds, + runCount: batch.runCount, + }); + + // Combine the "window" between currentIndex and currentIndex + PROCESSING_BATCH_SIZE with the runId and the item in the payload which is an array + const itemsToProcess = runFriendlyIds.map((runFriendlyId, index) => ({ + runFriendlyId, + item: items[index + currentIndex], + })); + + let workingIndex = currentIndex; + + for (const item of itemsToProcess) { + try { + await this.#processBatchTaskRunItem({ + batch, + environment, + task: item, + currentIndex: workingIndex, + options, + parentRunId, + resumeParentOnCompletion, + }); + + workingIndex++; + } catch (error) { + logger.error("[BatchTriggerV3][processBatchTaskRun] Failed to process item", { + batchId: batch.friendlyId, + currentIndex: workingIndex, + error, + }); + + return { + status: "ERROR", + error: error instanceof Error ? 
error.message : String(error), + workingIndex, + }; + } + } + + // if there are more items to process, requeue the batch + if (workingIndex < batch.runCount) { + return { status: "INCOMPLETE", workingIndex }; + } + + return { status: "COMPLETE" }; + } + + async #processBatchTaskRunItem({ + batch, + environment, + task, + currentIndex, + options, + parentRunId, + resumeParentOnCompletion, + }: { + batch: BatchTaskRun; + environment: AuthenticatedEnvironment; + task: { runFriendlyId: string; item: BatchTriggerTaskV2RequestBody["items"][number] }; + currentIndex: number; + options?: BatchTriggerTaskServiceOptions; + parentRunId: string | undefined; + resumeParentOnCompletion: boolean | undefined; + }) { + logger.debug("[BatchTriggerV3][processBatchTaskRunItem] Processing item", { + batchId: batch.friendlyId, + runId: task.runFriendlyId, + currentIndex, + }); + + const triggerTaskService = new TriggerTaskService(); + + await triggerTaskService.call( + task.item.task, + environment, + { + ...task.item, + options: { + ...task.item.options, + parentRunId, + resumeParentOnCompletion, + }, + }, + { + triggerVersion: options?.triggerVersion, + traceContext: options?.traceContext, + spanParentAsLink: options?.spanParentAsLink, + batchId: batch.friendlyId, + skipChecks: true, + runFriendlyId: task.runFriendlyId, + }, + "V2" + ); + } + + async #enqueueBatchTaskRun(options: BatchProcessingOptions, tx?: PrismaClientOrTransaction) { + await workerQueue.enqueue("v3.processBatchTaskRunV3", options, { + tx, + jobKey: `BatchTriggerV3Service.process:${options.batchId}:${options.processingId}`, + }); + } + + async #handlePayloadPacket( + payload: any, + pathPrefix: string, + environment: AuthenticatedEnvironment + ) { + return await startActiveSpan("handlePayloadPacket()", async (span) => { + const packet = { data: JSON.stringify(payload), dataType: "application/json" }; + + if (!packet.data) { + return packet; + } + + const { needsOffloading } = packetRequiresOffloading( + packet, + 
env.TASK_PAYLOAD_OFFLOAD_THRESHOLD + ); + + if (!needsOffloading) { + return packet; + } + + const filename = `${pathPrefix}/payload.json`; + + await uploadPacketToObjectStore(filename, packet.data, packet.dataType, environment); + + return { + data: filename, + dataType: "application/store", + }; + }); + } + + async #blockParentRun({ + parentRunId, + childFriendlyIds, + environment, + }: { + parentRunId: string; + childFriendlyIds: string[]; + environment: AuthenticatedEnvironment; + }) { + const runsWithAssociatedWaitpoints = await this._prisma.taskRun.findMany({ + where: { + id: { + in: childFriendlyIds.map((r) => RunId.fromFriendlyId(r)), + }, + }, + select: { + associatedWaitpoint: { + select: { + id: true, + }, + }, + }, + }); + + await this._engine.blockRunWithWaitpoint({ + runId: RunId.fromFriendlyId(parentRunId), + waitpointId: runsWithAssociatedWaitpoints.flatMap((r) => + r.associatedWaitpoint ? [r.associatedWaitpoint.id] : [] + ), + environmentId: environment.id, + projectId: environment.projectId, + }); + } +} diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index 5c7f1ad753..55ed259b8d 100644 --- a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -31,14 +31,15 @@ export class TriggerTaskService extends WithRunEngine { taskId: string, environment: AuthenticatedEnvironment, body: TriggerTaskRequestBody, - options: TriggerTaskServiceOptions = {} + options: TriggerTaskServiceOptions = {}, + version?: RunEngineVersion ) { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); - const version = await determineEngineVersion({ environment }); + const v = await determineEngineVersion({ environment, version }); - switch (version) { + switch (v) { case "V1": { return await this.callV1(taskId, environment, body, options); } diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts 
b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 781c45cec7..cc00491933 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -99,7 +99,6 @@ export class TriggerTaskServiceV2 extends WithRunEngine { waitpointId: existingRun.associatedWaitpoint.id, environmentId: environment.id, projectId: environment.projectId, - checkWaitpointIsPending: true, tx: this._prisma, }); } @@ -164,7 +163,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { //todo we will pass in the `parentRun` and `resumeParentOnCompletion` const parentRun = body.options?.parentRunId ? await this._prisma.taskRun.findFirst({ - where: { friendlyId: body.options.parentRunId }, + where: { id: RunId.fromFriendlyId(body.options.parentRunId) }, }) : undefined; @@ -299,7 +298,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { tags, oneTimeUseToken: options.oneTimeUseToken, parentTaskRunId: parentRun?.id, - rootTaskRunId: parentRun?.rootTaskRunId ?? undefined, + rootTaskRunId: parentRun?.rootTaskRunId ?? parentRun?.id, batchId: body.options?.parentBatch ?? undefined, resumeParentOnCompletion: body.options?.resumeParentOnCompletion, depth, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index d4358b7786..f54fe3435f 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1577,20 +1577,19 @@ export class RunEngine { waitpointId, projectId, failAfter, - checkWaitpointIsPending = false, tx, }: { runId: string; - waitpointId: string; + waitpointId: string | string[]; environmentId: string; projectId: string; failAfter?: Date; - /** If the waitpoint could be completed, i.e. not inside a run lock and not new */ - checkWaitpointIsPending?: boolean; tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? this.prisma; + let waitpointIds = typeof waitpointId === "string" ? 
[waitpointId] : waitpointId; + return await this.runLock.lock([runId], 5000, async (signal) => { let snapshot: TaskRunExecutionSnapshot = await getLatestExecutionSnapshot(prisma, runId); @@ -1602,29 +1601,25 @@ export class RunEngine { newStatus = "EXECUTING_WITH_WAITPOINTS"; } - if (checkWaitpointIsPending) { - const waitpoint = await prisma.waitpoint.findUnique({ - where: { id: waitpointId }, - }); - - if (!waitpoint) { - throw new ServiceValidationError("Waitpoint not found", 404); - } - - //the waitpoint has been completed since it was retrieved - if (waitpoint.status !== "PENDING") { - return snapshot; - } + const insertedBlockers = await prisma.$executeRaw` + INSERT INTO "TaskRunWaitpoint" ("id", "taskRunId", "waitpointId", "projectId", "createdAt", "updatedAt") + SELECT + gen_random_uuid(), + ${runId}, + w.id, + ${projectId}, + NOW(), + NOW() + FROM "Waitpoint" w + WHERE w.id IN (${Prisma.join(waitpointIds)}) + AND w.status = 'PENDING' + ON CONFLICT DO NOTHING;`; + + //if no runs were blocked we don't want to do anything more + if (insertedBlockers === 0) { + return snapshot; } - const taskWaitpoint = await prisma.taskRunWaitpoint.create({ - data: { - taskRunId: runId, - waitpointId: waitpointId, - projectId: projectId, - }, - }); - //if the state has changed, create a new snapshot if (newStatus !== snapshot.executionStatus) { snapshot = await this.#createExecutionSnapshot(prisma, { @@ -1641,12 +1636,14 @@ export class RunEngine { } if (failAfter) { - await this.worker.enqueue({ - id: `finishWaitpoint.${waitpointId}`, - job: "finishWaitpoint", - payload: { waitpointId, error: "Waitpoint timed out" }, - availableAt: failAfter, - }); + for (const waitpointId of waitpointIds) { + await this.worker.enqueue({ + id: `finishWaitpoint.${waitpointId}`, + job: "finishWaitpoint", + payload: { waitpointId, error: "Waitpoint timed out" }, + availableAt: failAfter, + }); + } } return snapshot; diff --git a/packages/core/src/v3/apps/friendlyId.ts 
b/packages/core/src/v3/apps/friendlyId.ts index f982ec280d..c95813eb48 100644 --- a/packages/core/src/v3/apps/friendlyId.ts +++ b/packages/core/src/v3/apps/friendlyId.ts @@ -80,3 +80,4 @@ export const QueueId = new IdUtil("queue"); export const RunId = new IdUtil("run"); export const SnapshotId = new IdUtil("snapshot"); export const WaitpointId = new IdUtil("waitpoint"); +export const BatchId = new IdUtil("batch"); diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index e29e06b5c9..90eddcd5e7 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -68,8 +68,6 @@ export class ManagedRuntimeManager implements RuntimeManager { runs: string[]; ctx: TaskRunContext; }): Promise { - console.log("waitForBatch", params); - if (!params.runs.length) { return Promise.resolve({ id: params.id, items: [] }); } diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 2c52f740a0..609f7c8e15 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -154,7 +154,18 @@ export type BatchTriggerTaskItem = z.infer; export const BatchTriggerTaskV2RequestBody = z.object({ items: BatchTriggerTaskItem.array(), + /** @deprecated engine v1 only */ dependentAttempt: z.string().optional(), + /** + * RunEngine v2 + * If triggered inside another run, the parentRunId is the friendly ID of the parent run. 
+ */ + parentRunId: z.string().optional(), + /** + * RunEngine v2 + * Should be `true` if `triggerAndWait` or `batchTriggerAndWait` + */ + resumeParentOnCompletion: z.boolean().optional(), }); export type BatchTriggerTaskV2RequestBody = z.infer; diff --git a/packages/trigger-sdk/src/v3/shared.ts b/packages/trigger-sdk/src/v3/shared.ts index 9bfca56ce4..86e0175662 100644 --- a/packages/trigger-sdk/src/v3/shared.ts +++ b/packages/trigger-sdk/src/v3/shared.ts @@ -617,6 +617,7 @@ export async function batchTriggerById( }; }) ), + parentRunId: taskContext.ctx?.run.id, }, { spanParentAsLink: true, @@ -791,6 +792,8 @@ export async function batchTriggerByIdAndWait( }) ), dependentAttempt: ctx.attempt.id, + parentRunId: ctx.run.id, + resumeParentOnCompletion: true, }, { processingStrategy: options?.triggerSequentially ? "sequential" : undefined, @@ -951,6 +954,7 @@ export async function batchTriggerTasks( }; }) ), + parentRunId: taskContext.ctx?.run.id, }, { spanParentAsLink: true, @@ -1127,6 +1131,8 @@ export async function batchTriggerAndWaitTasks( parentAttempt: taskContext.ctx?.attempt.id, metadata: options?.metadata, maxDuration: options?.maxDuration, + parentRunId: taskContext.ctx?.run.id, }, }, { @@ -1234,6 +1241,8 @@ async function batchTrigger_internal( ): Promise> { const apiClient = apiClientManager.clientOrThrow(); + const ctx = taskContext.ctx; + const response = await apiClient.batchTriggerV2( { items: await Promise.all( @@ -1259,6 +1268,7 @@ async function batchTrigger_internal( parentAttempt: taskContext.ctx?.attempt.id, metadata: item.options?.metadata, maxDuration: item.options?.maxDuration, + parentRunId: ctx?.run.id, }, }; }) @@ -1430,13 +1440,13 @@ async function batchTriggerAndWait_internal { + logger.log("Hello, world from the parent", { payload }); + + const results = await childTask.batchTriggerAndWait([ + { payload: { message: "Hello, world!" } }, + { payload: { message: "Hello, world 2!" 
} }, + ]); + logger.log("Results", { results }); + + const results2 = await batch.triggerAndWait([ + { id: "child", payload: { message: "Hello, world !" } }, + { id: "child", payload: { message: "Hello, world 2!" } }, + ]); + logger.log("Results 2", { results2 }); + + const results3 = await batch.triggerByTask([ + { task: childTask, payload: { message: "Hello, world !" } }, + { task: childTask, payload: { message: "Hello, world 2!" } }, + ]); + logger.log("Results 3", { results3 }); + + const results4 = await batch.triggerByTaskAndWait([ + { task: childTask, payload: { message: "Hello, world !" } }, + { task: childTask, payload: { message: "Hello, world 2!" } }, + ]); + logger.log("Results 4", { results4 }); + }, +}); + export const childTask = task({ id: "child", - run: async (payload: any, { ctx }) => { - logger.info("Hello, world from the child", { payload }); + run: async ( + { message, failureChance = 0.3 }: { message?: string; failureChance?: number }, + { ctx } + ) => { + logger.info("Hello, world from the child", { message, failureChance }); - if (Math.random() > 0.5) { + if (Math.random() < failureChance) { throw new Error("Random error at start"); } - await setTimeout(10000); + await setTimeout(3_000); - if (Math.random() > 0.5) { + if (Math.random() < failureChance) { throw new Error("Random error at end"); } + + return { + message, + }; }, }); From 0642c9ce0512b47294f9ff1eb4557eb72600fff9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 3 Jan 2025 13:28:55 +0000 Subject: [PATCH 321/485] More changes to blocking to support continuing after idempotent completed runs --- .../app/v3/services/triggerTaskV2.server.ts | 2 +- .../run-engine/src/engine/index.ts | 55 +++++++++----- references/hello-world/src/trigger/example.ts | 8 +- .../hello-world/src/trigger/idempotency.ts | 76 +++++++++++++++++++ 4 files changed, 119 insertions(+), 22 deletions(-) create mode 100644 references/hello-world/src/trigger/idempotency.ts diff --git 
a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index cc00491933..ad1dd097d0 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -90,7 +90,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { } else { //We're using `andWait` so we need to block the parent run with a waitpoint if ( - existingRun.associatedWaitpoint?.status === "PENDING" && + existingRun.associatedWaitpoint && body.options?.resumeParentOnCompletion && body.options?.parentRunId ) { diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index f54fe3435f..9737f4f8a2 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1593,6 +1593,29 @@ export class RunEngine { return await this.runLock.lock([runId], 5000, async (signal) => { let snapshot: TaskRunExecutionSnapshot = await getLatestExecutionSnapshot(prisma, runId); + //block the run with the waitpoints, returning how many waitpoints are pending + const insert = await prisma.$queryRaw<{ pending_count: number }[]>` + WITH inserted AS ( + INSERT INTO "TaskRunWaitpoint" ("id", "taskRunId", "waitpointId", "projectId", "createdAt", "updatedAt") + SELECT + gen_random_uuid(), + ${runId}, + w.id, + ${projectId}, + NOW(), + NOW() + FROM "Waitpoint" w + WHERE w.id IN (${Prisma.join(waitpointIds)}) + ON CONFLICT DO NOTHING + RETURNING "waitpointId" + ) + SELECT COUNT(*) as pending_count + FROM inserted i + JOIN "Waitpoint" w ON w.id = i."waitpointId" + WHERE w.status = 'PENDING';`; + + const pendingCount = insert.at(0)?.pending_count ?? 
0; + let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; if ( snapshot.executionStatus === "EXECUTING" || @@ -1601,25 +1624,6 @@ export class RunEngine { newStatus = "EXECUTING_WITH_WAITPOINTS"; } - const insertedBlockers = await prisma.$executeRaw` - INSERT INTO "TaskRunWaitpoint" ("id", "taskRunId", "waitpointId", "projectId", "createdAt", "updatedAt") - SELECT - gen_random_uuid(), - ${runId}, - w.id, - ${projectId}, - NOW(), - NOW() - FROM "Waitpoint" w - WHERE w.id IN (${Prisma.join(waitpointIds)}) - AND w.status = 'PENDING' - ON CONFLICT DO NOTHING;`; - - //if no runs were blocked we don't want to do anything more - if (insertedBlockers === 0) { - return snapshot; - } - //if the state has changed, create a new snapshot if (newStatus !== snapshot.executionStatus) { snapshot = await this.#createExecutionSnapshot(prisma, { @@ -1646,6 +1650,19 @@ export class RunEngine { } } + //no pending waitpoint, schedule unblocking the run + //debounce if we're rapidly adding waitpoints + if (pendingCount === 0) { + await this.worker.enqueue({ + //this will debounce the call + id: `continueRunIfUnblocked:${runId}`, + job: "continueRunIfUnblocked", + payload: { runId: runId }, + //100ms in the future + availableAt: new Date(Date.now() + 100), + }); + } + return snapshot; }); } diff --git a/references/hello-world/src/trigger/example.ts b/references/hello-world/src/trigger/example.ts index 547ac77bc9..18cb388b25 100644 --- a/references/hello-world/src/trigger/example.ts +++ b/references/hello-world/src/trigger/example.ts @@ -60,7 +60,11 @@ export const batchParentTask = task({ export const childTask = task({ id: "child", run: async ( - { message, failureChance = 0.3 }: { message?: string; failureChance?: number }, + { + message, + failureChance = 0.3, + duration = 3_000, + }: { message?: string; failureChance?: number; duration?: number }, { ctx } ) => { logger.info("Hello, world from the child", { message, failureChance }); @@ -69,7 +73,7 @@ export const childTask 
= task({ throw new Error("Random error at start"); } - await setTimeout(3_000); + await setTimeout(duration); if (Math.random() < failureChance) { throw new Error("Random error at end"); diff --git a/references/hello-world/src/trigger/idempotency.ts b/references/hello-world/src/trigger/idempotency.ts new file mode 100644 index 0000000000..9136399cc4 --- /dev/null +++ b/references/hello-world/src/trigger/idempotency.ts @@ -0,0 +1,76 @@ +import { batch, idempotencyKeys, logger, task, timeout, usage, wait } from "@trigger.dev/sdk/v3"; +import { setTimeout } from "timers/promises"; +import { childTask } from "./example.js"; + +export const idempotency = task({ + id: "idempotency", + run: async (payload: any, { ctx }) => { + logger.log("Hello, world from the parent", { payload }); + + const child1Key = await idempotencyKeys.create("a", { scope: "global" }); + + const child1 = await childTask.triggerAndWait( + { message: "Hello, world!", duration: 10_000 }, + { idempotencyKey: child1Key, idempotencyKeyTTL: "60s" } + ); + logger.log("Child 1", { child1 }); + + ctx.attempt.id; + + const child2 = await childTask.triggerAndWait( + { message: "Hello, world!", duration: 10_000 }, + { idempotencyKey: child1Key, idempotencyKeyTTL: "60s" } + ); + logger.log("Child 2", { child2 }); + + // const results = await childTask.batchTriggerAndWait([ + // { + // payload: { message: "Hello, world!" }, + // //@ts-ignore + // options: { idempotencyKey: "1", idempotencyKeyTTL: "60s" }, + // }, + // { + // payload: { message: "Hello, world 2!" }, + // //@ts-ignore + // options: { idempotencyKey: "2", idempotencyKeyTTL: "60s" }, + // }, + // ]); + // logger.log("Results", { results }); + + // const results2 = await batch.triggerAndWait([ + // { + // id: "child", + // payload: { message: "Hello, world !" }, + // //@ts-ignore + // options: { idempotencyKey: "1", idempotencyKeyTTL: "60s" }, + // }, + // { + // id: "child", + // payload: { message: "Hello, world 2!" 
}, + // //@ts-ignore + // options: { idempotencyKey: "2", idempotencyKeyTTL: "60s" }, + // }, + // ]); + // logger.log("Results 2", { results2 }); + + // const results3 = await batch.triggerByTask([ + // { + // task: childTask, + // payload: { message: "Hello, world !" }, + // options: { idempotencyKey: "1", idempotencyKeyTTL: "60s" }, + // }, + // { + // task: childTask, + // payload: { message: "Hello, world 2!" }, + // options: { idempotencyKey: "2", idempotencyKeyTTL: "60s" }, + // }, + // ]); + // logger.log("Results 3", { results3 }); + + // const results4 = await batch.triggerByTaskAndWait([ + // { task: childTask, payload: { message: "Hello, world !" } }, + // { task: childTask, payload: { message: "Hello, world 2!" } }, + // ]); + // logger.log("Results 4", { results4 }); + }, +}); From fdf0bf6214418ee4a2b37cb9feb6b3b88db531a7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 3 Jan 2025 16:07:54 +0000 Subject: [PATCH 322/485] Fix for the wrong type when blocking a run --- internal-packages/run-engine/src/engine/index.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 9737f4f8a2..83dc41e65b 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1594,7 +1594,7 @@ export class RunEngine { let snapshot: TaskRunExecutionSnapshot = await getLatestExecutionSnapshot(prisma, runId); //block the run with the waitpoints, returning how many waitpoints are pending - const insert = await prisma.$queryRaw<{ pending_count: number }[]>` + const insert = await prisma.$queryRaw<{ pending_count: BigInt }[]>` WITH inserted AS ( INSERT INTO "TaskRunWaitpoint" ("id", "taskRunId", "waitpointId", "projectId", "createdAt", "updatedAt") SELECT @@ -1614,7 +1614,7 @@ export class RunEngine { JOIN "Waitpoint" w ON w.id = i."waitpointId" WHERE w.status = 'PENDING';`; - const pendingCount = 
insert.at(0)?.pending_count ?? 0; + const pendingCount = Number(insert.at(0)?.pending_count ?? 0); let newStatus: TaskRunExecutionStatus = "BLOCKED_BY_WAITPOINTS"; if ( From d6e87439c660d7964e4187f2639633207adcda03 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 2 Jan 2025 15:23:47 +0000 Subject: [PATCH 323/485] remove @map --- internal-packages/database/prisma/schema.prisma | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 5de8bd3728..27ae1650c6 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2149,7 +2149,7 @@ model WorkerInstance { } enum WorkerInstanceGroupType { - MANAGED @map("SHARED") + MANAGED UNMANAGED } From 9d24fc1aec140c748b778be577b0a8c25a9eab77 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 2 Jan 2025 19:01:45 +0000 Subject: [PATCH 324/485] optimise worker auth query --- .../worker/workerGroupTokenService.server.ts | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 66e5752f83..54bef5c7b5 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -52,24 +52,20 @@ export class WorkerGroupTokenService extends WithRunEngine { async findWorkerGroup({ token }: { token: string }) { const tokenHash = await this.hashToken(token); - const workerGroupToken = await this._prisma.workerGroupToken.findFirst({ + const workerGroup = await this._prisma.workerInstanceGroup.findFirst({ where: { - workerGroup: { - isNot: null, + token: { + tokenHash, }, - tokenHash, - }, - include: { - workerGroup: true, }, }); - if 
(!workerGroupToken) { - logger.warn("[WorkerGroupTokenService] Token not found", { token }); - return; + if (!workerGroup) { + logger.warn("[WorkerGroupTokenService] No matching worker group found", { token }); + return null; } - return workerGroupToken.workerGroup; + return workerGroup; } async rotateToken({ workerGroupId }: { workerGroupId: string }) { From 446a94f73535ae565fcc59f40ed2e7cb5effd579 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 2 Jan 2025 22:28:52 +0000 Subject: [PATCH 325/485] add engine version header to core api client requests --- .../routes/api.v1.tasks.$taskId.trigger.ts | 30 +++++++++++++------ apps/webapp/app/routes/api.v1.tasks.batch.ts | 7 ++++- .../src/entryPoints/deploy-run-controller.ts | 5 +++- .../src/entryPoints/managed-run-controller.ts | 1 + .../entryPoints/unmanaged-run-controller.ts | 1 + packages/core/src/v3/apiClient/index.ts | 1 + packages/core/src/v3/schemas/messages.ts | 2 ++ packages/core/src/v3/schemas/schemas.ts | 3 ++ 8 files changed, 39 insertions(+), 11 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts index e6e3398e69..96eec3ba31 100644 --- a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts +++ b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts @@ -1,5 +1,9 @@ import { json } from "@remix-run/server-runtime"; -import { generateJWT as internal_generateJWT, TriggerTaskRequestBody } from "@trigger.dev/core/v3"; +import { + generateJWT as internal_generateJWT, + RunEngineVersionSchema, + TriggerTaskRequestBody, +} from "@trigger.dev/core/v3"; import { TaskRun } from "@trigger.dev/database"; import { z } from "zod"; import { env } from "~/env.server"; @@ -21,6 +25,7 @@ export const HeadersSchema = z.object({ "x-trigger-span-parent-as-link": z.coerce.number().nullish(), "x-trigger-worker": z.string().nullish(), "x-trigger-client": z.string().nullish(), + 
"x-trigger-engine-version": RunEngineVersionSchema.nullish(), traceparent: z.string().optional(), tracestate: z.string().optional(), }); @@ -49,6 +54,7 @@ const { action, loader } = createActionApiRoute( tracestate, "x-trigger-worker": isFromWorker, "x-trigger-client": triggerClient, + "x-trigger-engine-version": engineVersion, } = headers; const service = new TriggerTaskService(); @@ -74,14 +80,20 @@ const { action, loader } = createActionApiRoute( const idempotencyKeyExpiresAt = resolveIdempotencyKeyTTL(idempotencyKeyTTL); - const run = await service.call(params.taskId, authentication.environment, body, { - idempotencyKey: idempotencyKey ?? undefined, - idempotencyKeyExpiresAt: idempotencyKeyExpiresAt, - triggerVersion: triggerVersion ?? undefined, - traceContext, - spanParentAsLink: spanParentAsLink === 1, - oneTimeUseToken, - }); + const run = await service.call( + params.taskId, + authentication.environment, + body, + { + idempotencyKey: idempotencyKey ?? undefined, + idempotencyKeyExpiresAt: idempotencyKeyExpiresAt, + triggerVersion: triggerVersion ?? undefined, + traceContext, + spanParentAsLink: spanParentAsLink === 1, + oneTimeUseToken, + }, + engineVersion ?? undefined + ); if (!run) { return json({ error: "Task not found" }, { status: 404 }); diff --git a/apps/webapp/app/routes/api.v1.tasks.batch.ts b/apps/webapp/app/routes/api.v1.tasks.batch.ts index c7534acee4..591d04f0ce 100644 --- a/apps/webapp/app/routes/api.v1.tasks.batch.ts +++ b/apps/webapp/app/routes/api.v1.tasks.batch.ts @@ -58,6 +58,7 @@ const { action, loader } = createActionApiRoute( "x-trigger-span-parent-as-link": spanParentAsLink, "x-trigger-worker": isFromWorker, "x-trigger-client": triggerClient, + "x-trigger-engine-version": engineVersion, "batch-processing-strategy": batchProcessingStrategy, traceparent, tracestate, @@ -87,7 +88,11 @@ const { action, loader } = createActionApiRoute( resolveIdempotencyKeyTTL(idempotencyKeyTTL) ?? 
new Date(Date.now() + 24 * 60 * 60 * 1000 * 30); - const version = await determineEngineVersion({ environment: authentication.environment }); + const version = await determineEngineVersion({ + environment: authentication.environment, + version: engineVersion ?? undefined, + }); + const service = version === "V1" ? new BatchTriggerV2Service(batchProcessingStrategy ?? undefined) diff --git a/packages/cli-v3/src/entryPoints/deploy-run-controller.ts b/packages/cli-v3/src/entryPoints/deploy-run-controller.ts index e17a152241..8b082a03f2 100644 --- a/packages/cli-v3/src/entryPoints/deploy-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/deploy-run-controller.ts @@ -879,7 +879,10 @@ class ProdWorker { this._taskRunProcess = new TaskRunProcess({ workerManifest: this.workerManifest, env, - serverWorker: execution.worker, + serverWorker: { + ...execution.worker, + engine: "V1", + }, payload: createAttempt.result.executionPayload, messageId: message.lazyPayload.messageId, }); diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 5ae9f37939..9394b97c15 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -453,6 +453,7 @@ class ManagedRunController { id: "unmanaged", contentHash: env.TRIGGER_CONTENT_HASH, version: env.TRIGGER_DEPLOYMENT_VERSION, + engine: "V2", }, payload: { execution, diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts index 20b026ff91..11989e89da 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts @@ -65,6 +65,7 @@ class UnmanagedRunController { id: "unmanaged", contentHash: env.TRIGGER_CONTENT_HASH, version: env.TRIGGER_DEPLOYMENT_VERSION, + engine: "V2", }, payload: { execution, diff --git 
a/packages/core/src/v3/apiClient/index.ts b/packages/core/src/v3/apiClient/index.ts index 2b778a14d8..0b062914ea 100644 --- a/packages/core/src/v3/apiClient/index.ts +++ b/packages/core/src/v3/apiClient/index.ts @@ -692,6 +692,7 @@ export class ApiClient { "Content-Type": "application/json", Authorization: `Bearer ${this.accessToken}`, "trigger-version": VERSION, + "x-trigger-engine-version": taskContext.worker?.engine ?? "V1", ...Object.entries(additionalHeaders ?? {}).reduce( (acc, [key, value]) => { if (value !== undefined) { diff --git a/packages/core/src/v3/schemas/messages.ts b/packages/core/src/v3/schemas/messages.ts index 3a25cf0964..9da770646a 100644 --- a/packages/core/src/v3/schemas/messages.ts +++ b/packages/core/src/v3/schemas/messages.ts @@ -12,6 +12,7 @@ import { EnvironmentType, ProdTaskRunExecution, ProdTaskRunExecutionPayload, + RunEngineVersionSchema, RuntimeWait, TaskRunExecutionLazyAttemptPayload, WaitReason, @@ -103,6 +104,7 @@ export const ServerBackgroundWorker = z.object({ id: z.string(), version: z.string(), contentHash: z.string(), + engine: RunEngineVersionSchema.optional(), }); export type ServerBackgroundWorker = z.infer; diff --git a/packages/core/src/v3/schemas/schemas.ts b/packages/core/src/v3/schemas/schemas.ts index 0256a1f19b..c4494f3dde 100644 --- a/packages/core/src/v3/schemas/schemas.ts +++ b/packages/core/src/v3/schemas/schemas.ts @@ -8,6 +8,8 @@ import { MachineConfig, MachinePreset, TaskRunExecution } from "./common.js"; export const EnvironmentType = z.enum(["PRODUCTION", "STAGING", "DEVELOPMENT", "PREVIEW"]); export type EnvironmentType = z.infer; +export const RunEngineVersionSchema = z.enum(["V1", "V2"]); + export const TaskRunExecutionPayload = z.object({ execution: TaskRunExecution, traceContext: z.record(z.unknown()), @@ -25,6 +27,7 @@ export const ProdTaskRunExecution = TaskRunExecution.extend({ id: z.string(), contentHash: z.string(), version: z.string(), + type: RunEngineVersionSchema.optional(), }), machine: 
MachinePreset.default({ name: "small-1x", cpu: 1, memory: 1, centsPerMs: 0 }), }); From ca82d753bbf41df2cf7e66d7371ca2a9e6c0edee Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 3 Jan 2025 16:13:33 +0000 Subject: [PATCH 326/485] remove unique constraint for default group id --- internal-packages/database/prisma/schema.prisma | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 27ae1650c6..4860f6d61b 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -459,7 +459,7 @@ model Project { workers WorkerInstance[] defaultWorkerGroup WorkerInstanceGroup? @relation("ProjectDefaultWorkerGroup", fields: [defaultWorkerGroupId], references: [id]) - defaultWorkerGroupId String? @unique + defaultWorkerGroupId String? environments RuntimeEnvironment[] endpoints Endpoint[] From 3c673501d4335b9341e35d346bafc68e9363083a Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 3 Jan 2025 16:13:47 +0000 Subject: [PATCH 327/485] consolidate migrations --- .../migration.sql | 9 -- .../migration.sql | 2 - .../migration.sql | 126 ----------------- .../migration.sql | 14 -- .../migration.sql | 84 ----------- .../migration.sql | 15 -- .../migration.sql | 130 +++++++++++++++++- 7 files changed, 124 insertions(+), 256 deletions(-) delete mode 100644 internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql delete mode 100644 internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql delete mode 100644 internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql delete mode 100644 
internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql delete mode 100644 internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql delete mode 100644 internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql rename internal-packages/database/prisma/migrations/{20241024144743_run_engine_first_migration => 20250103152909_add_run_engine_v2}/migration.sql (53%) diff --git a/internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql b/internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql deleted file mode 100644 index 4223408016..0000000000 --- a/internal-packages/database/prisma/migrations/20241025173231_task_run_execution_snapshot_added_is_valid_and_error/migration.sql +++ /dev/null @@ -1,9 +0,0 @@ --- DropIndex -DROP INDEX "TaskRunExecutionSnapshot_runId_createdAt_idx"; - --- AlterTable -ALTER TABLE "TaskRunExecutionSnapshot" ADD COLUMN "error" TEXT, -ADD COLUMN "isValid" BOOLEAN NOT NULL DEFAULT true; - --- CreateIndex -CREATE INDEX "TaskRunExecutionSnapshot_runId_isValid_createdAt_idx" ON "TaskRunExecutionSnapshot"("runId", "isValid", "createdAt" DESC); diff --git a/internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql b/internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql deleted file mode 100644 index d994f02179..0000000000 --- a/internal-packages/database/prisma/migrations/20241105144506_waitpoint_added_output_is_error_column/migration.sql +++ /dev/null @@ -1,2 +0,0 @@ --- AlterTable -ALTER TABLE "Waitpoint" ADD COLUMN "outputIsError" BOOLEAN NOT NULL DEFAULT false; diff --git 
a/internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql b/internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql deleted file mode 100644 index 239d8933e2..0000000000 --- a/internal-packages/database/prisma/migrations/20241111171629_project_worker_group_master_queue_token_id_and_default_worker_group/migration.sql +++ /dev/null @@ -1,126 +0,0 @@ -/* - Warnings: - - - You are about to drop the `Worker` table. If the table is not empty, all the data it contains will be lost. - - A unique constraint covering the columns `[defaultWorkerGroupId]` on the table `Project` will be added. If there are existing duplicate values, this will fail. - - A unique constraint covering the columns `[masterQueue]` on the table `WorkerGroup` will be added. If there are existing duplicate values, this will fail. - - A unique constraint covering the columns `[tokenId]` on the table `WorkerGroup` will be added. If there are existing duplicate values, this will fail. - - Added the required column `name` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. - - Added the required column `tokenId` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. - - Added the required column `type` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. - - Added the required column `updatedAt` to the `WorkerGroup` table without a default value. This is not possible if the table is not empty. 
- -*/ --- CreateEnum -CREATE TYPE "WorkerInstanceGroupType" AS ENUM ('SHARED', 'UNMANAGED'); - --- AlterTable -ALTER TABLE "BackgroundWorker" ADD COLUMN "workerGroupId" TEXT; - --- AlterTable -ALTER TABLE "Project" ADD COLUMN "defaultWorkerGroupId" TEXT; - --- AlterTable -ALTER TABLE "TaskRunExecutionSnapshot" ADD COLUMN "lastHeartbeatAt" TIMESTAMP(3), -ADD COLUMN "workerId" TEXT; - --- AlterTable -ALTER TABLE "WorkerGroup" ADD COLUMN "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, -ADD COLUMN "description" TEXT, -ADD COLUMN "hidden" BOOLEAN NOT NULL DEFAULT false, -ADD COLUMN "name" TEXT NOT NULL, -ADD COLUMN "organizationId" TEXT, -ADD COLUMN "projectId" TEXT, -ADD COLUMN "tokenId" TEXT NOT NULL, -ADD COLUMN "type" "WorkerInstanceGroupType" NOT NULL, -ADD COLUMN "updatedAt" TIMESTAMP(3) NOT NULL; - --- DropTable -DROP TABLE "Worker"; - --- CreateTable -CREATE TABLE "FeatureFlag" ( - "id" TEXT NOT NULL, - "key" TEXT NOT NULL, - "value" JSONB, - - CONSTRAINT "FeatureFlag_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "WorkerInstance" ( - "id" TEXT NOT NULL, - "name" TEXT NOT NULL, - "workerGroupId" TEXT NOT NULL, - "organizationId" TEXT, - "projectId" TEXT, - "environmentId" TEXT, - "deploymentId" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - "lastDequeueAt" TIMESTAMP(3), - "lastHeartbeatAt" TIMESTAMP(3), - - CONSTRAINT "WorkerInstance_pkey" PRIMARY KEY ("id") -); - --- CreateTable -CREATE TABLE "WorkerGroupToken" ( - "id" TEXT NOT NULL, - "tokenHash" TEXT NOT NULL, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "WorkerGroupToken_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE UNIQUE INDEX "FeatureFlag_key_key" ON "FeatureFlag"("key"); - --- CreateIndex -CREATE UNIQUE INDEX "WorkerInstance_workerGroupId_name_key" ON "WorkerInstance"("workerGroupId", "name"); - --- CreateIndex -CREATE UNIQUE INDEX 
"WorkerGroupToken_tokenHash_key" ON "WorkerGroupToken"("tokenHash"); - --- CreateIndex -CREATE UNIQUE INDEX "Project_defaultWorkerGroupId_key" ON "Project"("defaultWorkerGroupId"); - --- CreateIndex -CREATE UNIQUE INDEX "WorkerGroup_masterQueue_key" ON "WorkerGroup"("masterQueue"); - --- CreateIndex -CREATE UNIQUE INDEX "WorkerGroup_tokenId_key" ON "WorkerGroup"("tokenId"); - --- AddForeignKey -ALTER TABLE "Project" ADD CONSTRAINT "Project_defaultWorkerGroupId_fkey" FOREIGN KEY ("defaultWorkerGroupId") REFERENCES "WorkerGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "BackgroundWorker" ADD CONSTRAINT "BackgroundWorker_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "WorkerInstance"("id") ON DELETE SET NULL ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerGroup"("id") ON DELETE RESTRICT ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_environmentId_fkey" FOREIGN KEY ("environmentId") REFERENCES "RuntimeEnvironment"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_deploymentId_fkey" FOREIGN KEY ("deploymentId") REFERENCES "WorkerDeployment"("id") ON DELETE SET NULL ON UPDATE CASCADE; - --- 
AddForeignKey -ALTER TABLE "WorkerGroup" ADD CONSTRAINT "WorkerGroup_tokenId_fkey" FOREIGN KEY ("tokenId") REFERENCES "WorkerGroupToken"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerGroup" ADD CONSTRAINT "WorkerGroup_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerGroup" ADD CONSTRAINT "WorkerGroup_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql b/internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql deleted file mode 100644 index 9e160392a2..0000000000 --- a/internal-packages/database/prisma/migrations/20241111171709_waitpoint_type_event_renamed_to_manual/migration.sql +++ /dev/null @@ -1,14 +0,0 @@ -/* - Warnings: - - - The values [EVENT] on the enum `WaitpointType` will be removed. If these variants are still used in the database, this will fail. 
- -*/ --- AlterEnum -BEGIN; -CREATE TYPE "WaitpointType_new" AS ENUM ('RUN', 'DATETIME', 'MANUAL'); -ALTER TABLE "Waitpoint" ALTER COLUMN "type" TYPE "WaitpointType_new" USING ("type"::text::"WaitpointType_new"); -ALTER TYPE "WaitpointType" RENAME TO "WaitpointType_old"; -ALTER TYPE "WaitpointType_new" RENAME TO "WaitpointType"; -DROP TYPE "WaitpointType_old"; -COMMIT; diff --git a/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql b/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql deleted file mode 100644 index 6ef11749a7..0000000000 --- a/internal-packages/database/prisma/migrations/20241113125729_batch_task_run_engine_changes_worker_instance_changes/migration.sql +++ /dev/null @@ -1,84 +0,0 @@ -/* - Warnings: - - - You are about to drop the `WorkerGroup` table. If the table is not empty, all the data it contains will be lost. - - A unique constraint covering the columns `[workerGroupId,resourceIdentifier]` on the table `WorkerInstance` will be added. If there are existing duplicate values, this will fail. - - Added the required column `resourceIdentifier` to the `WorkerInstance` table without a default value. This is not possible if the table is not empty. 
- -*/ --- DropForeignKey -ALTER TABLE "BackgroundWorker" DROP CONSTRAINT "BackgroundWorker_workerGroupId_fkey"; - --- DropForeignKey -ALTER TABLE "Project" DROP CONSTRAINT "Project_defaultWorkerGroupId_fkey"; - --- DropForeignKey -ALTER TABLE "WorkerGroup" DROP CONSTRAINT "WorkerGroup_organizationId_fkey"; - --- DropForeignKey -ALTER TABLE "WorkerGroup" DROP CONSTRAINT "WorkerGroup_projectId_fkey"; - --- DropForeignKey -ALTER TABLE "WorkerGroup" DROP CONSTRAINT "WorkerGroup_tokenId_fkey"; - --- DropForeignKey -ALTER TABLE "WorkerInstance" DROP CONSTRAINT "WorkerInstance_workerGroupId_fkey"; - --- DropIndex -DROP INDEX "WorkerInstance_workerGroupId_name_key"; - --- AlterTable -ALTER TABLE "Project" ADD COLUMN "engine" "RunEngineVersion" NOT NULL DEFAULT 'V1'; - --- AlterTable -ALTER TABLE "WorkerDeployment" ADD COLUMN "type" "WorkerInstanceGroupType" NOT NULL DEFAULT 'SHARED'; - --- AlterTable -ALTER TABLE "WorkerInstance" ADD COLUMN "resourceIdentifier" TEXT NOT NULL; - --- DropTable -DROP TABLE "WorkerGroup"; - --- CreateTable -CREATE TABLE "WorkerInstanceGroup" ( - "id" TEXT NOT NULL, - "type" "WorkerInstanceGroupType" NOT NULL, - "name" TEXT NOT NULL, - "masterQueue" TEXT NOT NULL, - "description" TEXT, - "hidden" BOOLEAN NOT NULL DEFAULT false, - "tokenId" TEXT NOT NULL, - "organizationId" TEXT, - "projectId" TEXT, - "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updatedAt" TIMESTAMP(3) NOT NULL, - - CONSTRAINT "WorkerInstanceGroup_pkey" PRIMARY KEY ("id") -); - --- CreateIndex -CREATE UNIQUE INDEX "WorkerInstanceGroup_masterQueue_key" ON "WorkerInstanceGroup"("masterQueue"); - --- CreateIndex -CREATE UNIQUE INDEX "WorkerInstanceGroup_tokenId_key" ON "WorkerInstanceGroup"("tokenId"); - --- CreateIndex -CREATE UNIQUE INDEX "WorkerInstance_workerGroupId_resourceIdentifier_key" ON "WorkerInstance"("workerGroupId", "resourceIdentifier"); - --- AddForeignKey -ALTER TABLE "Project" ADD CONSTRAINT "Project_defaultWorkerGroupId_fkey" FOREIGN KEY 
("defaultWorkerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "BackgroundWorker" ADD CONSTRAINT "BackgroundWorker_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE RESTRICT ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_tokenId_fkey" FOREIGN KEY ("tokenId") REFERENCES "WorkerGroupToken"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql b/internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql deleted file mode 100644 index 344fd8f717..0000000000 --- a/internal-packages/database/prisma/migrations/20241114155210_waitpoint_friendly_id_and_task_run_priority_ms/migration.sql +++ /dev/null @@ -1,15 +0,0 @@ -/* - Warnings: - - - A unique constraint covering the columns `[friendlyId]` on the table `Waitpoint` will be added. If there are existing duplicate values, this will fail. - - Added the required column `friendlyId` to the `Waitpoint` table without a default value. This is not possible if the table is not empty. 
- -*/ --- AlterTable -ALTER TABLE "TaskRun" ADD COLUMN "priorityMs" INTEGER NOT NULL DEFAULT 0; - --- AlterTable -ALTER TABLE "Waitpoint" ADD COLUMN "friendlyId" TEXT NOT NULL; - --- CreateIndex -CREATE UNIQUE INDEX "Waitpoint_friendlyId_key" ON "Waitpoint"("friendlyId"); diff --git a/internal-packages/database/prisma/migrations/20241024144743_run_engine_first_migration/migration.sql b/internal-packages/database/prisma/migrations/20250103152909_add_run_engine_v2/migration.sql similarity index 53% rename from internal-packages/database/prisma/migrations/20241024144743_run_engine_first_migration/migration.sql rename to internal-packages/database/prisma/migrations/20250103152909_add_run_engine_v2/migration.sql index 88301ce317..5161459ff5 100644 --- a/internal-packages/database/prisma/migrations/20241024144743_run_engine_first_migration/migration.sql +++ b/internal-packages/database/prisma/migrations/20250103152909_add_run_engine_v2/migration.sql @@ -8,29 +8,54 @@ CREATE TYPE "TaskRunExecutionStatus" AS ENUM ('RUN_CREATED', 'QUEUED', 'PENDING_ CREATE TYPE "TaskRunCheckpointType" AS ENUM ('DOCKER', 'KUBERNETES'); -- CreateEnum -CREATE TYPE "WaitpointType" AS ENUM ('RUN', 'DATETIME', 'EVENT'); +CREATE TYPE "WaitpointType" AS ENUM ('RUN', 'DATETIME', 'MANUAL'); -- CreateEnum CREATE TYPE "WaitpointStatus" AS ENUM ('PENDING', 'COMPLETED'); +-- CreateEnum +CREATE TYPE "WorkerInstanceGroupType" AS ENUM ('MANAGED', 'UNMANAGED'); + +-- CreateEnum +CREATE TYPE "WorkerDeploymentType" AS ENUM ('MANAGED', 'UNMANAGED', 'V1'); + +-- AlterTable +ALTER TABLE "BackgroundWorker" ADD COLUMN "workerGroupId" TEXT; + +-- AlterTable +ALTER TABLE "Project" ADD COLUMN "defaultWorkerGroupId" TEXT, +ADD COLUMN "engine" "RunEngineVersion" NOT NULL DEFAULT 'V1'; + +-- AlterTable +ALTER TABLE "TaskEvent" ADD COLUMN "isDebug" BOOLEAN NOT NULL DEFAULT false; + -- AlterTable ALTER TABLE "TaskRun" ADD COLUMN "attemptNumber" INTEGER, ADD COLUMN "engine" "RunEngineVersion" NOT NULL DEFAULT 'V1', +ADD 
COLUMN "firstAttemptStartedAt" TIMESTAMP(3), ADD COLUMN "masterQueue" TEXT NOT NULL DEFAULT 'main', +ADD COLUMN "priorityMs" INTEGER NOT NULL DEFAULT 0, ADD COLUMN "secondaryMasterQueue" TEXT; +-- AlterTable +ALTER TABLE "WorkerDeployment" ADD COLUMN "type" "WorkerDeploymentType" NOT NULL DEFAULT 'V1'; + -- CreateTable CREATE TABLE "TaskRunExecutionSnapshot" ( "id" TEXT NOT NULL, "engine" "RunEngineVersion" NOT NULL DEFAULT 'V2', "executionStatus" "TaskRunExecutionStatus" NOT NULL, "description" TEXT NOT NULL, + "isValid" BOOLEAN NOT NULL DEFAULT true, + "error" TEXT, "runId" TEXT NOT NULL, "runStatus" "TaskRunStatus" NOT NULL, "attemptNumber" INTEGER, "checkpointId" TEXT, + "workerId" TEXT, "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, "updatedAt" TIMESTAMP(3) NOT NULL, + "lastHeartbeatAt" TIMESTAMP(3), CONSTRAINT "TaskRunExecutionSnapshot_pkey" PRIMARY KEY ("id") ); @@ -55,6 +80,7 @@ CREATE TABLE "TaskRunCheckpoint" ( -- CreateTable CREATE TABLE "Waitpoint" ( "id" TEXT NOT NULL, + "friendlyId" TEXT NOT NULL, "type" "WaitpointType" NOT NULL, "status" "WaitpointStatus" NOT NULL DEFAULT 'PENDING', "completedAt" TIMESTAMP(3), @@ -65,6 +91,7 @@ CREATE TABLE "Waitpoint" ( "completedAfter" TIMESTAMP(3), "output" TEXT, "outputType" TEXT NOT NULL DEFAULT 'application/json', + "outputIsError" BOOLEAN NOT NULL DEFAULT false, "projectId" TEXT NOT NULL, "environmentId" TEXT NOT NULL, "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -86,18 +113,58 @@ CREATE TABLE "TaskRunWaitpoint" ( ); -- CreateTable -CREATE TABLE "Worker" ( +CREATE TABLE "FeatureFlag" ( + "id" TEXT NOT NULL, + "key" TEXT NOT NULL, + "value" JSONB, + + CONSTRAINT "FeatureFlag_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "WorkerInstance" ( "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "resourceIdentifier" TEXT NOT NULL, + "metadata" JSONB, + "workerGroupId" TEXT NOT NULL, + "organizationId" TEXT, + "projectId" TEXT, + "environmentId" TEXT, + "deploymentId" TEXT, + 
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + "lastDequeueAt" TIMESTAMP(3), + "lastHeartbeatAt" TIMESTAMP(3), - CONSTRAINT "Worker_pkey" PRIMARY KEY ("id") + CONSTRAINT "WorkerInstance_pkey" PRIMARY KEY ("id") ); -- CreateTable -CREATE TABLE "WorkerGroup" ( +CREATE TABLE "WorkerInstanceGroup" ( "id" TEXT NOT NULL, + "type" "WorkerInstanceGroupType" NOT NULL, + "name" TEXT NOT NULL, "masterQueue" TEXT NOT NULL, + "description" TEXT, + "hidden" BOOLEAN NOT NULL DEFAULT false, + "tokenId" TEXT NOT NULL, + "organizationId" TEXT, + "projectId" TEXT, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "WorkerInstanceGroup_pkey" PRIMARY KEY ("id") +); - CONSTRAINT "WorkerGroup_pkey" PRIMARY KEY ("id") +-- CreateTable +CREATE TABLE "WorkerGroupToken" ( + "id" TEXT NOT NULL, + "tokenHash" TEXT NOT NULL, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "WorkerGroupToken_pkey" PRIMARY KEY ("id") ); -- CreateTable @@ -107,11 +174,14 @@ CREATE TABLE "_completedWaitpoints" ( ); -- CreateIndex -CREATE INDEX "TaskRunExecutionSnapshot_runId_createdAt_idx" ON "TaskRunExecutionSnapshot"("runId", "createdAt" DESC); +CREATE INDEX "TaskRunExecutionSnapshot_runId_isValid_createdAt_idx" ON "TaskRunExecutionSnapshot"("runId", "isValid", "createdAt" DESC); -- CreateIndex CREATE UNIQUE INDEX "TaskRunCheckpoint_friendlyId_key" ON "TaskRunCheckpoint"("friendlyId"); +-- CreateIndex +CREATE UNIQUE INDEX "Waitpoint_friendlyId_key" ON "Waitpoint"("friendlyId"); + -- CreateIndex CREATE UNIQUE INDEX "Waitpoint_completedByTaskRunId_key" ON "Waitpoint"("completedByTaskRunId"); @@ -127,18 +197,42 @@ CREATE INDEX "TaskRunWaitpoint_waitpointId_idx" ON "TaskRunWaitpoint"("waitpoint -- CreateIndex CREATE UNIQUE INDEX "TaskRunWaitpoint_taskRunId_waitpointId_key" ON "TaskRunWaitpoint"("taskRunId", "waitpointId"); +-- CreateIndex 
+CREATE UNIQUE INDEX "FeatureFlag_key_key" ON "FeatureFlag"("key"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerInstance_workerGroupId_resourceIdentifier_key" ON "WorkerInstance"("workerGroupId", "resourceIdentifier"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerInstanceGroup_masterQueue_key" ON "WorkerInstanceGroup"("masterQueue"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerInstanceGroup_tokenId_key" ON "WorkerInstanceGroup"("tokenId"); + +-- CreateIndex +CREATE UNIQUE INDEX "WorkerGroupToken_tokenHash_key" ON "WorkerGroupToken"("tokenHash"); + -- CreateIndex CREATE UNIQUE INDEX "_completedWaitpoints_AB_unique" ON "_completedWaitpoints"("A", "B"); -- CreateIndex CREATE INDEX "_completedWaitpoints_B_index" ON "_completedWaitpoints"("B"); +-- AddForeignKey +ALTER TABLE "Project" ADD CONSTRAINT "Project_defaultWorkerGroupId_fkey" FOREIGN KEY ("defaultWorkerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "BackgroundWorker" ADD CONSTRAINT "BackgroundWorker_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE SET NULL ON UPDATE CASCADE; + -- AddForeignKey ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_runId_fkey" FOREIGN KEY ("runId") REFERENCES "TaskRun"("id") ON DELETE RESTRICT ON UPDATE CASCADE; -- AddForeignKey ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_checkpointId_fkey" FOREIGN KEY ("checkpointId") REFERENCES "TaskRunCheckpoint"("id") ON DELETE SET NULL ON UPDATE CASCADE; +-- AddForeignKey +ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_workerId_fkey" FOREIGN KEY ("workerId") REFERENCES "WorkerInstance"("id") ON DELETE SET NULL ON UPDATE CASCADE; + -- AddForeignKey ALTER TABLE "TaskRunCheckpoint" ADD CONSTRAINT "TaskRunCheckpoint_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; @@ 
-163,6 +257,30 @@ ALTER TABLE "TaskRunWaitpoint" ADD CONSTRAINT "TaskRunWaitpoint_waitpointId_fkey -- AddForeignKey ALTER TABLE "TaskRunWaitpoint" ADD CONSTRAINT "TaskRunWaitpoint_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_workerGroupId_fkey" FOREIGN KEY ("workerGroupId") REFERENCES "WorkerInstanceGroup"("id") ON DELETE RESTRICT ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_environmentId_fkey" FOREIGN KEY ("environmentId") REFERENCES "RuntimeEnvironment"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstance" ADD CONSTRAINT "WorkerInstance_deploymentId_fkey" FOREIGN KEY ("deploymentId") REFERENCES "WorkerDeployment"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_tokenId_fkey" FOREIGN KEY ("tokenId") REFERENCES "WorkerGroupToken"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_organizationId_fkey" FOREIGN KEY ("organizationId") REFERENCES "Organization"("id") ON DELETE CASCADE ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "WorkerInstanceGroup" ADD CONSTRAINT "WorkerInstanceGroup_projectId_fkey" FOREIGN KEY ("projectId") REFERENCES "Project"("id") ON DELETE CASCADE ON UPDATE CASCADE; + -- AddForeignKey ALTER TABLE "_completedWaitpoints" ADD CONSTRAINT "_completedWaitpoints_A_fkey" 
FOREIGN KEY ("A") REFERENCES "TaskRunExecutionSnapshot"("id") ON DELETE CASCADE ON UPDATE CASCADE; From f961a75efb294d6678a1f28454fa696b7483d8b7 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Fri, 3 Jan 2025 16:14:27 +0000 Subject: [PATCH 328/485] the first managed worker becomes the global default --- .../worker/workerGroupService.server.ts | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts index f8fde5b04c..affdabd319 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts @@ -2,7 +2,7 @@ import { WorkerInstanceGroup, WorkerInstanceGroupType } from "@trigger.dev/datab import { WithRunEngine } from "../baseService.server"; import { WorkerGroupTokenService } from "./workerGroupTokenService.server"; import { logger } from "~/services/logger.server"; -import { makeFlags } from "~/v3/featureFlags.server"; +import { makeFlags, makeSetFlags } from "~/v3/featureFlags.server"; export class WorkerGroupService extends WithRunEngine { private readonly defaultNamePrefix = "worker_group"; @@ -38,6 +38,22 @@ export class WorkerGroupService extends WithRunEngine { }, }); + if (workerGroup.type === WorkerInstanceGroupType.MANAGED) { + const managedCount = await this._prisma.workerInstanceGroup.count({ + where: { + type: WorkerInstanceGroupType.MANAGED, + }, + }); + + if (managedCount === 1) { + const setFlag = makeSetFlags(this._prisma); + await setFlag({ + key: "defaultWorkerInstanceGroupId", + value: workerGroup.id, + }); + } + } + return { workerGroup, token, From 45a829ee9c7a1f06f592bc876848ca2d6c29cd37 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 3 Jan 2025 19:35:03 +0000 Subject: [PATCH 329/485] Debug events off by default, added an admin toggle to show them --- 
.../route.tsx | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx index baa4f3055e..af8371031f 100644 --- a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx +++ b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam/route.tsx @@ -262,7 +262,6 @@ export default function Page() { function TraceView({ run, trace, maximumLiveReloadingSetting, resizable }: LoaderData) { const organization = useOrganization(); const project = useProject(); - const isAdmin = useHasAdminAccess(); const { searchParams, replaceSearchParam } = useReplaceSearchParams(); const selectedSpanId = searchParams.get("span") ?? undefined; @@ -303,7 +302,7 @@ function TraceView({ run, trace, maximumLiveReloadingSetting, resizable }: Loade !event.data.isDebug)} + events={events} parentRunFriendlyId={parentRunFriendlyId} onSelectedIdChanged={(selectedSpan) => { //instantly close the panel if no span is selected @@ -463,8 +462,10 @@ function TasksTreeView({ maximumLiveReloadingSetting, rootRun, }: TasksTreeViewProps) { + const isAdmin = useHasAdminAccess(); const [filterText, setFilterText] = useState(""); const [errorsOnly, setErrorsOnly] = useState(false); + const [showDebug, setShowDebug] = useState(false); const [showDurations, setShowDurations] = useState(true); const [scale, setScale] = useState(0); const parentRef = useRef(null); @@ -484,7 +485,7 @@ function TasksTreeView({ scrollToNode, virtualizer, } = useTree({ - tree: events, + tree: showDebug ? events : events.filter((event) => !event.data.isDebug), selectedId, // collapsedIds, onSelectedIdChanged, @@ -509,6 +510,14 @@ function TasksTreeView({

+ {isAdmin && ( + setShowDebug(e.valueOf())} + /> + )} Date: Sun, 5 Jan 2025 20:24:14 +0000 Subject: [PATCH 330/485] worker group name can't be an empty string --- .../app/v3/services/worker/workerGroupService.server.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts index affdabd319..24d457a882 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupService.server.ts @@ -18,7 +18,9 @@ export class WorkerGroupService extends WithRunEngine { name?: string; description?: string; }) { - name = name ?? (await this.generateWorkerName({ projectId })); + if (!name) { + name = await this.generateWorkerName({ projectId }); + } const tokenService = new WorkerGroupTokenService({ prisma: this._prisma, From bc38276938df49650909a82d1d54754d693e0f53 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 7 Jan 2025 12:02:03 +0000 Subject: [PATCH 331/485] add exec helper to core --- packages/core/package.json | 1 + packages/core/src/v3/apps/exec.ts | 106 +++++++++++++++++++++++++++++ packages/core/src/v3/apps/index.ts | 1 + pnpm-lock.yaml | 18 +++-- 4 files changed, 119 insertions(+), 7 deletions(-) create mode 100644 packages/core/src/v3/apps/exec.ts diff --git a/packages/core/package.json b/packages/core/package.json index 4d21f2914b..fdba1a5b32 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -205,6 +205,7 @@ "nanoid": "^3.3.4", "socket.io-client": "4.7.5", "superjson": "^2.2.1", + "tinyexec": "^0.3.2", "zod": "3.23.8", "zod-error": "1.5.0", "zod-validation-error": "^1.5.0" diff --git a/packages/core/src/v3/apps/exec.ts b/packages/core/src/v3/apps/exec.ts new file mode 100644 index 0000000000..afa1a03d07 --- /dev/null +++ b/packages/core/src/v3/apps/exec.ts @@ -0,0 +1,106 @@ +import { 
SimpleStructuredLogger } from "../utils/structuredLogger.js"; +import { type Result, x } from "tinyexec"; + +export class ExecResult { + pid?: number; + exitCode?: number; + aborted: boolean; + killed: boolean; + + constructor(result: Result) { + this.pid = result.pid; + this.exitCode = result.exitCode; + this.aborted = result.aborted; + this.killed = result.killed; + } +} + +export interface ExecOptions { + logger?: SimpleStructuredLogger; + abortSignal?: AbortSignal; + logOutput?: boolean; + trimArgs?: boolean; + neverThrow?: boolean; +} + +export class Exec { + private logger: SimpleStructuredLogger; + private abortSignal: AbortSignal | undefined; + + private logOutput: boolean; + private trimArgs: boolean; + private neverThrow: boolean; + + constructor(opts: ExecOptions) { + this.logger = opts.logger ?? new SimpleStructuredLogger("exec"); + this.abortSignal = opts.abortSignal; + + this.logOutput = opts.logOutput ?? true; + this.trimArgs = opts.trimArgs ?? true; + this.neverThrow = opts.neverThrow ?? false; + } + + async x( + command: string, + args?: string[], + opts?: { neverThrow?: boolean; ignoreAbort?: boolean } + ) { + const argsTrimmed = this.trimArgs ? args?.map((arg) => arg.trim()) : args; + + const commandWithFirstArg = `${command}${argsTrimmed?.length ? ` ${argsTrimmed[0]}` : ""}`; + this.logger.debug(`exec: ${commandWithFirstArg}`, { command, args, argsTrimmed }); + + const result = x(command, argsTrimmed, { + signal: opts?.ignoreAbort ? 
undefined : this.abortSignal, + // We don't use this as it doesn't cover killed and aborted processes + // throwOnError: true, + }); + + const output = await result; + + const metadata = { + command, + argsRaw: args, + argsTrimmed, + globalOpts: { + trimArgs: this.trimArgs, + neverThrow: this.neverThrow, + hasAbortSignal: !!this.abortSignal, + }, + localOpts: opts, + stdout: output.stdout, + stderr: output.stderr, + pid: result.pid, + exitCode: result.exitCode, + aborted: result.aborted, + killed: result.killed, + }; + + if (this.logOutput) { + this.logger.debug(`output: ${commandWithFirstArg}`, metadata); + } + + if (this.neverThrow || opts?.neverThrow) { + return output; + } + + if (result.aborted) { + this.logger.error(`aborted: ${commandWithFirstArg}`, metadata); + throw new ExecResult(result); + } + + if (result.killed) { + this.logger.error(`killed: ${commandWithFirstArg}`, metadata); + throw new ExecResult(result); + } + + if (result.exitCode !== 0) { + this.logger.error(`non-zero exit: ${commandWithFirstArg}`, metadata); + throw new ExecResult(result); + } + + return output; + } + + static Result = ExecResult; +} diff --git a/packages/core/src/v3/apps/index.ts b/packages/core/src/v3/apps/index.ts index c4028cf4b2..b30778299b 100644 --- a/packages/core/src/v3/apps/index.ts +++ b/packages/core/src/v3/apps/index.ts @@ -10,3 +10,4 @@ export * from "./duration.js"; export * from "./maxDuration.js"; export * from "./queueName.js"; export * from "./consts.js"; +export * from "./exec.js"; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a3d20b7e3c..1e2833e023 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1341,6 +1341,9 @@ importers: superjson: specifier: ^2.2.1 version: 2.2.1 + tinyexec: + specifier: ^0.3.2 + version: 0.3.2 zod: specifier: 3.23.8 version: 3.23.8 @@ -1892,7 +1895,7 @@ importers: version: link:../../packages/cli-v3 ts-node: specifier: ^10.9.2 - version: 10.9.2(@types/node@20.4.2)(typescript@5.5.4) + version: 
10.9.2(@types/node@20.14.14)(typescript@5.5.4) tsconfig-paths: specifier: ^4.2.0 version: 4.2.0 @@ -16611,9 +16614,6 @@ packages: dependencies: undici-types: 5.26.5 - /@types/node@20.4.2: - resolution: {integrity: sha512-Dd0BYtWgnWJKwO1jkmTrzofjK2QXXcai0dmtzvIBhcA+RsG5h8R3xlyta0kGOZRNfL9GuRtb1knmPEhQrePCEw==} - /@types/nodemailer@6.4.17: resolution: {integrity: sha512-I9CCaIp6DTldEg7vyUTZi8+9Vo0hi1/T8gv3C89yk1rSAAzoKQ8H8ki/jBYJSFoH/BisgLP8tkZMlQ91CIquww==} dependencies: @@ -30101,6 +30101,10 @@ packages: resolution: {integrity: sha512-WiCJLEECkO18gwqIp6+hJg0//p23HXp4S+gGtAKu3mI2F2/sXC4FvHvXvB0zJVVaTPhx1/tOwdbRsa1sOBIKqQ==} dev: false + /tinyexec@0.3.2: + resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + dev: false + /tinyglobby@0.2.2: resolution: {integrity: sha512-mZ2sDMaySvi1PkTp4lTo1In2zjU+cY8OvZsfwrDrx3YGRbXPX1/cbPwCR9zkm3O/Fz9Jo0F1HNgIQ1b8BepqyQ==} engines: {node: '>=12.0.0'} @@ -30293,7 +30297,7 @@ packages: v8-compile-cache-lib: 3.0.1 yn: 3.1.1 - /ts-node@10.9.2(@types/node@20.4.2)(typescript@5.5.4): + /ts-node@10.9.2(@types/node@20.14.14)(typescript@5.5.4): resolution: {integrity: sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==} hasBin: true peerDependencies: @@ -30312,7 +30316,7 @@ packages: '@tsconfig/node12': 1.0.11 '@tsconfig/node14': 1.0.3 '@tsconfig/node16': 1.0.3 - '@types/node': 20.4.2 + '@types/node': 20.14.14 acorn: 8.12.1 acorn-walk: 8.3.2 arg: 4.1.3 @@ -30766,7 +30770,7 @@ packages: pg: 8.11.5 reflect-metadata: 0.2.2 sha.js: 2.4.11 - ts-node: 10.9.2(@types/node@20.4.2)(typescript@5.5.4) + ts-node: 10.9.2(@types/node@20.14.14)(typescript@5.5.4) tslib: 2.6.2 uuid: 9.0.0 yargs: 17.7.2 From 1d6fb488fe6d7ecaff47c63af664b2786ed36fb3 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 7 Jan 2025 12:03:23 +0000 Subject: [PATCH 332/485] move machine resources to core --- 
internal-packages/run-engine/src/engine/index.ts | 3 ++- internal-packages/run-engine/src/engine/types.ts | 5 ----- packages/core/src/v3/schemas/runEngine.ts | 6 ++++++ 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 83dc41e65b..2cca839d54 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -6,6 +6,7 @@ import { CompleteRunAttemptResult, DequeuedMessage, ExecutionResult, + MachineResources, parsePacket, RunExecutionData, sanitizeError, @@ -61,7 +62,7 @@ import { isExecuting, isFinalRunStatus, } from "./statuses"; -import { HeartbeatTimeouts, MachineResources, RunEngineOptions, TriggerParams } from "./types"; +import { HeartbeatTimeouts, RunEngineOptions, TriggerParams } from "./types"; const workerCatalog = { finishWaitpoint: { diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index 1d3efb222c..dc40f7d1ed 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -34,11 +34,6 @@ export type HeartbeatTimeouts = { EXECUTING_WITH_WAITPOINTS: number; }; -export type MachineResources = { - cpu: number; - memory: number; -}; - export type TriggerParams = { friendlyId: string; number: number; diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index 2eb5c52680..57a52095bc 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -191,3 +191,9 @@ export const WaitForDurationResult = z }) .and(ExecutionResult); export type WaitForDurationResult = z.infer; + +export const MachineResources = z.object({ + cpu: z.number(), + memory: z.number(), +}); +export type MachineResources = z.infer; From a76ef9844cdadff51cff91f4f5b85195adc00832 Mon Sep 17 00:00:00 2001 From: nicktrn 
<55853254+nicktrn@users.noreply.github.com> Date: Tue, 7 Jan 2025 12:10:29 +0000 Subject: [PATCH 333/485] add pre-dequeue callback to determine max resources --- .../routes/api.v1.worker-actions.dequeue.ts | 18 +++++++++++------ .../worker/workerGroupTokenService.server.ts | 10 +++++++++- packages/worker/src/supervisor/http.ts | 6 +++++- .../worker/src/supervisor/queueConsumer.ts | 20 ++++++++++++++++++- packages/worker/src/supervisor/schemas.ts | 6 ++++++ packages/worker/src/supervisor/session.ts | 6 +++++- 6 files changed, 56 insertions(+), 10 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts index fd19968fb1..e9d6cd0470 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts @@ -1,10 +1,16 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiDequeueResponseBody } from "@trigger.dev/worker"; -import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; +import { WorkerApiDequeueRequestBody, WorkerApiDequeueResponseBody } from "@trigger.dev/worker"; +import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; -export const loader = createLoaderWorkerApiRoute( - {}, - async ({ authenticatedWorker }): Promise> => { - return json(await authenticatedWorker.dequeue()); +export const action = createActionWorkerApiRoute( + { + body: WorkerApiDequeueRequestBody, + }, + async ({ authenticatedWorker, body }): Promise> => { + return json( + await authenticatedWorker.dequeue({ + maxResources: body.maxResources, + }) + ); } ); diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 54bef5c7b5..1747fc319d 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ 
b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -18,6 +18,7 @@ import { ExecutionResult, MachinePreset, WaitForDurationResult, + MachineResources, } from "@trigger.dev/core/v3"; import { env } from "~/env.server"; import { $transaction } from "~/db.server"; @@ -525,12 +526,19 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { }); } - async dequeue(maxRunCount = 10): Promise { + async dequeue({ + maxRunCount = 10, + maxResources, + }: { + maxRunCount?: number; + maxResources?: MachineResources; + } = {}): Promise { if (this.type === WorkerInstanceGroupType.MANAGED) { return await this._engine.dequeueFromMasterQueue({ consumerId: this.workerInstanceId, masterQueue: this.masterQueue, maxRunCount, + maxResources, }); } diff --git a/packages/worker/src/supervisor/http.ts b/packages/worker/src/supervisor/http.ts index c5f50f4bf2..aea21f0f7f 100644 --- a/packages/worker/src/supervisor/http.ts +++ b/packages/worker/src/supervisor/http.ts @@ -3,6 +3,7 @@ import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; import { WorkerApiConnectRequestBody, WorkerApiConnectResponseBody, + WorkerApiDequeueRequestBody, WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody, WorkerApiHeartbeatResponseBody, @@ -61,14 +62,17 @@ export class SupervisorHttpClient { ); } - async dequeue() { + async dequeue(body: WorkerApiDequeueRequestBody) { return wrapZodFetch( WorkerApiDequeueResponseBody, `${this.apiUrl}/api/v1/worker-actions/dequeue`, { + method: "POST", headers: { ...this.defaultHeaders, + "Content-Type": "application/json", }, + body: JSON.stringify(body), } ); } diff --git a/packages/worker/src/supervisor/queueConsumer.ts b/packages/worker/src/supervisor/queueConsumer.ts index c9cdc8cc58..f087c472bd 100644 --- a/packages/worker/src/supervisor/queueConsumer.ts +++ b/packages/worker/src/supervisor/queueConsumer.ts @@ -1,14 +1,21 @@ +import { MachineResources } from "@trigger.dev/core/v3"; import { SupervisorHttpClient } from 
"./http.js"; import { WorkerApiDequeueResponseBody } from "./schemas.js"; type RunQueueConsumerOptions = { client: SupervisorHttpClient; intervalMs?: number; + preDequeue?: () => Promise<{ + maxResources?: MachineResources; + }>; onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; }; export class RunQueueConsumer { private readonly client: SupervisorHttpClient; + private readonly preDequeue?: () => Promise<{ + maxResources?: MachineResources; + }>; private readonly onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; private intervalMs: number; @@ -17,6 +24,7 @@ export class RunQueueConsumer { constructor(opts: RunQueueConsumerOptions) { this.isEnabled = false; this.intervalMs = opts.intervalMs ?? 5_000; + this.preDequeue = opts.preDequeue; this.onDequeue = opts.onDequeue; this.client = opts.client; } @@ -46,8 +54,18 @@ export class RunQueueConsumer { return; } + let maxResources: MachineResources | undefined; + if (this.preDequeue) { + try { + const preDequeueResult = await this.preDequeue(); + maxResources = preDequeueResult.maxResources; + } catch (preDequeueError) { + console.error("[RunQueueConsumer] preDequeue error", { error: preDequeueError }); + } + } + try { - const response = await this.client.dequeue(); + const response = await this.client.dequeue({ maxResources }); if (!response.success) { console.error("[RunQueueConsumer] Failed to dequeue", { error: response.error }); diff --git a/packages/worker/src/supervisor/schemas.ts b/packages/worker/src/supervisor/schemas.ts index 6fc646576e..6749e1386a 100644 --- a/packages/worker/src/supervisor/schemas.ts +++ b/packages/worker/src/supervisor/schemas.ts @@ -2,6 +2,7 @@ import { z } from "zod"; import { CompleteRunAttemptResult, DequeuedMessage, + MachineResources, RunExecutionData, StartRunAttemptResult, TaskRunExecutionResult, @@ -40,6 +41,11 @@ export const WorkerApiConnectResponseBody = z.object({ }); export type WorkerApiConnectResponseBody = z.infer; +export const 
WorkerApiDequeueRequestBody = z.object({ + maxResources: MachineResources.optional(), +}); +export type WorkerApiDequeueRequestBody = z.infer; + export const WorkerApiDequeueResponseBody = DequeuedMessage.array(); export type WorkerApiDequeueResponseBody = z.infer; diff --git a/packages/worker/src/supervisor/session.ts b/packages/worker/src/supervisor/session.ts index c21ebff139..2acbda0966 100644 --- a/packages/worker/src/supervisor/session.ts +++ b/packages/worker/src/supervisor/session.ts @@ -1,4 +1,4 @@ -import { HeartbeatService } from "@trigger.dev/core/v3"; +import { HeartbeatService, MachineResources } from "@trigger.dev/core/v3"; import { SupervisorHttpClient } from "./http.js"; import { SupervisorClientCommonOptions } from "./types.js"; import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./schemas.js"; @@ -13,6 +13,9 @@ import { getDefaultWorkerHeaders } from "./util.js"; type SupervisorSessionOptions = SupervisorClientCommonOptions & { heartbeatIntervalSeconds?: number; dequeueIntervalMs?: number; + preDequeue?: () => Promise<{ + maxResources?: MachineResources; + }>; }; export class SupervisorSession extends EventEmitter { @@ -30,6 +33,7 @@ export class SupervisorSession extends EventEmitter { this.httpClient = new SupervisorHttpClient(opts); this.queueConsumer = new RunQueueConsumer({ client: this.httpClient, + preDequeue: opts.preDequeue, onDequeue: this.onDequeue.bind(this), intervalMs: opts.dequeueIntervalMs, }); From 5cf7c52cda0de8c2d6eb9cec0e73579078d188ee Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 7 Jan 2025 17:04:00 +0000 Subject: [PATCH 334/485] optionally skip dequeue --- .../worker/src/supervisor/queueConsumer.ts | 27 +++++++++++-------- packages/worker/src/supervisor/session.ts | 6 ++--- packages/worker/src/supervisor/types.ts | 7 +++++ 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/packages/worker/src/supervisor/queueConsumer.ts 
b/packages/worker/src/supervisor/queueConsumer.ts index f087c472bd..21f13e725f 100644 --- a/packages/worker/src/supervisor/queueConsumer.ts +++ b/packages/worker/src/supervisor/queueConsumer.ts @@ -1,21 +1,17 @@ -import { MachineResources } from "@trigger.dev/core/v3"; import { SupervisorHttpClient } from "./http.js"; import { WorkerApiDequeueResponseBody } from "./schemas.js"; +import { PreDequeueFn } from "./types.js"; type RunQueueConsumerOptions = { client: SupervisorHttpClient; intervalMs?: number; - preDequeue?: () => Promise<{ - maxResources?: MachineResources; - }>; + preDequeue?: PreDequeueFn; onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; }; export class RunQueueConsumer { private readonly client: SupervisorHttpClient; - private readonly preDequeue?: () => Promise<{ - maxResources?: MachineResources; - }>; + private readonly preDequeue?: PreDequeueFn; private readonly onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; private intervalMs: number; @@ -54,18 +50,27 @@ export class RunQueueConsumer { return; } - let maxResources: MachineResources | undefined; + let preDequeueResult: Awaited> | undefined; if (this.preDequeue) { try { - const preDequeueResult = await this.preDequeue(); - maxResources = preDequeueResult.maxResources; + preDequeueResult = await this.preDequeue(); } catch (preDequeueError) { console.error("[RunQueueConsumer] preDequeue error", { error: preDequeueError }); } } + if ( + preDequeueResult?.skipDequeue || + preDequeueResult?.maxResources?.cpu === 0 || + preDequeueResult?.maxResources?.memory === 0 + ) { + return this.scheduleNextDequeue(); + } + try { - const response = await this.client.dequeue({ maxResources }); + const response = await this.client.dequeue({ + maxResources: preDequeueResult?.maxResources, + }); if (!response.success) { console.error("[RunQueueConsumer] Failed to dequeue", { error: response.error }); diff --git a/packages/worker/src/supervisor/session.ts 
b/packages/worker/src/supervisor/session.ts index 2acbda0966..0548c94d42 100644 --- a/packages/worker/src/supervisor/session.ts +++ b/packages/worker/src/supervisor/session.ts @@ -1,6 +1,6 @@ import { HeartbeatService, MachineResources } from "@trigger.dev/core/v3"; import { SupervisorHttpClient } from "./http.js"; -import { SupervisorClientCommonOptions } from "./types.js"; +import { PreDequeueFn, SupervisorClientCommonOptions } from "./types.js"; import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./schemas.js"; import { RunQueueConsumer } from "./queueConsumer.js"; import { WorkerEvents } from "./events.js"; @@ -13,9 +13,7 @@ import { getDefaultWorkerHeaders } from "./util.js"; type SupervisorSessionOptions = SupervisorClientCommonOptions & { heartbeatIntervalSeconds?: number; dequeueIntervalMs?: number; - preDequeue?: () => Promise<{ - maxResources?: MachineResources; - }>; + preDequeue?: PreDequeueFn; }; export class SupervisorSession extends EventEmitter { diff --git a/packages/worker/src/supervisor/types.ts b/packages/worker/src/supervisor/types.ts index dfc3d21ed0..6367574360 100644 --- a/packages/worker/src/supervisor/types.ts +++ b/packages/worker/src/supervisor/types.ts @@ -1,3 +1,5 @@ +import type { MachineResources } from "@trigger.dev/core/v3"; + export type SupervisorClientCommonOptions = { apiUrl: string; workerToken: string; @@ -5,3 +7,8 @@ export type SupervisorClientCommonOptions = { deploymentId?: string; managedWorkerSecret?: string; }; + +export type PreDequeueFn = () => Promise<{ + maxResources?: MachineResources; + skipDequeue?: boolean; +}>; From 70abb6376dc7a89dc1b458eb0e2adc71c85e9c8f Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 8 Jan 2025 11:41:37 +0000 Subject: [PATCH 335/485] bump worker package --- packages/cli-v3/package.json | 2 +- packages/worker/package.json | 2 +- pnpm-lock.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/packages/cli-v3/package.json b/packages/cli-v3/package.json index 916a07db1e..4d219fc8f4 100644 --- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -88,7 +88,7 @@ "@opentelemetry/semantic-conventions": "1.25.1", "@trigger.dev/build": "workspace:3.3.8", "@trigger.dev/core": "workspace:3.3.8", - "@trigger.dev/worker": "workspace:3.3.7", + "@trigger.dev/worker": "workspace:3.3.8", "c12": "^1.11.1", "chalk": "^5.2.0", "cli-table3": "^0.6.3", diff --git a/packages/worker/package.json b/packages/worker/package.json index 56304df679..5d23bc63ba 100644 --- a/packages/worker/package.json +++ b/packages/worker/package.json @@ -1,6 +1,6 @@ { "name": "@trigger.dev/worker", - "version": "3.3.7", + "version": "3.3.8", "description": "trigger.dev worker", "license": "MIT", "publishConfig": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 8012ca49f2..d9b8a8d5e4 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1113,7 +1113,7 @@ importers: specifier: workspace:3.3.8 version: link:../core '@trigger.dev/worker': - specifier: workspace:3.3.7 + specifier: workspace:3.3.8 version: link:../worker c12: specifier: ^1.11.1 From f319d3a254de6e46c62ae490f088142830069e0d Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:23:56 +0000 Subject: [PATCH 336/485] move worker to core --- .../routes/api.v1.worker-actions.connect.ts | 2 +- ...ployments.$deploymentFriendlyId.dequeue.ts | 2 +- .../routes/api.v1.worker-actions.dequeue.ts | 2 +- .../routes/api.v1.worker-actions.heartbeat.ts | 2 +- ...s.$snapshotFriendlyId.attempts.complete.ts | 2 +- ...hots.$snapshotFriendlyId.attempts.start.ts | 2 +- ...snapshots.$snapshotFriendlyId.heartbeat.ts | 2 +- ...shots.$snapshotFriendlyId.wait.duration.ts | 2 +- ...ns.runs.$runFriendlyId.snapshots.latest.ts | 2 +- apps/webapp/app/v3/handleSocketIo.server.ts | 2 +- .../worker/workerGroupTokenService.server.ts | 8 +-- apps/webapp/package.json | 1 - apps/webapp/tsconfig.json | 2 - 
packages/cli-v3/package.json | 1 - .../src/entryPoints/managed-run-controller.ts | 2 +- .../entryPoints/unmanaged-run-controller.ts | 2 +- packages/core/package.json | 2 +- .../src/v3/runEngineWorker}/consts.ts | 2 +- .../src/v3/runEngineWorker}/index.ts | 1 - .../v3/runEngineWorker}/supervisor/events.ts | 7 +- .../v3/runEngineWorker}/supervisor/http.ts | 2 +- .../supervisor/queueConsumer.ts | 0 .../v3/runEngineWorker}/supervisor/schemas.ts | 10 +-- .../v3/runEngineWorker}/supervisor/session.ts | 4 +- .../v3/runEngineWorker}/supervisor/types.ts | 2 +- .../v3/runEngineWorker}/supervisor/util.ts | 8 +-- .../src/v3/runEngineWorker}/types.ts | 0 .../src/v3/runEngineWorker}/util.ts | 0 .../src/v3/runEngineWorker}/workload/http.ts | 2 +- .../v3/runEngineWorker}/workload/schemas.ts | 0 .../src/v3/runEngineWorker}/workload/types.ts | 0 .../src/v3/runEngineWorker}/workload/util.ts | 0 packages/core/src/v3/workers/index.ts | 1 + packages/worker/LICENSE | 21 ------ packages/worker/README.md | 3 - packages/worker/package.json | 71 ------------------- packages/worker/src/version.ts | 1 - packages/worker/tsconfig.json | 8 --- packages/worker/tsconfig.src.json | 10 --- pnpm-lock.yaml | 44 ++---------- 40 files changed, 42 insertions(+), 193 deletions(-) rename packages/{worker/src => core/src/v3/runEngineWorker}/consts.ts (88%) rename packages/{worker/src => core/src/v3/runEngineWorker}/index.ts (81%) rename packages/{worker/src => core/src/v3/runEngineWorker}/supervisor/events.ts (84%) rename packages/{worker/src => core/src/v3/runEngineWorker}/supervisor/http.ts (98%) rename packages/{worker/src => core/src/v3/runEngineWorker}/supervisor/queueConsumer.ts (100%) rename packages/{worker/src => core/src/v3/runEngineWorker}/supervisor/schemas.ts (97%) rename packages/{worker/src => core/src/v3/runEngineWorker}/supervisor/session.ts (97%) rename packages/{worker/src => core/src/v3/runEngineWorker}/supervisor/types.ts (81%) rename packages/{worker/src => 
core/src/v3/runEngineWorker}/supervisor/util.ts (78%) rename packages/{worker/src => core/src/v3/runEngineWorker}/types.ts (100%) rename packages/{worker/src => core/src/v3/runEngineWorker}/util.ts (100%) rename packages/{worker/src => core/src/v3/runEngineWorker}/workload/http.ts (98%) rename packages/{worker/src => core/src/v3/runEngineWorker}/workload/schemas.ts (100%) rename packages/{worker/src => core/src/v3/runEngineWorker}/workload/types.ts (100%) rename packages/{worker/src => core/src/v3/runEngineWorker}/workload/util.ts (100%) delete mode 100644 packages/worker/LICENSE delete mode 100644 packages/worker/README.md delete mode 100644 packages/worker/package.json delete mode 100644 packages/worker/src/version.ts delete mode 100644 packages/worker/tsconfig.json delete mode 100644 packages/worker/tsconfig.src.json diff --git a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts index 024526a147..247984d454 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.connect.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiConnectRequestBody, WorkerApiConnectResponseBody } from "@trigger.dev/worker"; +import { WorkerApiConnectRequestBody, WorkerApiConnectResponseBody } from "@trigger.dev/core/v3/workers"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( diff --git a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts index fa56f895b2..d0b5d773cf 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts @@ -1,6 +1,6 @@ import { json, 
TypedResponse } from "@remix-run/server-runtime"; import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; -import { WorkerApiDequeueResponseBody } from "@trigger.dev/worker"; +import { WorkerApiDequeueResponseBody } from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { $replica, prisma } from "~/db.server"; import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts index e9d6cd0470..4dd0798ad3 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiDequeueRequestBody, WorkerApiDequeueResponseBody } from "@trigger.dev/worker"; +import { WorkerApiDequeueRequestBody, WorkerApiDequeueResponseBody } from "@trigger.dev/core/v3/workers"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( diff --git a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts index babe12d5ea..111cab6011 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiHeartbeatResponseBody, WorkerApiHeartbeatRequestBody } from "@trigger.dev/worker"; +import { WorkerApiHeartbeatResponseBody, WorkerApiHeartbeatRequestBody } from "@trigger.dev/core/v3/workers"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; export const action = createActionWorkerApiRoute( diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts 
b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts index 4e33f04fec..81f53280e1 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts @@ -2,7 +2,7 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { WorkerApiRunAttemptCompleteRequestBody, WorkerApiRunAttemptCompleteResponseBody, -} from "@trigger.dev/worker"; +} from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts index d8137e9b90..f7a8e874ec 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts @@ -2,7 +2,7 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { WorkerApiRunAttemptStartRequestBody, WorkerApiRunAttemptStartResponseBody, -} from "@trigger.dev/worker"; +} from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts index 96cd8c7e45..0942bcde8b 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts 
+++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkloadHeartbeatResponseBody } from "@trigger.dev/worker"; +import { WorkloadHeartbeatResponseBody } from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts index f8676f6454..8f4630149a 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts @@ -2,7 +2,7 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { WorkerApiWaitForDurationRequestBody, WorkerApiWaitForDurationResponseBody, -} from "@trigger.dev/worker"; +} from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts index 37422bf42a..d480acf01e 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { WorkerApiRunLatestSnapshotResponseBody } from "@trigger.dev/worker"; +import { WorkerApiRunLatestSnapshotResponseBody } from "@trigger.dev/core/v3/workers"; import { z } from "zod"; 
import { createLoaderWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/v3/handleSocketIo.server.ts b/apps/webapp/app/v3/handleSocketIo.server.ts index e8aa628a39..fe37bb64af 100644 --- a/apps/webapp/app/v3/handleSocketIo.server.ts +++ b/apps/webapp/app/v3/handleSocketIo.server.ts @@ -26,7 +26,7 @@ import { CrashTaskRunService } from "./services/crashTaskRun.server"; import { CreateTaskRunAttemptService } from "./services/createTaskRunAttempt.server"; import { UpdateFatalRunErrorService } from "./services/updateFatalRunError.server"; import { WorkerGroupTokenService } from "./services/worker/workerGroupTokenService.server"; -import type { WorkerClientToServerEvents, WorkerServerToClientEvents } from "@trigger.dev/worker"; +import type { WorkerClientToServerEvents, WorkerServerToClientEvents } from "@trigger.dev/core/v3/workers"; export const socketIo = singleton("socketIo", initalizeIoServer); diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 1747fc319d..20903e018c 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -9,7 +9,7 @@ import { WorkerInstanceGroupType, } from "@trigger.dev/database"; import { z } from "zod"; -import { HEADER_NAME } from "@trigger.dev/worker"; +import { WORKER_HEADER_NAME } from "@trigger.dev/core/v3/workers"; import { TaskRunExecutionResult, DequeuedMessage, @@ -136,7 +136,7 @@ export class WorkerGroupTokenService extends WithRunEngine { return; } - const instanceName = request.headers.get(HEADER_NAME.WORKER_INSTANCE_NAME); + const instanceName = request.headers.get(WORKER_HEADER_NAME.WORKER_INSTANCE_NAME); if (!instanceName) { logger.error("[WorkerGroupTokenService] Instance name not found in request", { @@ -153,7 +153,7 @@ export class WorkerGroupTokenService extends 
WithRunEngine { } if (workerGroup.type === WorkerInstanceGroupType.MANAGED) { - const managedWorkerSecret = request.headers.get(HEADER_NAME.WORKER_MANAGED_SECRET); + const managedWorkerSecret = request.headers.get(WORKER_HEADER_NAME.WORKER_MANAGED_SECRET); if (!managedWorkerSecret) { logger.error("[WorkerGroupTokenService] Managed secret not found in request", { @@ -187,7 +187,7 @@ export class WorkerGroupTokenService extends WithRunEngine { const workerInstance = await this.getOrCreateWorkerInstance({ workerGroup, instanceName, - deploymentId: request.headers.get(HEADER_NAME.WORKER_DEPLOYMENT_ID) ?? undefined, + deploymentId: request.headers.get(WORKER_HEADER_NAME.WORKER_DEPLOYMENT_ID) ?? undefined, }); if (!workerInstance) { diff --git a/apps/webapp/package.json b/apps/webapp/package.json index 0d4e7e9a79..028f7543c4 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -101,7 +101,6 @@ "@trigger.dev/otlp-importer": "workspace:*", "@trigger.dev/platform": "1.0.14", "@trigger.dev/sdk": "workspace:*", - "@trigger.dev/worker": "workspace:*", "@trigger.dev/yalt": "npm:@trigger.dev/yalt", "@types/pg": "8.6.6", "@uiw/react-codemirror": "^4.19.5", diff --git a/apps/webapp/tsconfig.json b/apps/webapp/tsconfig.json index b402860b1b..bb6a03acc1 100644 --- a/apps/webapp/tsconfig.json +++ b/apps/webapp/tsconfig.json @@ -31,8 +31,6 @@ "@trigger.dev/yalt/*": ["../../packages/yalt/src/*"], "@trigger.dev/otlp-importer": ["../../internal-packages/otlp-importer/src/index"], "@trigger.dev/otlp-importer/*": ["../../internal-packages/otlp-importer/src/*"], - "@trigger.dev/worker": ["../../packages/worker/src/index"], - "@trigger.dev/worker/*": ["../../packages/worker/src/*"], "emails": ["../../internal-packages/emails/src/index"], "emails/*": ["../../internal-packages/emails/src/*"], "@internal/zod-worker": ["../../internal-packages/zod-worker/src/index"], diff --git a/packages/cli-v3/package.json b/packages/cli-v3/package.json index 4d219fc8f4..05e23e7bca 100644 
--- a/packages/cli-v3/package.json +++ b/packages/cli-v3/package.json @@ -88,7 +88,6 @@ "@opentelemetry/semantic-conventions": "1.25.1", "@trigger.dev/build": "workspace:3.3.8", "@trigger.dev/core": "workspace:3.3.8", - "@trigger.dev/worker": "workspace:3.3.8", "c12": "^1.11.1", "chalk": "^5.2.0", "cli-table3": "^0.6.3", diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 9394b97c15..daf68a34ee 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -16,7 +16,7 @@ import { WorkloadHttpClient, WorkloadServerToClientEvents, type WorkloadRunAttemptStartResponseBody, -} from "@trigger.dev/worker"; +} from "@trigger.dev/core/v3/workers"; import { assertExhaustive } from "../utilities/assertExhaustive.js"; import { setTimeout as wait } from "timers/promises"; import { io, Socket } from "socket.io-client"; diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts index 11989e89da..49741be012 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/unmanaged-run-controller.ts @@ -6,7 +6,7 @@ import { CLOUD_API_URL } from "../consts.js"; import { randomUUID } from "crypto"; import { readJSONFile } from "../utilities/fileSystem.js"; import { WorkerManifest } from "@trigger.dev/core/v3"; -import { SupervisorSession } from "@trigger.dev/worker"; +import { SupervisorSession } from "@trigger.dev/core/v3/workers"; const Env = z.object({ TRIGGER_API_URL: z.string().url().default(CLOUD_API_URL), diff --git a/packages/core/package.json b/packages/core/package.json index d2a50a003c..c95352d26b 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -203,6 +203,7 @@ "humanize-duration": "^3.27.3", "jose": "^5.4.0", "nanoid": "^3.3.4", + "socket.io": "4.7.4", 
"socket.io-client": "4.7.5", "superjson": "^2.2.1", "tinyexec": "^0.3.2", @@ -221,7 +222,6 @@ "defu": "^6.1.4", "esbuild": "^0.23.0", "rimraf": "^3.0.2", - "socket.io": "4.7.4", "ts-essentials": "10.0.1", "tshy": "^3.0.2", "tsx": "4.17.0", diff --git a/packages/worker/src/consts.ts b/packages/core/src/v3/runEngineWorker/consts.ts similarity index 88% rename from packages/worker/src/consts.ts rename to packages/core/src/v3/runEngineWorker/consts.ts index 7f1c475301..45fc6efd44 100644 --- a/packages/worker/src/consts.ts +++ b/packages/core/src/v3/runEngineWorker/consts.ts @@ -1,4 +1,4 @@ -export const HEADER_NAME = { +export const WORKER_HEADER_NAME = { WORKER_INSTANCE_NAME: "x-trigger-worker-instance-name", WORKER_DEPLOYMENT_ID: "x-trigger-worker-deployment-id", WORKER_MANAGED_SECRET: "x-trigger-worker-managed-secret", diff --git a/packages/worker/src/index.ts b/packages/core/src/v3/runEngineWorker/index.ts similarity index 81% rename from packages/worker/src/index.ts rename to packages/core/src/v3/runEngineWorker/index.ts index ae89268885..98566ae225 100644 --- a/packages/worker/src/index.ts +++ b/packages/core/src/v3/runEngineWorker/index.ts @@ -1,4 +1,3 @@ -export { VERSION as WORKER_VERSION } from "./version.js"; export * from "./consts.js"; export * from "./supervisor/http.js"; export * from "./supervisor/schemas.js"; diff --git a/packages/worker/src/supervisor/events.ts b/packages/core/src/v3/runEngineWorker/supervisor/events.ts similarity index 84% rename from packages/worker/src/supervisor/events.ts rename to packages/core/src/v3/runEngineWorker/supervisor/events.ts index b08c330a37..a51c504a3e 100644 --- a/packages/worker/src/supervisor/events.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/events.ts @@ -1,8 +1,5 @@ -import { - DequeuedMessage, - StartRunAttemptResult, - TaskRunExecutionResult, -} from "@trigger.dev/core/v3"; +import { TaskRunExecutionResult } from "../../schemas/common.js"; +import { DequeuedMessage, StartRunAttemptResult } from 
"../../schemas/runEngine.js"; export type WorkerEvents = { runQueueMessage: [ diff --git a/packages/worker/src/supervisor/http.ts b/packages/core/src/v3/runEngineWorker/supervisor/http.ts similarity index 98% rename from packages/worker/src/supervisor/http.ts rename to packages/core/src/v3/runEngineWorker/supervisor/http.ts index aea21f0f7f..6bc1910ac4 100644 --- a/packages/worker/src/supervisor/http.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/http.ts @@ -1,5 +1,4 @@ import { z } from "zod"; -import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; import { WorkerApiConnectRequestBody, WorkerApiConnectResponseBody, @@ -19,6 +18,7 @@ import { } from "./schemas.js"; import { SupervisorClientCommonOptions } from "./types.js"; import { getDefaultWorkerHeaders } from "./util.js"; +import { ApiError, zodfetch } from "../../zodfetch.js"; type SupervisorHttpClientOptions = SupervisorClientCommonOptions; diff --git a/packages/worker/src/supervisor/queueConsumer.ts b/packages/core/src/v3/runEngineWorker/supervisor/queueConsumer.ts similarity index 100% rename from packages/worker/src/supervisor/queueConsumer.ts rename to packages/core/src/v3/runEngineWorker/supervisor/queueConsumer.ts diff --git a/packages/worker/src/supervisor/schemas.ts b/packages/core/src/v3/runEngineWorker/supervisor/schemas.ts similarity index 97% rename from packages/worker/src/supervisor/schemas.ts rename to packages/core/src/v3/runEngineWorker/supervisor/schemas.ts index 6749e1386a..92f3d3057d 100644 --- a/packages/worker/src/supervisor/schemas.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/schemas.ts @@ -1,13 +1,13 @@ import { z } from "zod"; +import { TaskRunExecutionResult } from "../../schemas/common.js"; import { - CompleteRunAttemptResult, - DequeuedMessage, MachineResources, - RunExecutionData, + DequeuedMessage, StartRunAttemptResult, - TaskRunExecutionResult, + CompleteRunAttemptResult, + RunExecutionData, WaitForDurationResult, -} from 
"@trigger.dev/core/v3"; +} from "../../schemas/runEngine.js"; export const WorkerApiHeartbeatRequestBody = z.object({ cpu: z.object({ diff --git a/packages/worker/src/supervisor/session.ts b/packages/core/src/v3/runEngineWorker/supervisor/session.ts similarity index 97% rename from packages/worker/src/supervisor/session.ts rename to packages/core/src/v3/runEngineWorker/supervisor/session.ts index 0548c94d42..02bf712b5f 100644 --- a/packages/worker/src/supervisor/session.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/session.ts @@ -1,14 +1,14 @@ -import { HeartbeatService, MachineResources } from "@trigger.dev/core/v3"; import { SupervisorHttpClient } from "./http.js"; import { PreDequeueFn, SupervisorClientCommonOptions } from "./types.js"; import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./schemas.js"; import { RunQueueConsumer } from "./queueConsumer.js"; import { WorkerEvents } from "./events.js"; import EventEmitter from "events"; -import { VERSION } from "../version.js"; +import { VERSION } from "../../../version.js"; import { io, Socket } from "socket.io-client"; import { WorkerClientToServerEvents, WorkerServerToClientEvents } from "../types.js"; import { getDefaultWorkerHeaders } from "./util.js"; +import { HeartbeatService } from "../../utils/heartbeat.js"; type SupervisorSessionOptions = SupervisorClientCommonOptions & { heartbeatIntervalSeconds?: number; diff --git a/packages/worker/src/supervisor/types.ts b/packages/core/src/v3/runEngineWorker/supervisor/types.ts similarity index 81% rename from packages/worker/src/supervisor/types.ts rename to packages/core/src/v3/runEngineWorker/supervisor/types.ts index 6367574360..80678b6f18 100644 --- a/packages/worker/src/supervisor/types.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/types.ts @@ -1,4 +1,4 @@ -import type { MachineResources } from "@trigger.dev/core/v3"; +import { MachineResources } from "../../schemas/runEngine.js"; export type 
SupervisorClientCommonOptions = { apiUrl: string; diff --git a/packages/worker/src/supervisor/util.ts b/packages/core/src/v3/runEngineWorker/supervisor/util.ts similarity index 78% rename from packages/worker/src/supervisor/util.ts rename to packages/core/src/v3/runEngineWorker/supervisor/util.ts index 2ed2b41c8c..131c5e5298 100644 --- a/packages/worker/src/supervisor/util.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/util.ts @@ -1,4 +1,4 @@ -import { HEADER_NAME } from "../consts.js"; +import { WORKER_HEADER_NAME } from "../consts.js"; import { createHeaders } from "../util.js"; import { SupervisorClientCommonOptions } from "./types.js"; @@ -7,9 +7,9 @@ export function getDefaultWorkerHeaders( ): Record { return createHeaders({ Authorization: `Bearer ${options.workerToken}`, - [HEADER_NAME.WORKER_INSTANCE_NAME]: options.instanceName, - [HEADER_NAME.WORKER_DEPLOYMENT_ID]: options.deploymentId, - [HEADER_NAME.WORKER_MANAGED_SECRET]: options.managedWorkerSecret, + [WORKER_HEADER_NAME.WORKER_INSTANCE_NAME]: options.instanceName, + [WORKER_HEADER_NAME.WORKER_DEPLOYMENT_ID]: options.deploymentId, + [WORKER_HEADER_NAME.WORKER_MANAGED_SECRET]: options.managedWorkerSecret, }); } diff --git a/packages/worker/src/types.ts b/packages/core/src/v3/runEngineWorker/types.ts similarity index 100% rename from packages/worker/src/types.ts rename to packages/core/src/v3/runEngineWorker/types.ts diff --git a/packages/worker/src/util.ts b/packages/core/src/v3/runEngineWorker/util.ts similarity index 100% rename from packages/worker/src/util.ts rename to packages/core/src/v3/runEngineWorker/util.ts diff --git a/packages/worker/src/workload/http.ts b/packages/core/src/v3/runEngineWorker/workload/http.ts similarity index 98% rename from packages/worker/src/workload/http.ts rename to packages/core/src/v3/runEngineWorker/workload/http.ts index f64248ba58..5ed550e14d 100644 --- a/packages/worker/src/workload/http.ts +++ b/packages/core/src/v3/runEngineWorker/workload/http.ts @@ 
-1,5 +1,4 @@ import { z } from "zod"; -import { zodfetch, ApiError } from "@trigger.dev/core/v3/zodfetch"; import { WorkloadHeartbeatRequestBody, WorkloadHeartbeatResponseBody, @@ -14,6 +13,7 @@ import { } from "./schemas.js"; import { WorkloadClientCommonOptions } from "./types.js"; import { getDefaultWorkloadHeaders } from "./util.js"; +import { ApiError, zodfetch } from "../../zodfetch.js"; type WorkloadHttpClientOptions = WorkloadClientCommonOptions; diff --git a/packages/worker/src/workload/schemas.ts b/packages/core/src/v3/runEngineWorker/workload/schemas.ts similarity index 100% rename from packages/worker/src/workload/schemas.ts rename to packages/core/src/v3/runEngineWorker/workload/schemas.ts diff --git a/packages/worker/src/workload/types.ts b/packages/core/src/v3/runEngineWorker/workload/types.ts similarity index 100% rename from packages/worker/src/workload/types.ts rename to packages/core/src/v3/runEngineWorker/workload/types.ts diff --git a/packages/worker/src/workload/util.ts b/packages/core/src/v3/runEngineWorker/workload/util.ts similarity index 100% rename from packages/worker/src/workload/util.ts rename to packages/core/src/v3/runEngineWorker/workload/util.ts diff --git a/packages/core/src/v3/workers/index.ts b/packages/core/src/v3/workers/index.ts index 2912d69387..e97414249d 100644 --- a/packages/core/src/v3/workers/index.ts +++ b/packages/core/src/v3/workers/index.ts @@ -18,3 +18,4 @@ export { StandardMetadataManager } from "../runMetadata/manager.js"; export { StandardWaitUntilManager } from "../waitUntil/manager.js"; export { ManagedRuntimeManager } from "../runtime/managedRuntimeManager.js"; export { UnmanagedRuntimeManager } from "../runtime/unmanagedRuntimeManager.js"; +export * from "../runEngineWorker/index.js"; diff --git a/packages/worker/LICENSE b/packages/worker/LICENSE deleted file mode 100644 index e51e7b10aa..0000000000 --- a/packages/worker/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 Trigger.dev - 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/packages/worker/README.md b/packages/worker/README.md deleted file mode 100644 index 53b5a8a581..0000000000 --- a/packages/worker/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# @trigger.dev/worker - -This package provides shared worker functionality. 
diff --git a/packages/worker/package.json b/packages/worker/package.json deleted file mode 100644 index 5d23bc63ba..0000000000 --- a/packages/worker/package.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "name": "@trigger.dev/worker", - "version": "3.3.8", - "description": "trigger.dev worker", - "license": "MIT", - "publishConfig": { - "access": "public" - }, - "repository": { - "type": "git", - "url": "https://github.com/triggerdotdev/trigger.dev", - "directory": "packages/worker" - }, - "type": "module", - "files": [ - "dist" - ], - "tshy": { - "selfLink": false, - "main": true, - "module": true, - "project": "./tsconfig.src.json", - "exports": { - "./package.json": "./package.json", - ".": "./src/index.ts" - }, - "sourceDialects": [ - "@triggerdotdev/source" - ] - }, - "scripts": { - "clean": "rimraf dist .tshy .tshy-build .turbo", - "build": "tshy && pnpm run update-version", - "dev": "tshy --watch", - "typecheck": "tsc --noEmit -p tsconfig.src.json", - "update-version": "tsx ../../scripts/updateVersion.ts", - "check-exports": "attw --pack ." 
- }, - "dependencies": { - "@trigger.dev/core": "workspace:*", - "socket.io": "4.7.4", - "socket.io-client": "4.7.5", - "zod": "3.23.8" - }, - "devDependencies": { - "@arethetypeswrong/cli": "^0.15.4", - "rimraf": "6.0.1", - "tshy": "^3.0.2", - "tsx": "4.17.0" - }, - "engines": { - "node": ">=18.20.0" - }, - "exports": { - "./package.json": "./package.json", - ".": { - "import": { - "@triggerdotdev/source": "./src/index.ts", - "types": "./dist/esm/index.d.ts", - "default": "./dist/esm/index.js" - }, - "require": { - "types": "./dist/commonjs/index.d.ts", - "default": "./dist/commonjs/index.js" - } - } - }, - "main": "./dist/commonjs/index.js", - "types": "./dist/commonjs/index.d.ts", - "module": "./dist/esm/index.js" -} diff --git a/packages/worker/src/version.ts b/packages/worker/src/version.ts deleted file mode 100644 index 2e47a88682..0000000000 --- a/packages/worker/src/version.ts +++ /dev/null @@ -1 +0,0 @@ -export const VERSION = "0.0.0"; diff --git a/packages/worker/tsconfig.json b/packages/worker/tsconfig.json deleted file mode 100644 index 16881b51b6..0000000000 --- a/packages/worker/tsconfig.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "extends": "../../.configs/tsconfig.base.json", - "references": [ - { - "path": "./tsconfig.src.json" - } - ] -} diff --git a/packages/worker/tsconfig.src.json b/packages/worker/tsconfig.src.json deleted file mode 100644 index db06c53317..0000000000 --- a/packages/worker/tsconfig.src.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "extends": "./tsconfig.json", - "include": ["./src/**/*.ts"], - "compilerOptions": { - "isolatedDeclarations": false, - "composite": true, - "sourceMap": true, - "customConditions": ["@triggerdotdev/source"] - } -} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d9b8a8d5e4..f2f1c89034 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -381,9 +381,6 @@ importers: '@trigger.dev/sdk': specifier: workspace:* version: link:../../packages/trigger-sdk - '@trigger.dev/worker': - specifier: workspace:* - version: 
link:../../packages/worker '@trigger.dev/yalt': specifier: npm:@trigger.dev/yalt version: 2.3.19 @@ -1112,9 +1109,6 @@ importers: '@trigger.dev/core': specifier: workspace:3.3.8 version: link:../core - '@trigger.dev/worker': - specifier: workspace:3.3.8 - version: link:../worker c12: specifier: ^1.11.1 version: 1.11.1(magicast@0.3.4) @@ -1338,6 +1332,9 @@ importers: nanoid: specifier: ^3.3.4 version: 3.3.7 + socket.io: + specifier: 4.7.4 + version: 4.7.4 socket.io-client: specifier: 4.7.5 version: 4.7.5 @@ -1387,9 +1384,6 @@ importers: rimraf: specifier: ^3.0.2 version: 3.0.2 - socket.io: - specifier: 4.7.4 - version: 4.7.4 ts-essentials: specifier: 10.0.1 version: 10.0.1(typescript@5.5.4) @@ -1553,34 +1547,6 @@ importers: specifier: 3.23.8 version: 3.23.8 - packages/worker: - dependencies: - '@trigger.dev/core': - specifier: workspace:* - version: link:../core - socket.io: - specifier: 4.7.4 - version: 4.7.4 - socket.io-client: - specifier: 4.7.5 - version: 4.7.5 - zod: - specifier: 3.23.8 - version: 3.23.8 - devDependencies: - '@arethetypeswrong/cli': - specifier: ^0.15.4 - version: 0.15.4 - rimraf: - specifier: 6.0.1 - version: 6.0.1 - tshy: - specifier: ^3.0.2 - version: 3.0.2 - tsx: - specifier: 4.17.0 - version: 4.17.0 - references/bun-catalog: dependencies: '@trigger.dev/sdk': @@ -15826,6 +15792,7 @@ packages: /@socket.io/component-emitter@3.1.0: resolution: {integrity: sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==} + dev: false /@socket.io/redis-adapter@8.3.0(socket.io-adapter@2.5.4): resolution: {integrity: sha512-ly0cra+48hDmChxmIpnESKrc94LjRL80TEmZVscuQ/WWkRP81nNj8W8cCGMqbI4L6NCuAaPRSzZF1a9GlAxxnA==} @@ -29033,6 +29000,7 @@ packages: - bufferutil - supports-color - utf-8-validate + dev: false /socket.io-client@4.7.3: resolution: {integrity: sha512-nU+ywttCyBitXIl9Xe0RSEfek4LneYkJxCeNnKCuhwoH4jGXO1ipIUw/VA/+Vvv2G1MTym11fzFC0SxkrcfXDw==} @@ -29070,6 +29038,7 @@ packages: debug: 4.3.7 
transitivePeerDependencies: - supports-color + dev: false /socket.io@4.7.3: resolution: {integrity: sha512-SE+UIQXBQE+GPG2oszWMlsEmWtHVqw/h1VrYJGK5/MC7CH5p58N448HwIrtREcvR4jfdOJAY4ieQfxMr55qbbw==} @@ -29103,6 +29072,7 @@ packages: - bufferutil - supports-color - utf-8-validate + dev: false /socket.io@4.7.5: resolution: {integrity: sha512-DmeAkF6cwM9jSfmp6Dr/5/mfMwb5Z5qRrSXLpo3Fq5SqyU8CMF15jIN4ZhfSwu35ksM1qmHZDQ/DK5XTccSTvA==} From 74dd91f138cde9d273c61f9ade2353985f18649e Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:00:10 +0000 Subject: [PATCH 337/485] fix ReadableStream type error --- .configs/tsconfig.base.json | 2 +- packages/core/src/v3/apiClient/runStream.ts | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.configs/tsconfig.base.json b/.configs/tsconfig.base.json index 7224a9a85d..3ce4c2db29 100644 --- a/.configs/tsconfig.base.json +++ b/.configs/tsconfig.base.json @@ -1,7 +1,7 @@ { "compilerOptions": { "target": "es2022", - "lib": ["ES2022", "DOM", "DOM.Iterable"], + "lib": ["ES2022", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], "module": "NodeNext", "moduleResolution": "NodeNext", "moduleDetection": "force", diff --git a/packages/core/src/v3/apiClient/runStream.ts b/packages/core/src/v3/apiClient/runStream.ts index f8c56b746d..afa0707bdd 100644 --- a/packages/core/src/v3/apiClient/runStream.ts +++ b/packages/core/src/v3/apiClient/runStream.ts @@ -505,7 +505,6 @@ const isSafari = () => { */ if (isSafari()) { - // @ts-expect-error ReadableStream.prototype.values ??= function ({ preventCancel = false } = {}) { const reader = this.getReader(); return { @@ -521,7 +520,7 @@ if (isSafari()) { throw e; } }, - async return(value: unknown) { + async return(value: any) { if (!preventCancel) { const cancelPromise = reader.cancel(value); reader.releaseLock(); @@ -537,6 +536,5 @@ if (isSafari()) { }; }; - // @ts-expect-error ReadableStream.prototype[Symbol.asyncIterator] ??= 
ReadableStream.prototype.values; } From 4f8ce0b1e0336b9f71758d7074cd47a74d7dbafd Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:16:00 +0000 Subject: [PATCH 338/485] fix another type issue --- packages/core/src/v3/apiClient/runStream.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/packages/core/src/v3/apiClient/runStream.ts b/packages/core/src/v3/apiClient/runStream.ts index afa0707bdd..c6dcf6912b 100644 --- a/packages/core/src/v3/apiClient/runStream.ts +++ b/packages/core/src/v3/apiClient/runStream.ts @@ -508,19 +508,22 @@ if (isSafari()) { ReadableStream.prototype.values ??= function ({ preventCancel = false } = {}) { const reader = this.getReader(); return { - async next() { + async next(): Promise> { try { const result = await reader.read(); if (result.done) { reader.releaseLock(); } - return result; + return { + done: result.done, + value: result.value, + }; } catch (e) { reader.releaseLock(); throw e; } }, - async return(value: any) { + async return(value: any): Promise> { if (!preventCancel) { const cancelPromise = reader.cancel(value); reader.releaseLock(); From b8cb8dc0ab509ec1244d397954e5339c3503912f Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:29:31 +0000 Subject: [PATCH 339/485] update a few more tsconfigs --- apps/coordinator/tsconfig.json | 4 +--- apps/kubernetes-provider/tsconfig.json | 2 +- apps/webapp/tsconfig.json | 2 +- internal-packages/redis-worker/tsconfig.json | 2 +- internal-packages/run-engine/tsconfig.json | 2 +- internal-packages/testcontainers/tsconfig.json | 2 +- internal-packages/zod-worker/tsconfig.json | 2 +- 7 files changed, 7 insertions(+), 9 deletions(-) diff --git a/apps/coordinator/tsconfig.json b/apps/coordinator/tsconfig.json index 3c03760853..15cdfe9c1e 100644 --- a/apps/coordinator/tsconfig.json +++ b/apps/coordinator/tsconfig.json @@ -1,8 +1,6 @@ { - "include": 
["./src/**/*.ts"], - "exclude": ["node_modules"], "compilerOptions": { - "target": "es2016", + "target": "es2018", "module": "commonjs", "esModuleInterop": true, "resolveJsonModule": true, diff --git a/apps/kubernetes-provider/tsconfig.json b/apps/kubernetes-provider/tsconfig.json index 661823ef74..3a866dd2b8 100644 --- a/apps/kubernetes-provider/tsconfig.json +++ b/apps/kubernetes-provider/tsconfig.json @@ -1,6 +1,6 @@ { "compilerOptions": { - "target": "es2016", + "target": "es2018", "module": "commonjs", "esModuleInterop": true, "forceConsistentCasingInFileNames": true, diff --git a/apps/webapp/tsconfig.json b/apps/webapp/tsconfig.json index bb6a03acc1..7b2bd165ac 100644 --- a/apps/webapp/tsconfig.json +++ b/apps/webapp/tsconfig.json @@ -3,7 +3,7 @@ "include": ["remix.env.d.ts", "global.d.ts", "**/*.ts", "**/*.tsx"], "compilerOptions": { "types": ["vitest/globals"], - "lib": ["DOM", "DOM.Iterable", "ES2019"], + "lib": ["DOM", "DOM.Iterable", "DOM.AsyncIterable", "ES2019"], "isolatedModules": true, "esModuleInterop": true, "jsx": "react-jsx", diff --git a/internal-packages/redis-worker/tsconfig.json b/internal-packages/redis-worker/tsconfig.json index 766df37eae..ff096d3e9f 100644 --- a/internal-packages/redis-worker/tsconfig.json +++ b/internal-packages/redis-worker/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "target": "ES2019", - "lib": ["ES2019", "DOM", "DOM.Iterable"], + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], "module": "CommonJS", "moduleResolution": "Node", "moduleDetection": "force", diff --git a/internal-packages/run-engine/tsconfig.json b/internal-packages/run-engine/tsconfig.json index 0ac9414b19..b09603c6a3 100644 --- a/internal-packages/run-engine/tsconfig.json +++ b/internal-packages/run-engine/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "target": "ES2019", - "lib": ["ES2019", "DOM", "DOM.Iterable"], + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], "module": "CommonJS", "moduleResolution": 
"Node", "moduleDetection": "force", diff --git a/internal-packages/testcontainers/tsconfig.json b/internal-packages/testcontainers/tsconfig.json index 4a36f08ffc..e5cea6ed2d 100644 --- a/internal-packages/testcontainers/tsconfig.json +++ b/internal-packages/testcontainers/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "target": "es2022", - "lib": ["ES2022", "DOM", "DOM.Iterable"], + "lib": ["ES2022", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], "module": "CommonJS", "moduleResolution": "Node", "moduleDetection": "force", diff --git a/internal-packages/zod-worker/tsconfig.json b/internal-packages/zod-worker/tsconfig.json index 66ecfc9677..e43f8c0490 100644 --- a/internal-packages/zod-worker/tsconfig.json +++ b/internal-packages/zod-worker/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "target": "ES2019", - "lib": ["ES2019", "DOM", "DOM.Iterable"], + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], "module": "CommonJS", "moduleResolution": "Node", "moduleDetection": "force", From b12b61749f3cdf1500ac1cefa9b549bd7fc52cb4 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Wed, 8 Jan 2025 15:43:10 +0000 Subject: [PATCH 340/485] add metadata changes introduced in #1563 --- .../app/models/runtimeEnvironment.server.ts | 39 +++++++++++++-- .../routeBuilders/apiBuilder.server.ts | 49 +++++++++++++++++++ .../webapp/app/v3/runEngineHandlers.server.ts | 27 ++++++++++ .../app/v3/services/completeAttempt.server.ts | 1 - .../run-engine/src/engine/eventBus.ts | 11 ++++- .../run-engine/src/engine/index.ts | 10 ++++ .../src/entryPoints/managed-run-worker.ts | 7 +++ .../src/entryPoints/unmanaged-run-worker.ts | 7 +++ 8 files changed, 145 insertions(+), 6 deletions(-) diff --git a/apps/webapp/app/models/runtimeEnvironment.server.ts b/apps/webapp/app/models/runtimeEnvironment.server.ts index 868a39bf11..6ada43d170 100644 --- a/apps/webapp/app/models/runtimeEnvironment.server.ts +++ 
b/apps/webapp/app/models/runtimeEnvironment.server.ts @@ -1,10 +1,13 @@ -import type { Prisma, RuntimeEnvironment } from "@trigger.dev/database"; +import { AuthenticatedEnvironment } from "@internal/testcontainers"; +import type { Prisma, PrismaClientOrTransaction, RuntimeEnvironment } from "@trigger.dev/database"; import { prisma } from "~/db.server"; import { getUsername } from "~/utils/username"; export type { RuntimeEnvironment }; -export async function findEnvironmentByApiKey(apiKey: string) { +export async function findEnvironmentByApiKey( + apiKey: string +): Promise { const environment = await prisma.runtimeEnvironment.findUnique({ where: { apiKey, @@ -24,7 +27,9 @@ export async function findEnvironmentByApiKey(apiKey: string) { return environment; } -export async function findEnvironmentByPublicApiKey(apiKey: string) { +export async function findEnvironmentByPublicApiKey( + apiKey: string +): Promise { const environment = await prisma.runtimeEnvironment.findUnique({ where: { pkApiKey: apiKey, @@ -44,7 +49,7 @@ export async function findEnvironmentByPublicApiKey(apiKey: string) { return environment; } -export async function findEnvironmentById(id: string) { +export async function findEnvironmentById(id: string): Promise { const environment = await prisma.runtimeEnvironment.findUnique({ where: { id, @@ -64,6 +69,32 @@ export async function findEnvironmentById(id: string) { return environment; } +export async function findEnvironmentFromRun( + runId: string, + tx?: PrismaClientOrTransaction +): Promise { + const taskRun = await (tx ?? 
prisma).taskRun.findUnique({ + where: { + id: runId, + }, + include: { + runtimeEnvironment: { + include: { + project: true, + organization: true, + orgMember: true, + }, + }, + }, + }); + + if (!taskRun) { + return null; + } + + return taskRun?.runtimeEnvironment; +} + export async function createNewSession(environment: RuntimeEnvironment, ipAddress: string) { const session = await prisma.runtimeEnvironmentSession.create({ data: { diff --git a/apps/webapp/app/services/routeBuilders/apiBuilder.server.ts b/apps/webapp/app/services/routeBuilders/apiBuilder.server.ts index 28b9c66bf0..8ce3728727 100644 --- a/apps/webapp/app/services/routeBuilders/apiBuilder.server.ts +++ b/apps/webapp/app/services/routeBuilders/apiBuilder.server.ts @@ -230,6 +230,19 @@ export function createLoaderApiRoute< if (error instanceof Response) { return await wrapResponse(request, error, corsStrategy !== "none"); } + + logger.error("Error in loader", { + error: + error instanceof Error + ? { + name: error.name, + message: error.message, + stack: error.stack, + } + : String(error), + url: request.url, + }); + return await wrapResponse( request, json({ error: "Internal Server Error" }, { status: 500 }), @@ -770,6 +783,19 @@ export function createLoaderWorkerApiRoute< if (error instanceof Response) { return error; } + + logger.error("Error in loader", { + error: + error instanceof Error + ? 
{ + name: error.name, + message: error.message, + stack: error.stack, + } + : String(error), + url: request.url, + }); + return json({ error: "Internal Server Error" }, { status: 500 }); } }; @@ -785,6 +811,7 @@ type WorkerActionRouteBuilderOptions< searchParams?: TSearchParamsSchema; headers?: THeadersSchema; body?: TBodySchema; + method?: "POST" | "PUT" | "DELETE" | "PATCH"; }; type WorkerActionHandlerFunction< @@ -823,6 +850,15 @@ export function createActionWorkerApiRoute< > ) { return async function action({ request, params }: ActionFunctionArgs) { + if (options.method) { + if (request.method.toUpperCase() !== options.method) { + return json( + { error: "Method not allowed" }, + { status: 405, headers: { Allow: options.method } } + ); + } + } + const { params: paramsSchema, searchParams: searchParamsSchema, @@ -903,6 +939,19 @@ export function createActionWorkerApiRoute< if (error instanceof Response) { return error; } + + logger.error("Error in action", { + error: + error instanceof Error + ? 
{ + name: error.name, + message: error.message, + stack: error.stack, + } + : String(error), + url: request.url, + }); + return json({ error: "Internal Server Error" }, { status: 500 }); } }; diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index 952f72847c..bb1e239be0 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -9,6 +9,8 @@ import { roomFromFriendlyRunId, socketIo } from "./handleSocketIo.server"; import { engine } from "./runEngine.server"; import { PerformTaskRunAlertsService } from "./services/alerts/performTaskRunAlerts.server"; import { RunId } from "@trigger.dev/core/v3/apps"; +import { updateMetadataService } from "~/services/metadata/updateMetadata.server"; +import { findEnvironmentFromRun } from "~/models/runtimeEnvironment.server"; export function registerRunEngineEventBusHandlers() { engine.eventBus.on("runSucceeded", async ({ time, run }) => { @@ -235,6 +237,31 @@ export function registerRunEngineEventBusHandlers() { } }); + engine.eventBus.on("runMetadataUpdated", async ({ time, run }) => { + const env = await findEnvironmentFromRun(run.id); + + if (!env) { + logger.error("[runMetadataUpdated] Failed to find environment", { runId: run.id }); + return; + } + + try { + await updateMetadataService.call(env, run.id, run.metadata); + } catch (e) { + logger.error("[runMetadataUpdated] Failed to update metadata", { + taskRun: run.id, + error: + e instanceof Error + ? 
{ + name: e.name, + message: e.message, + stack: e.stack, + } + : e, + }); + } + }); + engine.eventBus.on("executionSnapshotCreated", async ({ time, run, snapshot }) => { try { const foundRun = await prisma.taskRun.findUnique({ diff --git a/apps/webapp/app/v3/services/completeAttempt.server.ts b/apps/webapp/app/v3/services/completeAttempt.server.ts index 502fc71eea..866175fa83 100644 --- a/apps/webapp/app/v3/services/completeAttempt.server.ts +++ b/apps/webapp/app/v3/services/completeAttempt.server.ts @@ -28,7 +28,6 @@ import { CancelAttemptService } from "./cancelAttempt.server"; import { CreateCheckpointService } from "./createCheckpoint.server"; import { FinalizeTaskRunService } from "./finalizeTaskRun.server"; import { RetryAttemptService } from "./retryAttempt.server"; -import { updateMetadataService } from "~/services/metadata/updateMetadata.server"; type FoundAttempt = Awaited>; diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 7c54638935..b36e886694 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -1,6 +1,6 @@ import { TaskRunExecutionStatus, TaskRunStatus } from "@trigger.dev/database"; import { AuthenticatedEnvironment } from "../shared"; -import { TaskRunError } from "@trigger.dev/core/v3"; +import { FlushedRunMetadata, TaskRunError } from "@trigger.dev/core/v3"; export type EventBusEvents = { runAttemptStarted: [ @@ -79,6 +79,15 @@ export type EventBusEvents = { }; }, ]; + runMetadataUpdated: [ + { + time: Date; + run: { + id: string; + metadata: FlushedRunMetadata; + }; + }, + ]; workerNotification: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 2cca839d54..8cdfb07e11 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1185,6 +1185,16 @@ export 
class RunEngine { snapshotId: string; completion: TaskRunExecutionResult; }): Promise { + if (completion.metadata) { + this.eventBus.emit("runMetadataUpdated", { + time: new Date(), + run: { + id: runId, + metadata: completion.metadata, + }, + }); + } + switch (completion.ok) { case true: { return this.#attemptSucceeded({ runId, snapshotId, completion, tx: this.prisma }); diff --git a/packages/cli-v3/src/entryPoints/managed-run-worker.ts b/packages/cli-v3/src/entryPoints/managed-run-worker.ts index e1c38c836d..e1355e19b7 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-worker.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-worker.ts @@ -217,6 +217,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -247,6 +248,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -278,6 +280,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -304,6 +307,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -356,6 +360,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: usageSample.cpuTime, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); } @@ -379,6 +384,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: usageSample.cpuTime, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); } @@ -401,6 +407,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); } diff --git a/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts index c2f03fb052..f6fc15b887 100644 --- a/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts +++ 
b/packages/cli-v3/src/entryPoints/unmanaged-run-worker.ts @@ -217,6 +217,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -247,6 +248,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -278,6 +280,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -304,6 +307,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); @@ -356,6 +360,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: usageSample.cpuTime, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); } @@ -379,6 +384,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: usageSample.cpuTime, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); } @@ -401,6 +407,7 @@ const zodIpc = new ZodIpcConnection({ usage: { durationMs: 0, }, + metadata: runMetadataManager.stopAndReturnLastFlush(), }, }); } From c8b835aa3b36d555c4aea7869528e686a7f96586 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 15 Jan 2025 12:49:59 +0000 Subject: [PATCH 341/485] Run Engine 2.0 trigger idempotency (#1613) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Return isCached from the trigger API endpoint * Fix for the wrong type when blocking a run * Render the idempotent run in the inspector * Event repository for idempotency * Debug events off by default, added an admin toggle to show them * triggerAndWait idempotency span * Some improvements to the reference idempotency task * Removed the cached tracing from the SDK * Server-side creating cached span * Improved idempotency test task * Create cached task spans in a better way * Idempotency span support inc batch trigger * Simplified how the spans are done, using 
more of the existing code * Improved the idempotency test task * Added Waitpoint Batch type, add to TaskRunWaitpoint with order * Pass batch ids through to the run engine when triggering * Added batchIndex * Better batch support in the run engine * Added settings to batch trigger service, before major overhaul * Allow the longer run/batch ids in the filters * Changed how batching works, includes breaking changes in CLI * Removed batch idempotency because it gets put on the runs instead * Added `runs` to the batch.retrieve call/API * Set firstAttemptStartedAt when creating the first attempt * Do nothing when receiving a BATCH waitpoint * Some fixes in the new batch trigger service… mostly just passing missing optional params through * Tweaked the idempotency test task for more situations * Only block with a batch if it’s a batchTriggerAndWait… 🤦‍♂️ * Added another case to the idempotency test task: multiple of the same idempotencyKey in a single batch * Support for the same run multiple times in the same batch * Small tweaks * Make sure to complete batches, even if they’re not andWait ones * Export RunDuplicateIdempotencyKeyError from the run engine --- .../app/assets/icons/TaskCachedIcon.tsx | 49 ++ .../app/components/runs/v3/BatchFilters.tsx | 4 +- .../app/components/runs/v3/RunFilters.tsx | 4 +- .../webapp/app/components/runs/v3/RunIcon.tsx | 3 + .../app/presenters/v3/SpanPresenter.server.ts | 65 ++- .../routes/api.v1.tasks.$taskId.trigger.ts | 1 + apps/webapp/app/routes/api.v1.tasks.batch.ts | 12 +- .../app/routes/api.v2.batches.$batchId.ts | 40 ++ apps/webapp/app/routes/api.v2.tasks.batch.ts | 154 +++++++ .../route.tsx | 34 +- apps/webapp/app/v3/eventRepository.server.ts | 43 +- .../webapp/app/v3/runEngineHandlers.server.ts | 23 + .../app/v3/services/batchTriggerV3.server.ts | 419 ++++-------------- .../app/v3/services/triggerTask.server.ts | 9 +- .../app/v3/services/triggerTaskV2.server.ts | 114 +++-- .../migration.sql | 2 + .../migration.sql | 13 + 
.../migration.sql | 8 + .../migration.sql | 2 + .../migration.sql | 14 + .../database/prisma/schema.prisma | 36 +- .../run-engine/src/engine/eventBus.ts | 7 + .../src/engine/executionSnapshots.ts | 39 +- .../run-engine/src/engine/index.ts | 365 +++++++++++---- .../src/engine/tests/batchTrigger.test.ts | 4 +- .../engine/tests/batchTriggerAndWait.test.ts | 363 +++++++++++++++ .../src/engine/tests/triggerAndWait.test.ts | 2 +- .../src/engine/tests/waitpoints.test.ts | 6 +- .../run-engine/src/engine/types.ts | 5 +- internal-packages/run-engine/src/index.ts | 2 +- packages/core/src/v3/apiClient/index.ts | 26 +- packages/core/src/v3/idempotencyKeys.ts | 22 + .../core/src/v3/runtime/devRuntimeManager.ts | 49 +- packages/core/src/v3/runtime/index.ts | 2 +- .../src/v3/runtime/managedRuntimeManager.ts | 32 +- packages/core/src/v3/runtime/manager.ts | 2 +- .../core/src/v3/runtime/noopRuntimeManager.ts | 2 +- .../core/src/v3/runtime/prodRuntimeManager.ts | 46 +- .../src/v3/runtime/unmanagedRuntimeManager.ts | 40 +- packages/core/src/v3/schemas/api.ts | 36 ++ packages/core/src/v3/schemas/runEngine.ts | 22 + .../core/src/v3/semanticInternalAttributes.ts | 1 + packages/core/src/v3/types/tasks.ts | 20 +- packages/trigger-sdk/src/v3/shared.ts | 193 ++++---- references/hello-world/src/trigger/example.ts | 5 +- .../hello-world/src/trigger/idempotency.ts | 322 +++++++++++--- 46 files changed, 1871 insertions(+), 791 deletions(-) create mode 100644 apps/webapp/app/assets/icons/TaskCachedIcon.tsx create mode 100644 apps/webapp/app/routes/api.v2.batches.$batchId.ts create mode 100644 apps/webapp/app/routes/api.v2.tasks.batch.ts create mode 100644 internal-packages/database/prisma/migrations/20250106172943_added_span_id_to_complete_to_task_run_waitpoint/migration.sql create mode 100644 internal-packages/database/prisma/migrations/20250109131442_added_batch_and_index_to_task_run_waitpoint_and_task_run_execution_snapshot/migration.sql create mode 100644 
internal-packages/database/prisma/migrations/20250109173506_waitpoint_added_batch_type/migration.sql create mode 100644 internal-packages/database/prisma/migrations/20250109175955_waitpoint_added_completed_by_batch_id_index/migration.sql create mode 100644 internal-packages/database/prisma/migrations/20250114153223_task_run_waitpoint_unique_constraint_added_batch_index/migration.sql create mode 100644 internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts diff --git a/apps/webapp/app/assets/icons/TaskCachedIcon.tsx b/apps/webapp/app/assets/icons/TaskCachedIcon.tsx new file mode 100644 index 0000000000..650f9be396 --- /dev/null +++ b/apps/webapp/app/assets/icons/TaskCachedIcon.tsx @@ -0,0 +1,49 @@ +export function TaskCachedIcon({ className }: { className?: string }) { + return ( + + + + + + + + + + + + + + + + + + + ); +} diff --git a/apps/webapp/app/components/runs/v3/BatchFilters.tsx b/apps/webapp/app/components/runs/v3/BatchFilters.tsx index f7ab261d4e..69c342fe67 100644 --- a/apps/webapp/app/components/runs/v3/BatchFilters.tsx +++ b/apps/webapp/app/components/runs/v3/BatchFilters.tsx @@ -359,8 +359,8 @@ function BatchIdDropdown({ if (batchId) { if (!batchId.startsWith("batch_")) { error = "Batch IDs start with 'batch_'"; - } else if (batchId.length !== 27) { - error = "Batch IDs are 27 characters long"; + } else if (batchId.length !== 27 && batchId.length !== 31) { + error = "Batch IDs are 27/32 characters long"; } } diff --git a/apps/webapp/app/components/runs/v3/RunFilters.tsx b/apps/webapp/app/components/runs/v3/RunFilters.tsx index 6cc75a6129..0937ccf945 100644 --- a/apps/webapp/app/components/runs/v3/RunFilters.tsx +++ b/apps/webapp/app/components/runs/v3/RunFilters.tsx @@ -763,8 +763,8 @@ function RunIdDropdown({ if (runId) { if (!runId.startsWith("run_")) { error = "Run IDs start with 'run_'"; - } else if (runId.length !== 25) { - error = "Run IDs are 25 characters long"; + } else if (runId.length !== 25 && runId.length !== 29) { + 
error = "Run IDs are 25/30 characters long"; } } diff --git a/apps/webapp/app/components/runs/v3/RunIcon.tsx b/apps/webapp/app/components/runs/v3/RunIcon.tsx index 41e442bf72..84c386706d 100644 --- a/apps/webapp/app/components/runs/v3/RunIcon.tsx +++ b/apps/webapp/app/components/runs/v3/RunIcon.tsx @@ -7,6 +7,7 @@ import { } from "@heroicons/react/20/solid"; import { AttemptIcon } from "~/assets/icons/AttemptIcon"; import { TaskIcon } from "~/assets/icons/TaskIcon"; +import { TaskCachedIcon } from "~/assets/icons/TaskCachedIcon"; import { NamedIcon } from "~/components/primitives/NamedIcon"; import { cn } from "~/utils/cn"; @@ -41,6 +42,8 @@ export function RunIcon({ name, className, spanName }: TaskIconProps) { switch (name) { case "task": return ; + case "task-cached": + return ; case "scheduled": return ; case "attempt": diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index c8a9b50529..ae902d1317 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -2,6 +2,7 @@ import { MachinePresetName, parsePacket, prettyPrintPacket, + SemanticInternalAttributes, TaskRunError, } from "@trigger.dev/core/v3"; import { RUNNING_STATUSES } from "~/components/runs/v3/TaskRunStatus"; @@ -39,7 +40,22 @@ export class SpanPresenter extends BasePresenter { throw new Error("Project not found"); } - const run = await this.getRun(spanId); + const parentRun = await this._prisma.taskRun.findFirst({ + select: { + traceId: true, + }, + where: { + friendlyId: runFriendlyId, + }, + }); + + if (!parentRun) { + return; + } + + const { traceId } = parentRun; + + const run = await this.getRun(traceId, spanId); if (run) { return { type: "run" as const, @@ -48,7 +64,7 @@ export class SpanPresenter extends BasePresenter { } //get the run - const span = await this.getSpan(runFriendlyId, spanId); + const span = await this.getSpan(traceId, spanId); if 
(!span) { throw new Error("Span not found"); @@ -60,10 +76,17 @@ export class SpanPresenter extends BasePresenter { }; } - async getRun(spanId: string) { + async getRun(traceId: string, spanId: string) { + const span = await eventRepository.getSpan(spanId, traceId); + + if (!span) { + return; + } + const run = await this._replica.taskRun.findFirst({ select: { id: true, + spanId: true, traceId: true, //metadata number: true, @@ -92,6 +115,7 @@ export class SpanPresenter extends BasePresenter { //status + duration status: true, startedAt: true, + firstAttemptStartedAt: true, createdAt: true, updatedAt: true, queuedAt: true, @@ -99,6 +123,7 @@ export class SpanPresenter extends BasePresenter { logsDeletedAt: true, //idempotency idempotencyKey: true, + idempotencyKeyExpiresAt: true, //delayed delayUntil: true, //ttl @@ -161,9 +186,13 @@ export class SpanPresenter extends BasePresenter { }, }, }, - where: { - spanId, - }, + where: span.originalRun + ? { + friendlyId: span.originalRun, + } + : { + spanId, + }, }); if (!run) { @@ -238,8 +267,6 @@ export class SpanPresenter extends BasePresenter { } } - const span = await eventRepository.getSpan(spanId, run.traceId); - const metadata = run.metadata ? await prettyPrintPacket(run.metadata, run.metadataType, { filteredKeys: ["$$streams", "$$streamsVersion", "$$streamsBaseUrl"], @@ -296,6 +323,7 @@ export class SpanPresenter extends BasePresenter { status: run.status, createdAt: run.createdAt, startedAt: run.startedAt, + firstAttemptStartedAt: run.firstAttemptStartedAt, updatedAt: run.updatedAt, delayUntil: run.delayUntil, expiredAt: run.expiredAt, @@ -307,6 +335,8 @@ export class SpanPresenter extends BasePresenter { sdkVersion: run.lockedToVersion?.sdkVersion, isTest: run.isTest, environmentId: run.runtimeEnvironment.id, + idempotencyKey: run.idempotencyKey, + idempotencyKeyExpiresAt: run.idempotencyKeyExpiresAt, schedule: run.schedule ? 
{ friendlyId: run.schedule.friendlyId, @@ -349,24 +379,13 @@ export class SpanPresenter extends BasePresenter { engine: run.engine, masterQueue: run.masterQueue, secondaryMasterQueue: run.secondaryMasterQueue, + spanId: run.spanId, + isCached: !!span.originalRun, }; } - async getSpan(runFriendlyId: string, spanId: string) { - const run = await this._prisma.taskRun.findFirst({ - select: { - traceId: true, - }, - where: { - friendlyId: runFriendlyId, - }, - }); - - if (!run) { - return; - } - - const span = await eventRepository.getSpan(spanId, run.traceId); + async getSpan(traceId: string, spanId: string) { + const span = await eventRepository.getSpan(spanId, traceId); if (!span) { return; diff --git a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts index 96eec3ba31..2bb709080f 100644 --- a/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts +++ b/apps/webapp/app/routes/api.v1.tasks.$taskId.trigger.ts @@ -108,6 +108,7 @@ const { action, loader } = createActionApiRoute( return json( { id: run.friendlyId, + isCached: run.isCached, }, { headers: $responseHeaders, diff --git a/apps/webapp/app/routes/api.v1.tasks.batch.ts b/apps/webapp/app/routes/api.v1.tasks.batch.ts index 591d04f0ce..c989e950be 100644 --- a/apps/webapp/app/routes/api.v1.tasks.batch.ts +++ b/apps/webapp/app/routes/api.v1.tasks.batch.ts @@ -9,13 +9,11 @@ import { AuthenticatedEnvironment, getOneTimeUseToken } from "~/services/apiAuth import { logger } from "~/services/logger.server"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; -import { determineEngineVersion } from "~/v3/engineVersion.server"; import { ServiceValidationError } from "~/v3/services/baseService.server"; import { BatchProcessingStrategy, BatchTriggerV2Service, } from "~/v3/services/batchTriggerV2.server"; -import { BatchTriggerV3Service } from 
"~/v3/services/batchTriggerV3.server"; import { OutOfEntitlementError } from "~/v3/services/triggerTask.server"; import { HeadersSchema } from "./api.v1.tasks.$taskId.trigger"; @@ -88,15 +86,7 @@ const { action, loader } = createActionApiRoute( resolveIdempotencyKeyTTL(idempotencyKeyTTL) ?? new Date(Date.now() + 24 * 60 * 60 * 1000 * 30); - const version = await determineEngineVersion({ - environment: authentication.environment, - version: engineVersion ?? undefined, - }); - - const service = - version === "V1" - ? new BatchTriggerV2Service(batchProcessingStrategy ?? undefined) - : new BatchTriggerV3Service(batchProcessingStrategy ?? undefined); + const service = new BatchTriggerV2Service(batchProcessingStrategy ?? undefined); try { const batch = await service.call(authentication.environment, body, { diff --git a/apps/webapp/app/routes/api.v2.batches.$batchId.ts b/apps/webapp/app/routes/api.v2.batches.$batchId.ts new file mode 100644 index 0000000000..150978331e --- /dev/null +++ b/apps/webapp/app/routes/api.v2.batches.$batchId.ts @@ -0,0 +1,40 @@ +import { json } from "@remix-run/server-runtime"; +import { z } from "zod"; +import { $replica } from "~/db.server"; +import { createLoaderApiRoute } from "~/services/routeBuilders/apiBuilder.server"; + +const ParamsSchema = z.object({ + batchId: z.string(), +}); + +export const loader = createLoaderApiRoute( + { + params: ParamsSchema, + allowJWT: true, + corsStrategy: "all", + findResource: (params, auth) => { + return $replica.batchTaskRun.findFirst({ + where: { + friendlyId: params.batchId, + runtimeEnvironmentId: auth.environment.id, + }, + }); + }, + authorization: { + action: "read", + resource: (batch) => ({ batch: batch.friendlyId }), + superScopes: ["read:runs", "read:all", "admin"], + }, + }, + async ({ resource: batch }) => { + return json({ + id: batch.friendlyId, + status: batch.status, + idempotencyKey: batch.idempotencyKey ?? 
undefined, + createdAt: batch.createdAt, + updatedAt: batch.updatedAt, + runCount: batch.runCount, + runs: batch.runIds, + }); + } +); diff --git a/apps/webapp/app/routes/api.v2.tasks.batch.ts b/apps/webapp/app/routes/api.v2.tasks.batch.ts new file mode 100644 index 0000000000..72f271e9cf --- /dev/null +++ b/apps/webapp/app/routes/api.v2.tasks.batch.ts @@ -0,0 +1,154 @@ +import { json } from "@remix-run/server-runtime"; +import { + BatchTriggerTaskV3RequestBody, + BatchTriggerTaskV3Response, + generateJWT, +} from "@trigger.dev/core/v3"; +import { env } from "~/env.server"; +import { AuthenticatedEnvironment, getOneTimeUseToken } from "~/services/apiAuth.server"; +import { logger } from "~/services/logger.server"; +import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; +import { ServiceValidationError } from "~/v3/services/baseService.server"; +import { + BatchProcessingStrategy, + BatchTriggerV3Service, +} from "~/v3/services/batchTriggerV3.server"; +import { OutOfEntitlementError } from "~/v3/services/triggerTask.server"; +import { HeadersSchema } from "./api.v1.tasks.$taskId.trigger"; + +const { action, loader } = createActionApiRoute( + { + headers: HeadersSchema.extend({ + "batch-processing-strategy": BatchProcessingStrategy.nullish(), + }), + body: BatchTriggerTaskV3RequestBody, + allowJWT: true, + maxContentLength: env.BATCH_TASK_PAYLOAD_MAXIMUM_SIZE, + authorization: { + action: "batchTrigger", + resource: (_, __, ___, body) => ({ + tasks: Array.from(new Set(body.items.map((i) => i.task))), + }), + superScopes: ["write:tasks", "admin"], + }, + corsStrategy: "all", + }, + async ({ body, headers, params, authentication }) => { + if (!body.items.length) { + return json({ error: "Batch cannot be triggered with no items" }, { status: 400 }); + } + + // Check the there are fewer than MAX_BATCH_V2_TRIGGER_ITEMS items + if (body.items.length > env.MAX_BATCH_V2_TRIGGER_ITEMS) { + return json( + { + error: `Batch size of 
${body.items.length} is too large. Maximum allowed batch size is ${env.MAX_BATCH_V2_TRIGGER_ITEMS}.`, + }, + { status: 400 } + ); + } + + const { + "trigger-version": triggerVersion, + "x-trigger-span-parent-as-link": spanParentAsLink, + "x-trigger-worker": isFromWorker, + "x-trigger-client": triggerClient, + "x-trigger-engine-version": engineVersion, + "batch-processing-strategy": batchProcessingStrategy, + traceparent, + tracestate, + } = headers; + + const oneTimeUseToken = await getOneTimeUseToken(authentication); + + logger.debug("Batch trigger request", { + triggerVersion, + spanParentAsLink, + isFromWorker, + triggerClient, + traceparent, + tracestate, + batchProcessingStrategy, + }); + + const traceContext = + traceparent && isFromWorker // If the request is from a worker, we should pass the trace context + ? { traceparent, tracestate } + : undefined; + + const service = new BatchTriggerV3Service(batchProcessingStrategy ?? undefined); + + try { + const batch = await service.call(authentication.environment, body, { + triggerVersion: triggerVersion ?? 
undefined, + traceContext, + spanParentAsLink: spanParentAsLink === 1, + oneTimeUseToken, + }); + + const $responseHeaders = await responseHeaders( + batch, + authentication.environment, + triggerClient + ); + + return json(batch, { status: 202, headers: $responseHeaders }); + } catch (error) { + logger.error("Batch trigger error", { + error: { + message: (error as Error).message, + stack: (error as Error).stack, + }, + }); + + if (error instanceof ServiceValidationError) { + return json({ error: error.message }, { status: 422 }); + } else if (error instanceof OutOfEntitlementError) { + return json({ error: error.message }, { status: 422 }); + } else if (error instanceof Error) { + return json( + { error: error.message }, + { status: 500, headers: { "x-should-retry": "false" } } + ); + } + + return json({ error: "Something went wrong" }, { status: 500 }); + } + } +); + +async function responseHeaders( + batch: BatchTriggerTaskV3Response, + environment: AuthenticatedEnvironment, + triggerClient?: string | null +): Promise> { + const claimsHeader = JSON.stringify({ + sub: environment.id, + pub: true, + }); + + if (triggerClient === "browser") { + const claims = { + sub: environment.id, + pub: true, + scopes: [`read:batch:${batch.id}`], + }; + + const jwt = await generateJWT({ + secretKey: environment.apiKey, + payload: claims, + expirationTime: "1h", + }); + + return { + "x-trigger-jwt-claims": claimsHeader, + "x-trigger-jwt": jwt, + }; + } + + return { + "x-trigger-jwt-claims": claimsHeader, + }; +} + +export { action, loader }; diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 1594e82cfd..f1fdd77ea2 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ 
b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -44,7 +44,6 @@ import { RunTag } from "~/components/runs/v3/RunTag"; import { SpanEvents } from "~/components/runs/v3/SpanEvents"; import { SpanTitle } from "~/components/runs/v3/SpanTitle"; import { TaskRunAttemptStatusCombo } from "~/components/runs/v3/TaskRunAttemptStatus"; -import { TaskRunsTable } from "~/components/runs/v3/TaskRunsTable"; import { TaskRunStatusCombo } from "~/components/runs/v3/TaskRunStatus"; import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; @@ -58,7 +57,6 @@ import { cn } from "~/utils/cn"; import { formatCurrencyAccurate } from "~/utils/numberFormatter"; import { v3BatchPath, - v3BatchRunsPath, v3RunDownloadLogsPath, v3RunPath, v3RunSpanPath, @@ -427,12 +425,15 @@ function RunBody({
- {run.taskIdentifier} + + {run.taskIdentifier} + {run.isCached ? " (cached)" : null} +
{runParam && closePanel && ( @@ -602,6 +603,22 @@ function RunBody({ )} + + Idempotency + +
{run.idempotencyKey ? run.idempotencyKey : "–"}
+ {run.idempotencyKey && ( +
+ Expires:{" "} + {run.idempotencyKeyExpiresAt ? ( + + ) : ( + "–" + )} +
+ )} +
+
Version @@ -804,12 +821,17 @@ function RunBody({
{run.friendlyId !== runParam && ( - Focus on run + {run.isCached ? "Jump to original run" : "Focus on run"} )}
diff --git a/apps/webapp/app/v3/eventRepository.server.ts b/apps/webapp/app/v3/eventRepository.server.ts index 5cf7b5313d..42ef5cc28f 100644 --- a/apps/webapp/app/v3/eventRepository.server.ts +++ b/apps/webapp/app/v3/eventRepository.server.ts @@ -1,4 +1,4 @@ -import { Attributes, Link, TraceFlags } from "@opentelemetry/api"; +import { Attributes, AttributeValue, Link, TraceFlags } from "@opentelemetry/api"; import { RandomIdGenerator } from "@opentelemetry/sdk-trace-base"; import { SemanticResourceAttributes } from "@opentelemetry/semantic-conventions"; import { @@ -603,6 +603,11 @@ export class EventRepository { spanEvent.environmentType === "DEVELOPMENT" ); + const originalRun = rehydrateAttribute( + spanEvent.properties, + SemanticInternalAttributes.ORIGINAL_RUN_ID + ); + return { ...spanEvent, ...span.data, @@ -612,6 +617,7 @@ export class EventRepository { events: spanEvents, show, links, + originalRun, }; }); } @@ -754,7 +760,10 @@ export class EventRepository { }); } - public async recordEvent(message: string, options: TraceEventOptions & { duration?: number }) { + public async recordEvent( + message: string, + options: TraceEventOptions & { duration?: number; parentId?: string } + ) { const propagatedContext = extractContextFromCarrier(options.context ?? {}); const startTime = options.startTime ?? getNowInNanoseconds(); @@ -763,7 +772,7 @@ export class EventRepository { (options.endTime ? calculateDurationFromStart(startTime, options.endTime) : 100); const traceId = propagatedContext?.traceparent?.traceId ?? this.generateTraceId(); - const parentId = propagatedContext?.traceparent?.spanId; + const parentId = options.parentId ?? propagatedContext?.traceparent?.spanId; const tracestate = propagatedContext?.tracestate; const spanId = options.spanIdSeed ? 
this.#generateDeterministicSpanId(traceId, options.spanIdSeed) @@ -847,7 +856,7 @@ export class EventRepository { public async traceEvent( message: string, - options: TraceEventOptions & { incomplete?: boolean }, + options: TraceEventOptions & { incomplete?: boolean; isError?: boolean }, callback: ( e: EventBuilder, traceContext: Record, @@ -944,6 +953,7 @@ export class EventRepository { tracestate, duration: options.incomplete ? 0 : duration, isPartial: options.incomplete, + isError: options.isError, message: message, serviceName: "api server", serviceNamespace: "trigger.dev", @@ -1223,7 +1233,7 @@ function excludePartialEventsWithCorrespondingFullEvent(batch: CreatableEvent[]) ); } -function extractContextFromCarrier(carrier: Record) { +export function extractContextFromCarrier(carrier: Record) { const traceparent = carrier["traceparent"]; const tracestate = carrier["tracestate"]; @@ -1550,3 +1560,26 @@ function rehydrateShow(properties: Prisma.JsonValue): { actions?: boolean } | un return; } + +function rehydrateAttribute( + properties: Prisma.JsonValue, + key: string +): T | undefined { + if (properties === null || properties === undefined) { + return; + } + + if (typeof properties !== "object") { + return; + } + + if (Array.isArray(properties)) { + return; + } + + const value = properties[key]; + + if (!value) return; + + return value as T; +} diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index bb1e239be0..0a42a7351c 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -138,6 +138,29 @@ export function registerRunEngineEventBusHandlers() { } }); + engine.eventBus.on("cachedRunCompleted", async ({ time, spanId, hasError }) => { + try { + const completedEvent = await eventRepository.completeEvent(spanId, { + endTime: time, + attributes: { + isError: hasError, + }, + }); + + if (!completedEvent) { + logger.error("[cachedRunCompleted] 
Failed to complete event for unknown reason", { + spanId, + }); + return; + } + } catch (error) { + logger.error("[cachedRunCompleted] Failed to complete event for unknown reason", { + error: error instanceof Error ? error.message : error, + spanId, + }); + } + }); + engine.eventBus.on("runExpired", async ({ time, run }) => { try { const completedEvent = await eventRepository.completeEvent(run.spanId, { diff --git a/apps/webapp/app/v3/services/batchTriggerV3.server.ts b/apps/webapp/app/v3/services/batchTriggerV3.server.ts index 66c259f83a..b0bf3529fb 100644 --- a/apps/webapp/app/v3/services/batchTriggerV3.server.ts +++ b/apps/webapp/app/v3/services/batchTriggerV3.server.ts @@ -1,6 +1,7 @@ import { BatchTriggerTaskV2RequestBody, - BatchTriggerTaskV2Response, + BatchTriggerTaskV3RequestBody, + BatchTriggerTaskV3Response, IOPacket, packetRequiresOffloading, parsePacket, @@ -18,7 +19,6 @@ import { downloadPacketFromObjectStore, uploadPacketToObjectStore } from "../r2. import { startActiveSpan } from "../tracer.server"; import { ServiceValidationError, WithRunEngine } from "./baseService.server"; import { OutOfEntitlementError, TriggerTaskService } from "./triggerTask.server"; -import { guardQueueSizeLimitsForEnv } from "./triggerTaskV2.server"; const PROCESSING_BATCH_SIZE = 50; const ASYNC_BATCH_PROCESS_SIZE_THRESHOLD = 20; @@ -40,8 +40,6 @@ export const BatchProcessingOptions = z.object({ export type BatchProcessingOptions = z.infer; export type BatchTriggerTaskServiceOptions = { - idempotencyKey?: string; - idempotencyKeyExpiresAt?: Date; triggerVersion?: string; traceContext?: Record; spanParentAsLink?: boolean; @@ -65,59 +63,14 @@ export class BatchTriggerV3Service extends WithRunEngine { public async call( environment: AuthenticatedEnvironment, - body: BatchTriggerTaskV2RequestBody, + body: BatchTriggerTaskV3RequestBody, options: BatchTriggerTaskServiceOptions = {} - ): Promise { + ): Promise { try { - return await this.traceWithEnv( + return await 
this.traceWithEnv( "call()", environment, async (span) => { - const existingBatch = options.idempotencyKey - ? await this._prisma.batchTaskRun.findUnique({ - where: { - runtimeEnvironmentId_idempotencyKey: { - runtimeEnvironmentId: environment.id, - idempotencyKey: options.idempotencyKey, - }, - }, - }) - : undefined; - - if (existingBatch) { - if ( - existingBatch.idempotencyKeyExpiresAt && - existingBatch.idempotencyKeyExpiresAt < new Date() - ) { - logger.debug("[BatchTriggerV3][call] Idempotency key has expired", { - idempotencyKey: options.idempotencyKey, - batch: { - id: existingBatch.id, - friendlyId: existingBatch.friendlyId, - runCount: existingBatch.runCount, - idempotencyKeyExpiresAt: existingBatch.idempotencyKeyExpiresAt, - idempotencyKey: existingBatch.idempotencyKey, - }, - }); - - // Update the existing batch to remove the idempotency key - await this._prisma.batchTaskRun.update({ - where: { id: existingBatch.id }, - data: { idempotencyKey: null }, - }); - - // Don't return, just continue with the batch trigger - } else { - span.setAttribute("batchId", existingBatch.friendlyId); - - return this.#respondWithExistingBatch( - existingBatch, - environment, - body.resumeParentOnCompletion ? body.parentRunId : undefined - ); - } - } - const { id, friendlyId } = BatchId.generate(); span.setAttribute("batchId", friendlyId); @@ -129,160 +82,6 @@ export class BatchTriggerV3Service extends WithRunEngine { } } - const idempotencyKeys = body.items.map((i) => i.options?.idempotencyKey).filter(Boolean); - - const cachedRuns = - idempotencyKeys.length > 0 - ? 
await this._prisma.taskRun.findMany({ - where: { - runtimeEnvironmentId: environment.id, - idempotencyKey: { - in: body.items.map((i) => i.options?.idempotencyKey).filter(Boolean), - }, - }, - select: { - friendlyId: true, - idempotencyKey: true, - idempotencyKeyExpiresAt: true, - }, - }) - : []; - - if (cachedRuns.length) { - logger.debug("[BatchTriggerV3][call] Found cached runs", { - cachedRuns, - batchId: friendlyId, - }); - } - - // Now we need to create an array of all the run IDs, in order - // If we have a cached run, that isn't expired, we should use that run ID - // If we have a cached run, that is expired, we should generate a new run ID and save that cached run ID to a set of expired run IDs - // If we don't have a cached run, we should generate a new run ID - const expiredRunIds = new Set(); - let cachedRunCount = 0; - - const runs = body.items.map((item) => { - const cachedRun = cachedRuns.find( - (r) => r.idempotencyKey === item.options?.idempotencyKey - ); - - const runId = RunId.generate(); - - if (cachedRun) { - if ( - cachedRun.idempotencyKeyExpiresAt && - cachedRun.idempotencyKeyExpiresAt < new Date() - ) { - expiredRunIds.add(cachedRun.friendlyId); - - return { - id: runId.friendlyId, - isCached: false, - idempotencyKey: item.options?.idempotencyKey ?? undefined, - taskIdentifier: item.task, - }; - } - - cachedRunCount++; - - return { - id: cachedRun.friendlyId, - isCached: true, - idempotencyKey: item.options?.idempotencyKey ?? undefined, - taskIdentifier: item.task, - }; - } - - return { - id: runId.friendlyId, - isCached: false, - idempotencyKey: item.options?.idempotencyKey ?? undefined, - taskIdentifier: item.task, - }; - }); - - //block the parent with any existing children - if (body.resumeParentOnCompletion && body.parentRunId) { - const existingChildFriendlyIds = runs.flatMap((r) => (r.isCached ? 
[r.id] : [])); - - if (existingChildFriendlyIds.length > 0) { - await this.#blockParentRun({ - parentRunId: body.parentRunId, - childFriendlyIds: existingChildFriendlyIds, - environment, - }); - } - } - - // Calculate how many new runs we need to create - const newRunCount = body.items.length - cachedRunCount; - - if (newRunCount === 0) { - logger.debug("[BatchTriggerV3][call] All runs are cached", { - batchId: friendlyId, - }); - - await this._prisma.batchTaskRun.create({ - data: { - friendlyId, - runtimeEnvironmentId: environment.id, - idempotencyKey: options.idempotencyKey, - idempotencyKeyExpiresAt: options.idempotencyKeyExpiresAt, - runCount: body.items.length, - runIds: runs.map((r) => r.id), - //todo is this correct? Surely some of the runs could still be in progress? - status: "COMPLETED", - batchVersion: "v2", - oneTimeUseToken: options.oneTimeUseToken, - }, - }); - - return { - id: friendlyId, - isCached: false, - idempotencyKey: options.idempotencyKey ?? undefined, - runs, - }; - } - - const queueSizeGuard = await guardQueueSizeLimitsForEnv( - this._engine, - environment, - newRunCount - ); - - logger.debug("Queue size guard result", { - newRunCount, - queueSizeGuard, - environment: { - id: environment.id, - type: environment.type, - organization: environment.organization, - project: environment.project, - }, - }); - - if (!queueSizeGuard.isWithinLimits) { - throw new ServiceValidationError( - `Cannot trigger ${newRunCount} tasks as the queue size limit for this environment has been reached. The maximum size is ${queueSizeGuard.maximumSize}` - ); - } - - // Expire the cached runs that are no longer valid - if (expiredRunIds.size) { - logger.debug("Expiring cached runs", { - expiredRunIds: Array.from(expiredRunIds), - batchId: friendlyId, - }); - - // TODO: is there a limit to the number of items we can update in a single query? 
- await this._prisma.taskRun.updateMany({ - where: { friendlyId: { in: Array.from(expiredRunIds) } }, - data: { idempotencyKey: null }, - }); - } - // Upload to object store const payloadPacket = await this.#handlePayloadPacket( body.items, @@ -292,9 +91,7 @@ export class BatchTriggerV3Service extends WithRunEngine { const batch = await this.#createAndProcessBatchTaskRun( friendlyId, - runs, payloadPacket, - newRunCount, environment, body, options @@ -308,7 +105,7 @@ export class BatchTriggerV3Service extends WithRunEngine { id: batch.friendlyId, isCached: false, idempotencyKey: batch.idempotencyKey ?? undefined, - runs, + runCount: body.items.length, }; } ); @@ -347,27 +144,19 @@ export class BatchTriggerV3Service extends WithRunEngine { async #createAndProcessBatchTaskRun( batchId: string, - runs: Array<{ - id: string; - isCached: boolean; - idempotencyKey: string | undefined; - taskIdentifier: string; - }>, payloadPacket: IOPacket, - newRunCount: number, environment: AuthenticatedEnvironment, body: BatchTriggerTaskV2RequestBody, options: BatchTriggerTaskServiceOptions = {} ) { - if (newRunCount <= ASYNC_BATCH_PROCESS_SIZE_THRESHOLD) { + if (body.items.length <= ASYNC_BATCH_PROCESS_SIZE_THRESHOLD) { const batch = await this._prisma.batchTaskRun.create({ data: { + id: BatchId.fromFriendlyId(batchId), friendlyId: batchId, runtimeEnvironmentId: environment.id, - idempotencyKey: options.idempotencyKey, - idempotencyKeyExpiresAt: options.idempotencyKeyExpiresAt, - runCount: newRunCount, - runIds: runs.map((r) => r.id), + runCount: body.items.length, + runIds: [], payload: payloadPacket.data, payloadType: payloadPacket.dataType, options, @@ -376,6 +165,15 @@ export class BatchTriggerV3Service extends WithRunEngine { }, }); + if (body.parentRunId && body.resumeParentOnCompletion) { + await this._engine.blockRunWithCreatedBatch({ + runId: RunId.fromFriendlyId(body.parentRunId), + batchId: batch.id, + environmentId: environment.id, + projectId: environment.projectId, + 
}); + } + const result = await this.#processBatchTaskRunItems({ batch, environment, @@ -445,12 +243,11 @@ export class BatchTriggerV3Service extends WithRunEngine { return await $transaction(this._prisma, async (tx) => { const batch = await tx.batchTaskRun.create({ data: { + id: BatchId.fromFriendlyId(batchId), friendlyId: batchId, runtimeEnvironmentId: environment.id, - idempotencyKey: options.idempotencyKey, - idempotencyKeyExpiresAt: options.idempotencyKeyExpiresAt, runCount: body.items.length, - runIds: runs.map((r) => r.id), + runIds: [], payload: payloadPacket.data, payloadType: payloadPacket.dataType, options, @@ -459,6 +256,16 @@ export class BatchTriggerV3Service extends WithRunEngine { }, }); + if (body.parentRunId && body.resumeParentOnCompletion) { + await this._engine.blockRunWithCreatedBatch({ + runId: RunId.fromFriendlyId(body.parentRunId), + batchId: batch.id, + environmentId: environment.id, + projectId: environment.projectId, + tx, + }); + } + switch (this._batchProcessingStrategy) { case "sequential": { await this.#enqueueBatchTaskRun({ @@ -475,7 +282,7 @@ export class BatchTriggerV3Service extends WithRunEngine { } case "parallel": { const ranges = Array.from({ - length: Math.ceil(newRunCount / PROCESSING_BATCH_SIZE), + length: Math.ceil(body.items.length / PROCESSING_BATCH_SIZE), }).map((_, index) => ({ start: index * PROCESSING_BATCH_SIZE, count: PROCESSING_BATCH_SIZE, @@ -507,52 +314,6 @@ export class BatchTriggerV3Service extends WithRunEngine { } } - async #respondWithExistingBatch( - batch: BatchTaskRun, - environment: AuthenticatedEnvironment, - blockParentRunId: string | undefined - ): Promise { - // Resolve the payload - const payloadPacket = await downloadPacketFromObjectStore( - { - data: batch.payload ?? 
undefined, - dataType: batch.payloadType, - }, - environment - ); - - const payload = await parsePacket(payloadPacket).then( - (p) => p as BatchTriggerTaskV2RequestBody["items"] - ); - - const runs = batch.runIds.map((id, index) => { - const item = payload[index]; - - return { - id, - taskIdentifier: item.task, - isCached: true, - idempotencyKey: item.options?.idempotencyKey ?? undefined, - }; - }); - - //block the parent with all of the children - if (blockParentRunId) { - await this.#blockParentRun({ - parentRunId: blockParentRunId, - childFriendlyIds: batch.runIds, - environment, - }); - } - - return { - id: batch.friendlyId, - idempotencyKey: batch.idempotencyKey ?? undefined, - isCached: true, - runs, - }; - } - async processBatchTaskRun(options: BatchProcessingOptions) { logger.debug("[BatchTriggerV3][processBatchTaskRun] Processing batch", { options, @@ -630,6 +391,8 @@ export class BatchTriggerV3Service extends WithRunEngine { batchSize: options.range.count, items: $payload, options: $options, + parentRunId: options.parentRunId, + resumeParentOnCompletion: options.resumeParentOnCompletion, }); switch (result.status) { @@ -737,36 +500,42 @@ export class BatchTriggerV3Service extends WithRunEngine { | { status: "INCOMPLETE"; workingIndex: number } | { status: "ERROR"; error: string; workingIndex: number } > { - // Grab the next PROCESSING_BATCH_SIZE runIds - const runFriendlyIds = batch.runIds.slice(currentIndex, currentIndex + batchSize); + // Grab the next PROCESSING_BATCH_SIZE items + const itemsToProcess = items.slice(currentIndex, currentIndex + batchSize); logger.debug("[BatchTriggerV3][processBatchTaskRun] Processing batch items", { batchId: batch.friendlyId, currentIndex, - runIds: runFriendlyIds, runCount: batch.runCount, }); - // Combine the "window" between currentIndex and currentIndex + PROCESSING_BATCH_SIZE with the runId and the item in the payload which is an array - const itemsToProcess = runFriendlyIds.map((runFriendlyId, index) => ({ - 
runFriendlyId, - item: items[index + currentIndex], - })); - let workingIndex = currentIndex; + let runIds: string[] = []; + for (const item of itemsToProcess) { try { - await this.#processBatchTaskRunItem({ + const run = await this.#processBatchTaskRunItem({ batch, environment, - task: item, + item, currentIndex: workingIndex, options, parentRunId, resumeParentOnCompletion, }); + if (!run) { + logger.error("[BatchTriggerV3][processBatchTaskRun] Failed to process item", { + batchId: batch.friendlyId, + currentIndex: workingIndex, + }); + + throw new Error("[BatchTriggerV3][processBatchTaskRun] Failed to process item"); + } + + runIds.push(run.friendlyId); + workingIndex++; } catch (error) { logger.error("[BatchTriggerV3][processBatchTaskRun] Failed to process item", { @@ -783,18 +552,45 @@ export class BatchTriggerV3Service extends WithRunEngine { } } + //add the run ids to the batch + const updatedBatch = await this._prisma.batchTaskRun.update({ + where: { id: batch.id }, + data: { + runIds: { + push: runIds, + }, + }, + }); + // if there are more items to process, requeue the batch if (workingIndex < batch.runCount) { return { status: "INCOMPLETE", workingIndex }; } + //triggered all the runs + if (updatedBatch.runIds.length === updatedBatch.runCount) { + //unblock the parent run from the batch + //this prevents the parent continuing before all the runs are created + if (parentRunId && resumeParentOnCompletion) { + await this._engine.unblockRunForCreatedBatch({ + runId: RunId.fromFriendlyId(parentRunId), + batchId: batch.id, + environmentId: environment.id, + projectId: environment.projectId, + }); + } + + //if all the runs were idempotent, it's possible the batch is already completed + await this._engine.tryCompleteBatch({ batchId: batch.id }); + } + return { status: "COMPLETE" }; } async #processBatchTaskRunItem({ batch, environment, - task, + item, currentIndex, options, parentRunId, @@ -802,7 +598,7 @@ export class BatchTriggerV3Service extends WithRunEngine 
{ }: { batch: BatchTaskRun; environment: AuthenticatedEnvironment; - task: { runFriendlyId: string; item: BatchTriggerTaskV2RequestBody["items"][number] }; + item: BatchTriggerTaskV2RequestBody["items"][number]; currentIndex: number; options?: BatchTriggerTaskServiceOptions; parentRunId: string | undefined; @@ -810,33 +606,38 @@ export class BatchTriggerV3Service extends WithRunEngine { }) { logger.debug("[BatchTriggerV3][processBatchTaskRunItem] Processing item", { batchId: batch.friendlyId, - runId: task.runFriendlyId, currentIndex, }); const triggerTaskService = new TriggerTaskService(); - await triggerTaskService.call( - task.item.task, + const run = await triggerTaskService.call( + item.task, environment, { - ...task.item, + ...item, options: { - ...task.item.options, + ...item.options, parentRunId, resumeParentOnCompletion, + parentBatch: batch.id, }, }, { triggerVersion: options?.triggerVersion, traceContext: options?.traceContext, spanParentAsLink: options?.spanParentAsLink, - batchId: batch.friendlyId, - skipChecks: true, - runFriendlyId: task.runFriendlyId, + batchId: batch.id, + batchIndex: currentIndex, }, "V2" ); + + return run + ? 
{ + friendlyId: run.friendlyId, + } + : undefined; } async #enqueueBatchTaskRun(options: BatchProcessingOptions, tx?: PrismaClientOrTransaction) { @@ -877,38 +678,4 @@ export class BatchTriggerV3Service extends WithRunEngine { }; }); } - - async #blockParentRun({ - parentRunId, - childFriendlyIds, - environment, - }: { - parentRunId: string; - childFriendlyIds: string[]; - environment: AuthenticatedEnvironment; - }) { - const runsWithAssociatedWaitpoints = await this._prisma.taskRun.findMany({ - where: { - id: { - in: childFriendlyIds.map((r) => RunId.fromFriendlyId(r)), - }, - }, - select: { - associatedWaitpoint: { - select: { - id: true, - }, - }, - }, - }); - - await this._engine.blockRunWithWaitpoint({ - runId: RunId.fromFriendlyId(parentRunId), - waitpointId: runsWithAssociatedWaitpoints.flatMap((r) => - r.associatedWaitpoint ? [r.associatedWaitpoint.id] : [] - ), - environmentId: environment.id, - projectId: environment.projectId, - }); - } } diff --git a/apps/webapp/app/v3/services/triggerTask.server.ts b/apps/webapp/app/v3/services/triggerTask.server.ts index 55ed259b8d..2a4b6028b1 100644 --- a/apps/webapp/app/v3/services/triggerTask.server.ts +++ b/apps/webapp/app/v3/services/triggerTask.server.ts @@ -14,6 +14,7 @@ export type TriggerTaskServiceOptions = { spanParentAsLink?: boolean; parentAsLinkType?: "replay" | "trigger"; batchId?: string; + batchIndex?: number; customIcon?: string; runFriendlyId?: string; skipChecks?: boolean; @@ -41,7 +42,13 @@ export class TriggerTaskService extends WithRunEngine { switch (v) { case "V1": { - return await this.callV1(taskId, environment, body, options); + const run = await this.callV1(taskId, environment, body, options); + return run + ? 
{ + ...run, + isCached: false, + } + : undefined; } case "V2": { return await this.callV2(taskId, environment, body, options); diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index ad1dd097d0..b0633640fa 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -1,32 +1,34 @@ +import { RunEngine, RunDuplicateIdempotencyKeyError } from "@internal/run-engine"; import { IOPacket, + packetRequiresOffloading, QueueOptions, SemanticInternalAttributes, TriggerTaskRequestBody, - packetRequiresOffloading, } from "@trigger.dev/core/v3"; +import { BatchId, RunId, stringifyDuration } from "@trigger.dev/core/v3/apps"; +import { Prisma, TaskRun } from "@trigger.dev/database"; import { env } from "~/env.server"; +import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { autoIncrementCounter } from "~/services/autoIncrementCounter.server"; +import { logger } from "~/services/logger.server"; +import { getEntitlement } from "~/services/platform.v3.server"; +import { parseDelay } from "~/utils/delays"; +import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; +import { handleMetadataPacket } from "~/utils/packets"; import { sanitizeQueueName } from "~/v3/marqs/index.server"; import { eventRepository } from "../eventRepository.server"; +import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; import { uploadPacketToObjectStore } from "../r2.server"; +import { isFinalRunStatus } from "../taskStatus"; import { startActiveSpan } from "../tracer.server"; -import { getEntitlement } from "~/services/platform.v3.server"; +import { clampMaxDuration } from "../utils/maxDuration"; import { ServiceValidationError, WithRunEngine } from "./baseService.server"; -import { logger } from "~/services/logger.server"; -import { 
isFinalRunStatus } from "../taskStatus"; -import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; -import { findCurrentWorkerFromEnvironment } from "../models/workerDeployment.server"; -import { handleMetadataPacket } from "~/utils/packets"; -import { WorkerGroupService } from "./worker/workerGroupService.server"; -import { parseDelay } from "~/utils/delays"; -import { RunId, stringifyDuration } from "@trigger.dev/core/v3/apps"; import { OutOfEntitlementError, TriggerTaskServiceOptions } from "./triggerTask.server"; -import { Prisma } from "@trigger.dev/database"; -import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; -import { clampMaxDuration } from "../utils/maxDuration"; -import { RunEngine } from "@internal/run-engine"; +import { WorkerGroupService } from "./worker/workerGroupService.server"; + +type Result = TaskRun & { isCached: boolean }; /** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. */ export class TriggerTaskServiceV2 extends WithRunEngine { @@ -40,7 +42,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { environment: AuthenticatedEnvironment; body: TriggerTaskRequestBody; options?: TriggerTaskServiceOptions; - }) { + }): Promise { return await this.traceWithEnv("call()", environment, async (span) => { span.setAttribute("taskId", taskId); @@ -94,16 +96,66 @@ export class TriggerTaskServiceV2 extends WithRunEngine { body.options?.resumeParentOnCompletion && body.options?.parentRunId ) { - await this._engine.blockRunWithWaitpoint({ - runId: RunId.fromFriendlyId(body.options.parentRunId), - waitpointId: existingRun.associatedWaitpoint.id, - environmentId: environment.id, - projectId: environment.projectId, - tx: this._prisma, - }); + await eventRepository.traceEvent( + `${taskId} (cached)`, + { + context: options.traceContext, + spanParentAsLink: options.spanParentAsLink, + parentAsLinkType: options.parentAsLinkType, + kind: "SERVER", + environment, + taskSlug: taskId, + 
attributes: { + properties: { + [SemanticInternalAttributes.SHOW_ACTIONS]: true, + [SemanticInternalAttributes.ORIGINAL_RUN_ID]: existingRun.friendlyId, + }, + style: { + icon: "task-cached", + }, + runIsTest: body.options?.test ?? false, + batchId: options.batchId ? BatchId.toFriendlyId(options.batchId) : undefined, + idempotencyKey, + runId: existingRun.friendlyId, + }, + incomplete: existingRun.associatedWaitpoint.status === "PENDING", + isError: existingRun.associatedWaitpoint.outputIsError, + immediate: true, + }, + async (event) => { + //log a message + await eventRepository.recordEvent( + `There's an existing run for idempotencyKey: ${idempotencyKey}`, + { + taskSlug: taskId, + environment, + attributes: { + runId: existingRun.friendlyId, + }, + context: options.traceContext, + parentId: event.spanId, + } + ); + //block run with waitpoint + await this._engine.blockRunWithWaitpoint({ + runId: RunId.fromFriendlyId(body.options!.parentRunId!), + waitpoints: existingRun.associatedWaitpoint!.id, + spanIdToComplete: event.spanId, + batch: options?.batchId + ? { + id: options.batchId, + index: options.batchIndex ?? 0, + } + : undefined, + environmentId: environment.id, + projectId: environment.projectId, + tx: this._prisma, + }); + } + ); } - return existingRun; + return { ...existingRun, isCached: true }; } } @@ -195,7 +247,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { icon: options.customIcon ?? "task", }, runIsTest: body.options?.test ?? false, - batchId: options.batchId, + batchId: options.batchId ? BatchId.toFriendlyId(options.batchId) : undefined, idempotencyKey, }, incomplete: true, @@ -299,7 +351,12 @@ export class TriggerTaskServiceV2 extends WithRunEngine { oneTimeUseToken: options.oneTimeUseToken, parentTaskRunId: parentRun?.id, rootTaskRunId: parentRun?.rootTaskRunId ?? parentRun?.id, - batchId: body.options?.parentBatch ?? undefined, + batch: options?.batchId + ? { + id: options.batchId, + index: options.batchIndex ?? 
0, + } + : undefined, resumeParentOnCompletion: body.options?.resumeParentOnCompletion, depth, metadata: metadataPacket?.data, @@ -313,7 +370,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { this._prisma ); - return taskRun; + return { ...taskRun, isCached: false }; }, async (_, tx) => { const counter = await tx.taskRunNumberCounter.findUnique({ @@ -335,6 +392,11 @@ export class TriggerTaskServiceV2 extends WithRunEngine { } ); } catch (error) { + if (error instanceof RunDuplicateIdempotencyKeyError) { + //retry calling this function, because this time it will return the idempotent run + return await this.call({ taskId, environment, body, options }); + } + // Detect a prisma transaction Unique constraint violation if (error instanceof Prisma.PrismaClientKnownRequestError) { logger.debug("TriggerTask: Prisma transaction error", { diff --git a/internal-packages/database/prisma/migrations/20250106172943_added_span_id_to_complete_to_task_run_waitpoint/migration.sql b/internal-packages/database/prisma/migrations/20250106172943_added_span_id_to_complete_to_task_run_waitpoint/migration.sql new file mode 100644 index 0000000000..8d624ba757 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20250106172943_added_span_id_to_complete_to_task_run_waitpoint/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "TaskRunWaitpoint" ADD COLUMN "spanIdToComplete" TEXT; diff --git a/internal-packages/database/prisma/migrations/20250109131442_added_batch_and_index_to_task_run_waitpoint_and_task_run_execution_snapshot/migration.sql b/internal-packages/database/prisma/migrations/20250109131442_added_batch_and_index_to_task_run_waitpoint_and_task_run_execution_snapshot/migration.sql new file mode 100644 index 0000000000..5756f7fa5d --- /dev/null +++ b/internal-packages/database/prisma/migrations/20250109131442_added_batch_and_index_to_task_run_waitpoint_and_task_run_execution_snapshot/migration.sql @@ -0,0 +1,13 @@ +-- AlterTable +ALTER TABLE 
"TaskRunExecutionSnapshot" ADD COLUMN "batchId" TEXT, +ADD COLUMN "completedWaitpointOrder" TEXT[]; + +-- AlterTable +ALTER TABLE "TaskRunWaitpoint" ADD COLUMN "batchId" TEXT, +ADD COLUMN "batchIndex" INTEGER; + +-- AddForeignKey +ALTER TABLE "TaskRunExecutionSnapshot" ADD CONSTRAINT "TaskRunExecutionSnapshot_batchId_fkey" FOREIGN KEY ("batchId") REFERENCES "BatchTaskRun"("id") ON DELETE SET NULL ON UPDATE CASCADE; + +-- AddForeignKey +ALTER TABLE "TaskRunWaitpoint" ADD CONSTRAINT "TaskRunWaitpoint_batchId_fkey" FOREIGN KEY ("batchId") REFERENCES "BatchTaskRun"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/internal-packages/database/prisma/migrations/20250109173506_waitpoint_added_batch_type/migration.sql b/internal-packages/database/prisma/migrations/20250109173506_waitpoint_added_batch_type/migration.sql new file mode 100644 index 0000000000..1e1fead5a5 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20250109173506_waitpoint_added_batch_type/migration.sql @@ -0,0 +1,8 @@ +-- AlterEnum +ALTER TYPE "WaitpointType" ADD VALUE 'BATCH'; + +-- AlterTable +ALTER TABLE "Waitpoint" ADD COLUMN "completedByBatchId" TEXT; + +-- AddForeignKey +ALTER TABLE "Waitpoint" ADD CONSTRAINT "Waitpoint_completedByBatchId_fkey" FOREIGN KEY ("completedByBatchId") REFERENCES "BatchTaskRun"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/internal-packages/database/prisma/migrations/20250109175955_waitpoint_added_completed_by_batch_id_index/migration.sql b/internal-packages/database/prisma/migrations/20250109175955_waitpoint_added_completed_by_batch_id_index/migration.sql new file mode 100644 index 0000000000..7d691d17e1 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20250109175955_waitpoint_added_completed_by_batch_id_index/migration.sql @@ -0,0 +1,2 @@ +-- CreateIndex +CREATE INDEX "Waitpoint_completedByBatchId_idx" ON "Waitpoint"("completedByBatchId"); diff --git 
a/internal-packages/database/prisma/migrations/20250114153223_task_run_waitpoint_unique_constraint_added_batch_index/migration.sql b/internal-packages/database/prisma/migrations/20250114153223_task_run_waitpoint_unique_constraint_added_batch_index/migration.sql new file mode 100644 index 0000000000..22a41947d4 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20250114153223_task_run_waitpoint_unique_constraint_added_batch_index/migration.sql @@ -0,0 +1,14 @@ +/* + Warnings: + + - A unique constraint covering the columns `[taskRunId,waitpointId,batchIndex]` on the table `TaskRunWaitpoint` will be added. If there are existing duplicate values, this will fail. + +*/ +-- DropIndex +DROP INDEX "TaskRunWaitpoint_taskRunId_waitpointId_key"; + +-- CreateIndex (multiple can have null batchIndex, so we need the other one below) +CREATE UNIQUE INDEX "TaskRunWaitpoint_taskRunId_waitpointId_batchIndex_key" ON "TaskRunWaitpoint" ("taskRunId", "waitpointId", "batchIndex"); + +-- CreateIndex (where batchIndex is null) +CREATE UNIQUE INDEX "TaskRunWaitpoint_taskRunId_waitpointId_batchIndex_null_key" ON "TaskRunWaitpoint"("taskRunId", "waitpointId") WHERE "batchIndex" IS NULL; diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 91ca18ec98..e2e68864db 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1946,12 +1946,18 @@ model TaskRunExecutionSnapshot { run TaskRun @relation(fields: [runId], references: [id]) runStatus TaskRunStatus + batchId String? + batch BatchTaskRun? @relation(fields: [batchId], references: [id]) + /// This is the current run attempt number. Users can define how many attempts they want for a run. attemptNumber Int? 
/// Waitpoints that have been completed for this execution completedWaitpoints Waitpoint[] @relation("completedWaitpoints") + /// An array of waitpoint IDs in the correct order, used for batches + completedWaitpointOrder String[] + /// Checkpoint checkpointId String? checkpoint TaskRunCheckpoint? @relation(fields: [checkpointId], references: [id]) @@ -2050,6 +2056,10 @@ model Waitpoint { /// If it's a DATETIME type waitpoint, this is the date completedAfter DateTime? + /// If it's a BATCH type waitpoint, this is the associated batch + completedByBatchId String? + completedByBatch BatchTaskRun? @relation(fields: [completedByBatchId], references: [id], onDelete: SetNull) + /// The runs this waitpoint is blocking blockingTaskRuns TaskRunWaitpoint[] @@ -2071,12 +2081,14 @@ model Waitpoint { updatedAt DateTime @updatedAt @@unique([environmentId, idempotencyKey]) + @@index([completedByBatchId]) } enum WaitpointType { RUN DATETIME MANUAL + BATCH } enum WaitpointStatus { @@ -2096,10 +2108,22 @@ model TaskRunWaitpoint { project Project @relation(fields: [projectId], references: [id], onDelete: Cascade, onUpdate: Cascade) projectId String + /// This span id is completed when the waitpoint is completed. This is used with cached runs (idempotent) + spanIdToComplete String? + + //associated batch + batchId String? + batch BatchTaskRun? @relation(fields: [batchId], references: [id]) + //if there's an associated batch and this isn't set it's for the entire batch + //if it is set, it's a specific run in the batch + batchIndex Int? 
+ createdAt DateTime @default(now()) updatedAt DateTime @updatedAt - @@unique([taskRunId, waitpointId]) + /// There are two constraints, the one below and also one that Prisma doesn't support + /// The second one implemented in SQL only prevents a TaskRun + Waitpoint with a null batchIndex + @@unique([taskRunId, waitpointId, batchIndex]) @@index([taskRunId]) @@index([waitpointId]) } @@ -2480,6 +2504,7 @@ model BatchTaskRun { runtimeEnvironment RuntimeEnvironment @relation(fields: [runtimeEnvironmentId], references: [id], onDelete: Cascade, onUpdate: Cascade) status BatchTaskRunStatus @default(PENDING) runtimeEnvironmentId String + /// This only includes new runs, not idempotent runs. runs TaskRun[] createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -2493,6 +2518,15 @@ model BatchTaskRun { options Json? batchVersion String @default("v1") + //engine v2 + /// Snapshots that reference this batch + executionSnapshots TaskRunExecutionSnapshot[] + /// Specific run blockers, + runsBlocked TaskRunWaitpoint[] + /// Waitpoints that are blocked by this batch. + /// When a Batch is created it blocks execution of the associated parent run (for andWait) + waitpoints Waitpoint[] + /// optional token that can be used to authenticate the task run oneTimeUseToken String? 
diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index b36e886694..2b8b8961be 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -79,6 +79,13 @@ export type EventBusEvents = { }; }, ]; + cachedRunCompleted: [ + { + time: Date; + spanId: string; + hasError: boolean; + }, + ]; runMetadataUpdated: [ { time: Date; diff --git a/internal-packages/run-engine/src/engine/executionSnapshots.ts b/internal-packages/run-engine/src/engine/executionSnapshots.ts index eb2dfcf42e..5daca4f419 100644 --- a/internal-packages/run-engine/src/engine/executionSnapshots.ts +++ b/internal-packages/run-engine/src/engine/executionSnapshots.ts @@ -1,5 +1,5 @@ import { CompletedWaitpoint, ExecutionResult } from "@trigger.dev/core/v3"; -import { RunId, SnapshotId } from "@trigger.dev/core/v3/apps"; +import { BatchId, RunId, SnapshotId } from "@trigger.dev/core/v3/apps"; import { PrismaClientOrTransaction, TaskRunCheckpoint, @@ -35,10 +35,24 @@ export async function getLatestExecutionSnapshot( ...snapshot, friendlyId: SnapshotId.toFriendlyId(snapshot.id), runFriendlyId: RunId.toFriendlyId(snapshot.runId), - completedWaitpoints: snapshot.completedWaitpoints.map( - (w) => - ({ + completedWaitpoints: snapshot.completedWaitpoints.flatMap((w) => { + //get all indexes of the waitpoint in the completedWaitpointOrder + //we do this because the same run can be in a batch multiple times (i.e. same idempotencyKey) + let indexes: (number | undefined)[] = []; + for (let i = 0; i < snapshot.completedWaitpointOrder.length; i++) { + if (snapshot.completedWaitpointOrder[i] === w.id) { + indexes.push(i); + } + } + + if (indexes.length === 0) { + indexes.push(undefined); + } + + return indexes.map((index) => { + return { id: w.id, + index: index === -1 ? undefined : index, friendlyId: w.friendlyId, type: w.type, completedAt: w.completedAt ?? 
new Date(), @@ -50,14 +64,27 @@ export async function getLatestExecutionSnapshot( ? { id: w.completedByTaskRunId, friendlyId: RunId.toFriendlyId(w.completedByTaskRunId), + batch: snapshot.batchId + ? { + id: snapshot.batchId, + friendlyId: BatchId.toFriendlyId(snapshot.batchId), + } + : undefined, } : undefined, completedAfter: w.completedAfter ?? undefined, + completedByBatch: w.completedByBatchId + ? { + id: w.completedByBatchId, + friendlyId: BatchId.toFriendlyId(w.completedByBatchId), + } + : undefined, output: w.output ?? undefined, outputType: w.outputType, outputIsError: w.outputIsError, - }) satisfies CompletedWaitpoint - ), + } satisfies CompletedWaitpoint; + }); + }), }; } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 8cdfb07e11..0464bd7577 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -22,6 +22,7 @@ import { WaitForDurationResult, } from "@trigger.dev/core/v3"; import { + BatchId, getMaxDuration, parseNaturalLanguageDuration, QueueId, @@ -237,7 +238,7 @@ export class RunEngine { tags, parentTaskRunId, rootTaskRunId, - batchId, + batch, resumeParentOnCompletion, depth, metadata, @@ -252,7 +253,7 @@ export class RunEngine { const prisma = tx ?? 
this.prisma; return this.#trace( - "createRunAttempt", + "trigger", { friendlyId, environmentId: environment.id, @@ -268,67 +269,97 @@ export class RunEngine { } //create run - const taskRun = await prisma.taskRun.create({ - data: { - id: RunId.fromFriendlyId(friendlyId), - engine: "V2", - status, - number, - friendlyId, - runtimeEnvironmentId: environment.id, - projectId: environment.project.id, - idempotencyKey, - idempotencyKeyExpiresAt, - taskIdentifier, - payload, - payloadType, - context, - traceContext, - traceId, - spanId, - parentSpanId, - lockedToVersionId, - taskVersion, - sdkVersion, - cliVersion, - concurrencyKey, - queue: queueName, - masterQueue, - secondaryMasterQueue, - isTest, - delayUntil, - queuedAt, - maxAttempts, - priorityMs, - ttl, - tags: - tags.length === 0 - ? undefined - : { - connect: tags, - }, - runTags: tags.length === 0 ? undefined : tags.map((tag) => tag.name), - oneTimeUseToken, - parentTaskRunId, - rootTaskRunId, - batchId, - resumeParentOnCompletion, - depth, - metadata, - metadataType, - seedMetadata, - seedMetadataType, - maxDurationInSeconds, - executionSnapshots: { - create: { - engine: "V2", - executionStatus: "RUN_CREATED", - description: "Run was created", - runStatus: status, + let taskRun: TaskRun; + try { + taskRun = await prisma.taskRun.create({ + data: { + id: RunId.fromFriendlyId(friendlyId), + engine: "V2", + status, + number, + friendlyId, + runtimeEnvironmentId: environment.id, + projectId: environment.project.id, + idempotencyKey, + idempotencyKeyExpiresAt, + taskIdentifier, + payload, + payloadType, + context, + traceContext, + traceId, + spanId, + parentSpanId, + lockedToVersionId, + taskVersion, + sdkVersion, + cliVersion, + concurrencyKey, + queue: queueName, + masterQueue, + secondaryMasterQueue, + isTest, + delayUntil, + queuedAt, + maxAttempts, + priorityMs, + ttl, + tags: + tags.length === 0 + ? undefined + : { + connect: tags, + }, + runTags: tags.length === 0 ? 
undefined : tags.map((tag) => tag.name), + oneTimeUseToken, + parentTaskRunId, + rootTaskRunId, + batchId: batch?.id, + resumeParentOnCompletion, + depth, + metadata, + metadataType, + seedMetadata, + seedMetadataType, + maxDurationInSeconds, + executionSnapshots: { + create: { + engine: "V2", + executionStatus: "RUN_CREATED", + description: "Run was created", + runStatus: status, + }, }, }, - }, - }); + }); + } catch (error) { + if (error instanceof Prisma.PrismaClientKnownRequestError) { + this.logger.debug("engine.trigger(): Prisma transaction error", { + code: error.code, + message: error.message, + meta: error.meta, + idempotencyKey, + environmentId: environment.id, + }); + + if (error.code === "P2002") { + this.logger.debug("engine.trigger(): throwing RunDuplicateIdempotencyKeyError", { + code: error.code, + message: error.message, + meta: error.meta, + idempotencyKey, + environmentId: environment.id, + }); + + //this happens if a unique constraint failed, i.e. duplicate idempotency + throw new RunDuplicateIdempotencyKeyError( + `Run with idempotency key ${idempotencyKey} already exists` + ); + } + } + + throw error; + } span.setAttribute("runId", taskRun.id); @@ -345,9 +376,10 @@ export class RunEngine { //this will block the parent run from continuing until this waitpoint is completed (and removed) await this.blockRunWithWaitpoint({ runId: parentTaskRunId, - waitpointId: associatedWaitpoint.id, + waitpoints: associatedWaitpoint.id, environmentId: associatedWaitpoint.environmentId, projectId: associatedWaitpoint.projectId, + batch, tx: prisma, }); @@ -546,7 +578,7 @@ export class RunEngine { "Tried to dequeue a run that is not in a valid state to be dequeued.", }, checkpointId: snapshot.checkpointId ?? 
undefined, - completedWaitpointIds: snapshot.completedWaitpoints.map((wp) => wp.id), + completedWaitpoints: snapshot.completedWaitpoints, error: `Tried to dequeue a run that is not in a valid state to be dequeued.`, }); @@ -767,7 +799,7 @@ export class RunEngine { description: "Run was dequeued for execution", }, checkpointId: snapshot.checkpointId ?? undefined, - completedWaitpointIds: snapshot.completedWaitpoints.map((wp) => wp.id), + completedWaitpoints: snapshot.completedWaitpoints, }); return { @@ -1040,6 +1072,7 @@ export class RunEngine { data: { status: "EXECUTING", attemptNumber: nextAttemptNumber, + firstAttemptStartedAt: taskRun.attemptNumber === null ? new Date() : undefined, }, include: { tags: true, @@ -1280,7 +1313,7 @@ export class RunEngine { //block the run const blockResult = await this.blockRunWithWaitpoint({ runId, - waitpointId: waitpoint.id, + waitpoints: waitpoint.id, environmentId: waitpoint.environmentId, projectId: waitpoint.projectId, tx: prisma, @@ -1549,6 +1582,110 @@ export class RunEngine { }); } + /** This block a run with a BATCH waitpoint. + * The waitpoint will be created, and it will block the parent run. + */ + async blockRunWithCreatedBatch({ + runId, + batchId, + environmentId, + projectId, + tx, + }: { + runId: string; + batchId: string; + environmentId: string; + projectId: string; + tx?: PrismaClientOrTransaction; + }): Promise { + const prisma = tx ?? 
this.prisma; + + try { + const waitpoint = await prisma.waitpoint.create({ + data: { + ...WaitpointId.generate(), + type: "BATCH", + idempotencyKey: batchId, + userProvidedIdempotencyKey: false, + completedByBatchId: batchId, + environmentId, + projectId, + }, + }); + + await this.blockRunWithWaitpoint({ + runId, + waitpoints: waitpoint.id, + environmentId, + projectId, + batch: { id: batchId }, + tx: prisma, + }); + + return waitpoint; + } catch (error) { + if (error instanceof Prisma.PrismaClientKnownRequestError) { + // duplicate idempotency key + if (error.code === "P2002") { + return null; + } else { + throw error; + } + } + throw error; + } + } + + /** + * This is called when all the runs for a batch have been created. + * This does NOT mean that all the runs for the batch are completed. + */ + async unblockRunForCreatedBatch({ + runId, + batchId, + environmentId, + projectId, + tx, + }: { + runId: string; + batchId: string; + environmentId: string; + projectId: string; + tx?: PrismaClientOrTransaction; + }): Promise { + const prisma = tx ?? 
this.prisma; + + const waitpoint = await prisma.waitpoint.findFirst({ + where: { + completedByBatchId: batchId, + }, + }); + + if (!waitpoint) { + this.logger.error("RunEngine.unblockRunForBatch(): Waitpoint not found", { + runId, + batchId, + }); + throw new ServiceValidationError("Waitpoint not found for batch", 404); + } + + await this.completeWaitpoint({ + id: waitpoint.id, + output: { value: "Batch waitpoint completed", isError: false }, + }); + } + + async tryCompleteBatch({ batchId }: { batchId: string }): Promise { + await this.worker.enqueue({ + //this will debounce the call + id: `tryCompleteBatch:${batchId}`, + job: "tryCompleteBatch", + payload: { batchId: batchId }, + //2s in the future + availableAt: new Date(Date.now() + 2_000), + }); + } + async getWaitpoint({ waitpointId, environmentId, @@ -1585,21 +1722,25 @@ export class RunEngine { */ async blockRunWithWaitpoint({ runId, - waitpointId, + waitpoints, projectId, failAfter, + spanIdToComplete, + batch, tx, }: { runId: string; - waitpointId: string | string[]; + waitpoints: string | string[]; environmentId: string; projectId: string; failAfter?: Date; + spanIdToComplete?: string; + batch?: { id: string; index?: number }; tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? this.prisma; - let waitpointIds = typeof waitpointId === "string" ? [waitpointId] : waitpointId; + let $waitpoints = typeof waitpoints === "string" ? 
[waitpoints] : waitpoints; return await this.runLock.lock([runId], 5000, async (signal) => { let snapshot: TaskRunExecutionSnapshot = await getLatestExecutionSnapshot(prisma, runId); @@ -1607,16 +1748,19 @@ export class RunEngine { //block the run with the waitpoints, returning how many waitpoints are pending const insert = await prisma.$queryRaw<{ pending_count: BigInt }[]>` WITH inserted AS ( - INSERT INTO "TaskRunWaitpoint" ("id", "taskRunId", "waitpointId", "projectId", "createdAt", "updatedAt") + INSERT INTO "TaskRunWaitpoint" ("id", "taskRunId", "waitpointId", "projectId", "createdAt", "updatedAt", "spanIdToComplete", "batchId", "batchIndex") SELECT gen_random_uuid(), ${runId}, w.id, ${projectId}, NOW(), - NOW() + NOW(), + ${spanIdToComplete ?? null}, + ${batch?.id ?? null}, + ${batch?.index ?? null} FROM "Waitpoint" w - WHERE w.id IN (${Prisma.join(waitpointIds)}) + WHERE w.id IN (${Prisma.join($waitpoints)}) ON CONFLICT DO NOTHING RETURNING "waitpointId" ) @@ -1647,15 +1791,16 @@ export class RunEngine { executionStatus: newStatus, description: "Run was blocked by a waitpoint.", }, + batchId: batch?.id ?? snapshot.batchId ?? undefined, }); } if (failAfter) { - for (const waitpointId of waitpointIds) { + for (const waitpoint of $waitpoints) { await this.worker.enqueue({ - id: `finishWaitpoint.${waitpointId}`, + id: `finishWaitpoint.${waitpoint}`, job: "finishWaitpoint", - payload: { waitpointId, error: "Waitpoint timed out" }, + payload: { waitpointId: waitpoint, error: "Waitpoint timed out" }, availableAt: failAfter, }); } @@ -1705,7 +1850,7 @@ export class RunEngine { // 1. 
Find the TaskRuns blocked by this waitpoint const affectedTaskRuns = await tx.taskRunWaitpoint.findMany({ where: { waitpointId: id }, - select: { taskRunId: true }, + select: { taskRunId: true, spanIdToComplete: true }, }); if (affectedTaskRuns.length === 0) { @@ -1748,6 +1893,15 @@ export class RunEngine { //50ms in the future availableAt: new Date(Date.now() + 50), }); + + // emit an event to complete associated cached runs + if (run.spanIdToComplete) { + this.eventBus.emit("cachedRunCompleted", { + time: new Date(), + spanId: run.spanIdToComplete, + hasError: output?.isError ?? false, + }); + } } return result.updatedWaitpoint; @@ -1865,6 +2019,12 @@ export class RunEngine { status: snapshot.runStatus, attemptNumber: snapshot.attemptNumber ?? undefined, }, + batch: snapshot.batchId + ? { + id: snapshot.batchId, + friendlyId: BatchId.toFriendlyId(snapshot.batchId), + } + : undefined, checkpoint: snapshot.checkpoint ? { id: snapshot.checkpoint.id, @@ -2482,6 +2642,8 @@ export class RunEngine { const blockingWaitpoints = await this.prisma.taskRunWaitpoint.findMany({ where: { taskRunId: runId }, select: { + batchId: true, + batchIndex: true, waitpoint: { select: { id: true, status: true }, }, @@ -2531,7 +2693,11 @@ export class RunEngine { executionStatus: "EXECUTING", description: "Run was continued, whilst still executing.", }, - completedWaitpointIds: blockingWaitpoints.map((b) => b.waitpoint.id), + batchId: snapshot.batchId ?? undefined, + completedWaitpoints: blockingWaitpoints.map((b) => ({ + id: b.waitpoint.id, + index: b.batchIndex ?? undefined, + })), }); //we reacquire the concurrency if it's still running because we're not going to be dequeuing (which also does this) @@ -2545,7 +2711,11 @@ export class RunEngine { executionStatus: "QUEUED", description: "Run is QUEUED, because all waitpoints are completed.", }, - completedWaitpointIds: blockingWaitpoints.map((b) => b.waitpoint.id), + batchId: snapshot.batchId ?? 
undefined, + completedWaitpoints: blockingWaitpoints.map((b) => ({ + id: b.waitpoint.id, + index: b.batchIndex ?? undefined, + })), }); //put it back in the queue, with the original timestamp (w/ priority) @@ -2751,8 +2921,9 @@ export class RunEngine { { run, snapshot, + batchId, checkpointId, - completedWaitpointIds, + completedWaitpoints, error, }: { run: { id: string; status: TaskRunStatus; attemptNumber?: number | null }; @@ -2760,8 +2931,12 @@ export class RunEngine { executionStatus: TaskRunExecutionStatus; description: string; }; + batchId?: string; checkpointId?: string; - completedWaitpointIds?: string[]; + completedWaitpoints?: { + id: string; + index?: number; + }[]; error?: string; } ) { @@ -2773,12 +2948,17 @@ export class RunEngine { runId: run.id, runStatus: run.status, attemptNumber: run.attemptNumber ?? undefined, - checkpointId: checkpointId ?? undefined, + batchId, + checkpointId, completedWaitpoints: { - connect: completedWaitpointIds?.map((id) => ({ id })), + connect: completedWaitpoints?.map((w) => ({ id: w.id })), }, + completedWaitpointOrder: completedWaitpoints + ?.filter((c) => c.index !== undefined) + .sort((a, b) => a.index! - b.index!) + .map((w) => w.id), isValid: error ? false : true, - error: error ?? undefined, + error, }, include: { checkpoint: true, @@ -2801,7 +2981,7 @@ export class RunEngine { }, snapshot: { ...newSnapshot, - completedWaitpointIds: completedWaitpointIds ?? [], + completedWaitpointIds: completedWaitpoints?.map((w) => w.id) ?? 
[], }, }); @@ -3020,14 +3200,7 @@ export class RunEngine { */ async #finalizeRun({ id, batchId }: { id: string; batchId: string | null }) { if (batchId) { - await this.worker.enqueue({ - //this will debounce the call - id: `tryCompleteBatch:${batchId}`, - job: "tryCompleteBatch", - payload: { batchId: batchId }, - //2s in the future - availableAt: new Date(Date.now() + 2_000), - }); + await this.tryCompleteBatch({ batchId }); } } @@ -3162,10 +3335,16 @@ export class ServiceValidationError extends Error { } } -//todo temporary during development class NotImplementedError extends Error { constructor(message: string) { - console.error("NOT IMPLEMENTED YET", { message }); + console.error("This isn't implemented", { message }); + super(message); + } +} + +export class RunDuplicateIdempotencyKeyError extends Error { + constructor(message: string) { super(message); + this.name = "RunDuplicateIdempotencyKeyError"; } } diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts index f1025d4d0d..ee76462a16 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -79,7 +79,7 @@ describe("RunEngine batchTrigger", () => { queueName: "task/test-task", isTest: false, tags: [], - batchId: batch.id, + batch: { id: batch.id, index: 0 }, }, prisma ); @@ -100,7 +100,7 @@ describe("RunEngine batchTrigger", () => { queueName: "task/test-task", isTest: false, tags: [], - batchId: batch.id, + batch: { id: batch.id, index: 1 }, }, prisma ); diff --git a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts new file mode 100644 index 0000000000..159d219734 --- /dev/null +++ b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts @@ -0,0 +1,363 @@ +import { + assertNonNullable, + 
containerTest, + setupAuthenticatedEnvironment, + setupBackgroundWorker, +} from "@internal/testcontainers"; +import { trace } from "@opentelemetry/api"; +import { expect } from "vitest"; +import { RunEngine } from "../index.js"; +import { setTimeout } from "node:timers/promises"; +import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; + +describe("RunEngine batchTriggerAndWait", () => { + containerTest( + "batchTriggerAndWait (no idempotency)", + { timeout: 15_000 }, + async ({ prisma, redisContainer }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + redis: { + host: redisContainer.getHost(), + port: redisContainer.getPort(), + password: redisContainer.getPassword(), + enableAutoPipelining: true, + }, + worker: { + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + const childTask = "child-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); + + //create a batch + const batch = await prisma.batchTaskRun.create({ + data: { + friendlyId: generateFriendlyId("batch"), + runtimeEnvironmentId: authenticatedEnvironment.id, + }, + }); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue parent + const 
dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const initialExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(initialExecutionData); + const attemptResult = await engine.startRunAttempt({ + runId: parentRun.id, + snapshotId: initialExecutionData.snapshot.id, + }); + + //block using the batch + await engine.blockRunWithCreatedBatch({ + runId: parentRun.id, + batchId: batch.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + }); + + const afterBlockedByBatch = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(afterBlockedByBatch); + expect(afterBlockedByBatch.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + const child1 = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + batch: { id: batch.id, index: 0 }, + }, + prisma + ); + + const parentAfterChild1 = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentAfterChild1); + expect(parentAfterChild1.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + const child2 = await engine.trigger( + { + number: 2, + friendlyId: "run_c12345", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t123456", + spanId: "s123456", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + 
parentTaskRunId: parentRun.id, + batch: { id: batch.id, index: 1 }, + }, + prisma + ); + + const parentAfterChild2 = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentAfterChild2); + expect(parentAfterChild2.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //check the waitpoint blocking the parent run + const runWaitpoints = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + orderBy: { + createdAt: "asc", + }, + }); + expect(runWaitpoints.length).toBe(3); + const child1Waitpoint = runWaitpoints.find( + (w) => w.waitpoint.completedByTaskRunId === child1.id + ); + expect(child1Waitpoint?.waitpoint.type).toBe("RUN"); + expect(child1Waitpoint?.waitpoint.completedByTaskRunId).toBe(child1.id); + expect(child1Waitpoint?.batchId).toBe(batch.id); + expect(child1Waitpoint?.batchIndex).toBe(0); + const child2Waitpoint = runWaitpoints.find( + (w) => w.waitpoint.completedByTaskRunId === child2.id + ); + expect(child2Waitpoint?.waitpoint.type).toBe("RUN"); + expect(child2Waitpoint?.waitpoint.completedByTaskRunId).toBe(child2.id); + expect(child2Waitpoint?.batchId).toBe(batch.id); + expect(child2Waitpoint?.batchIndex).toBe(1); + const batchWaitpoint = runWaitpoints.find((w) => w.waitpoint.type === "BATCH"); + expect(batchWaitpoint?.waitpoint.type).toBe("BATCH"); + expect(batchWaitpoint?.waitpoint.completedByBatchId).toBe(batch.id); + + await engine.unblockRunForCreatedBatch({ + runId: parentRun.id, + batchId: batch.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + }); + + //dequeue and start the 1st child + const dequeuedChild = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: child1.masterQueue, + maxRunCount: 1, + }); + const childAttempt1 = await engine.startRunAttempt({ + runId: child1.id, + snapshotId: dequeuedChild[0].snapshot.id, + }); + + // complete the 1st child + 
await engine.completeRunAttempt({ + runId: child1.id, + snapshotId: childAttempt1.snapshot.id, + completion: { + id: child1.id, + ok: true, + output: '{"foo":"bar"}', + outputType: "application/json", + }, + }); + + //child snapshot + const childExecutionDataAfter = await engine.getRunExecutionData({ runId: child1.id }); + assertNonNullable(childExecutionDataAfter); + expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); + + const child1WaitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: child1Waitpoint?.waitpointId, + }, + }); + expect(child1WaitpointAfter?.completedAt).not.toBeNull(); + expect(child1WaitpointAfter?.status).toBe("COMPLETED"); + expect(child1WaitpointAfter?.output).toBe('{"foo":"bar"}'); + + await setTimeout(500); + + const runWaitpointsAfterFirstChild = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointsAfterFirstChild.length).toBe(3); + + //parent snapshot + const parentExecutionDataAfterFirstChildComplete = await engine.getRunExecutionData({ + runId: parentRun.id, + }); + assertNonNullable(parentExecutionDataAfterFirstChildComplete); + expect(parentExecutionDataAfterFirstChildComplete.snapshot.executionStatus).toBe( + "EXECUTING_WITH_WAITPOINTS" + ); + expect(parentExecutionDataAfterFirstChildComplete.batch?.id).toBe(batch.id); + expect(parentExecutionDataAfterFirstChildComplete.completedWaitpoints.length).toBe(0); + + //dequeue and start the 2nd child + const dequeuedChild2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: child2.masterQueue, + maxRunCount: 1, + }); + const childAttempt2 = await engine.startRunAttempt({ + runId: child2.id, + snapshotId: dequeuedChild2[0].snapshot.id, + }); + await engine.completeRunAttempt({ + runId: child2.id, + snapshotId: childAttempt2.snapshot.id, + completion: { + id: child2.id, + ok: true, + output: '{"baz":"qux"}', + outputType: 
"application/json", + }, + }); + + //child snapshot + const child2ExecutionDataAfter = await engine.getRunExecutionData({ runId: child1.id }); + assertNonNullable(child2ExecutionDataAfter); + expect(child2ExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); + + const child2WaitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: child2Waitpoint?.waitpointId, + }, + }); + expect(child2WaitpointAfter?.completedAt).not.toBeNull(); + expect(child2WaitpointAfter?.status).toBe("COMPLETED"); + expect(child2WaitpointAfter?.output).toBe('{"baz":"qux"}'); + + await setTimeout(500); + + const runWaitpointsAfterSecondChild = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointsAfterSecondChild.length).toBe(0); + + //parent snapshot + const parentExecutionDataAfterSecondChildComplete = await engine.getRunExecutionData({ + runId: parentRun.id, + }); + assertNonNullable(parentExecutionDataAfterSecondChildComplete); + expect(parentExecutionDataAfterSecondChildComplete.snapshot.executionStatus).toBe( + "EXECUTING" + ); + expect(parentExecutionDataAfterSecondChildComplete.batch?.id).toBe(batch.id); + expect(parentExecutionDataAfterSecondChildComplete.completedWaitpoints.length).toBe(3); + + const completedWaitpoint0 = + parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find( + (w) => w.index === 0 + ); + assertNonNullable(completedWaitpoint0); + expect(completedWaitpoint0.id).toBe(child1Waitpoint!.waitpointId); + expect(completedWaitpoint0.completedByTaskRun?.id).toBe(child1.id); + expect(completedWaitpoint0.completedByTaskRun?.batch?.id).toBe(batch.id); + expect(completedWaitpoint0.output).toBe('{"foo":"bar"}'); + expect(completedWaitpoint0.index).toBe(0); + + const completedWaitpoint1 = + parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find( + (w) => w.index === 1 + ); + assertNonNullable(completedWaitpoint1); + 
expect(completedWaitpoint1.id).toBe(child2Waitpoint!.waitpointId); + expect(completedWaitpoint1.completedByTaskRun?.id).toBe(child2.id); + expect(completedWaitpoint1.completedByTaskRun?.batch?.id).toBe(batch.id); + expect(completedWaitpoint1.index).toBe(1); + expect(completedWaitpoint1.output).toBe('{"baz":"qux"}'); + + const batchWaitpointAfter = + parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find( + (w) => w.type === "BATCH" + ); + expect(batchWaitpointAfter?.id).toBe(batchWaitpoint?.waitpointId); + expect(batchWaitpointAfter?.completedByBatch?.id).toBe(batch.id); + expect(batchWaitpointAfter?.index).toBeUndefined(); + + const batchAfter = await prisma.batchTaskRun.findUnique({ + where: { + id: batch.id, + }, + }); + expect(batchAfter?.status === "COMPLETED"); + } finally { + engine.quit(); + } + } + ); +}); diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 8c8bab2bd0..5776f4479a 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -368,7 +368,7 @@ describe("RunEngine triggerAndWait", () => { }); const blockedResult = await engine.blockRunWithWaitpoint({ runId: parentRun2.id, - waitpointId: childRunWithWaitpoint.associatedWaitpoint!.id, + waitpoints: childRunWithWaitpoint.associatedWaitpoint!.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.project.id, tx: prisma, diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 419efacb85..d1429bc1a0 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -340,7 +340,7 @@ describe("RunEngine Waitpoints", () => { //block the run await engine.blockRunWithWaitpoint({ 
runId: run.id, - waitpointId: waitpoint.id, + waitpoints: waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, }); @@ -479,7 +479,7 @@ describe("RunEngine Waitpoints", () => { //block the run await engine.blockRunWithWaitpoint({ runId: run.id, - waitpointId: waitpoint.id, + waitpoints: waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, //fail after 200ms @@ -608,7 +608,7 @@ describe("RunEngine Waitpoints", () => { waitpoints.map((waitpoint) => engine.blockRunWithWaitpoint({ runId: run.id, - waitpointId: waitpoint.id, + waitpoints: waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, }) diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index dc40f7d1ed..9ee63b7744 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -65,7 +65,10 @@ export type TriggerParams = { tags: { id: string; name: string }[]; parentTaskRunId?: string; rootTaskRunId?: string; - batchId?: string; + batch?: { + id: string; + index: number; + }; resumeParentOnCompletion?: boolean; depth?: number; metadata?: string; diff --git a/internal-packages/run-engine/src/index.ts b/internal-packages/run-engine/src/index.ts index b71175be2a..e63b0dd836 100644 --- a/internal-packages/run-engine/src/index.ts +++ b/internal-packages/run-engine/src/index.ts @@ -1 +1 @@ -export { RunEngine } from "./engine/index"; +export { RunEngine, RunDuplicateIdempotencyKeyError } from "./engine/index"; diff --git a/packages/core/src/v3/apiClient/index.ts b/packages/core/src/v3/apiClient/index.ts index 59d2a16a49..d2115552b4 100644 --- a/packages/core/src/v3/apiClient/index.ts +++ b/packages/core/src/v3/apiClient/index.ts @@ -4,8 +4,8 @@ import { generateJWT } from "../jwt.js"; import { AddTagsRequestBody, BatchTaskRunExecutionResult, - 
BatchTriggerTaskV2RequestBody, - BatchTriggerTaskV2Response, + BatchTriggerTaskV3RequestBody, + BatchTriggerTaskV3Response, CanceledRunResponse, CreateEnvironmentVariableRequestBody, CreateScheduleOptions, @@ -18,7 +18,7 @@ import { ListScheduleOptions, ReplayRunResponse, RescheduleRunRequestBody, - RetrieveBatchResponse, + RetrieveBatchV2Response, RetrieveRunResponse, ScheduleObject, TaskRunExecutionResult, @@ -42,9 +42,9 @@ import { } from "./core.js"; import { ApiError } from "./errors.js"; import { + AnyRealtimeRun, AnyRunShape, RealtimeRun, - AnyRealtimeRun, RunShape, RunStreamCallback, RunSubscription, @@ -75,8 +75,6 @@ export type ClientTriggerOptions = { }; export type ClientBatchTriggerOptions = ClientTriggerOptions & { - idempotencyKey?: string; - idempotencyKeyTTL?: string; processingStrategy?: "parallel" | "sequential"; }; @@ -100,10 +98,10 @@ const DEFAULT_ZOD_FETCH_OPTIONS: ZodFetchOptions = { export { isRequestOptions }; export type { + AnyRealtimeRun, AnyRunShape, ApiRequestOptions, RealtimeRun, - AnyRealtimeRun, RunShape, RunStreamCallback, RunSubscription, @@ -234,19 +232,17 @@ export class ApiClient { }); } - batchTriggerV2( - body: BatchTriggerTaskV2RequestBody, + batchTriggerV3( + body: BatchTriggerTaskV3RequestBody, clientOptions?: ClientBatchTriggerOptions, requestOptions?: TriggerRequestOptions ) { return zodfetch( - BatchTriggerTaskV2Response, - `${this.baseUrl}/api/v1/tasks/batch`, + BatchTriggerTaskV3Response, + `${this.baseUrl}/api/v2/tasks/batch`, { method: "POST", headers: this.#getHeaders(clientOptions?.spanParentAsLink ?? 
false, { - "idempotency-key": clientOptions?.idempotencyKey, - "idempotency-key-ttl": clientOptions?.idempotencyKeyTTL, "batch-processing-strategy": clientOptions?.processingStrategy, }), body: JSON.stringify(body), @@ -713,8 +709,8 @@ export class ApiClient { retrieveBatch(batchId: string, requestOptions?: ZodFetchOptions) { return zodfetch( - RetrieveBatchResponse, - `${this.baseUrl}/api/v1/batches/${batchId}`, + RetrieveBatchV2Response, + `${this.baseUrl}/api/v2/batches/${batchId}`, { method: "GET", headers: this.#getHeaders(false), diff --git a/packages/core/src/v3/idempotencyKeys.ts b/packages/core/src/v3/idempotencyKeys.ts index 7a8e053018..7544dc1ee2 100644 --- a/packages/core/src/v3/idempotencyKeys.ts +++ b/packages/core/src/v3/idempotencyKeys.ts @@ -8,6 +8,28 @@ export function isIdempotencyKey( return typeof value === "string" && value.length === 64; } +export function flattenIdempotencyKey( + idempotencyKey?: + | IdempotencyKey + | string + | string[] + | (undefined | IdempotencyKey | string | string[])[] +): IdempotencyKey | string | string[] | undefined { + if (!idempotencyKey) { + return; + } + + if (Array.isArray(idempotencyKey)) { + return idempotencyKey.flatMap((key) => { + const k = flattenIdempotencyKey(key); + if (!k) return []; + return [k]; + }) as string[]; + } + + return idempotencyKey; +} + export async function makeIdempotencyKey( idempotencyKey?: IdempotencyKey | string | string[] ): Promise { diff --git a/packages/core/src/v3/runtime/devRuntimeManager.ts b/packages/core/src/v3/runtime/devRuntimeManager.ts index acad2c3d0f..35e009ac5e 100644 --- a/packages/core/src/v3/runtime/devRuntimeManager.ts +++ b/packages/core/src/v3/runtime/devRuntimeManager.ts @@ -49,39 +49,42 @@ export class DevRuntimeManager implements RuntimeManager { async waitForBatch(params: { id: string; - runs: string[]; + runCount: number; ctx: TaskRunContext; }): Promise { - if (!params.runs.length) { - return Promise.resolve({ id: params.id, items: [] }); - } + throw 
new Error("Method not implemented."); - const promise = Promise.all( - params.runs.map((runId) => { - return new Promise((resolve, reject) => { - const pendingCompletion = this._pendingCompletionNotifications.get(runId); + // if (!params.runs.length) { + // return Promise.resolve({ id: params.id, items: [] }); + // } - if (pendingCompletion) { - this._pendingCompletionNotifications.delete(runId); + // const promise = Promise.all( + // params.runs.map((runId) => { + // return new Promise((resolve, reject) => { + // const pendingCompletion = this._pendingCompletionNotifications.get(runId); - resolve(pendingCompletion); + // if (pendingCompletion) { + // this._pendingCompletionNotifications.delete(runId); - return; - } + // resolve(pendingCompletion); - this._taskWaits.set(runId, { resolve }); - }); - }) - ); + // return; + // } - await this.#tryFlushMetadata(); + // this._taskWaits.set(runId, { resolve }); + // }); + // }) + // ); + // await this.#tryFlushMetadata(); + + // const results = await promise; - const results = await promise; + // const results = await promise; - return { - id: params.id, - items: results, - }; + // return { + // id: params.id, + // items: results, + // }; } resumeTask(completion: TaskRunExecutionResult, runId: string): void { diff --git a/packages/core/src/v3/runtime/index.ts b/packages/core/src/v3/runtime/index.ts index 7eecb99296..a1fe0a804b 100644 --- a/packages/core/src/v3/runtime/index.ts +++ b/packages/core/src/v3/runtime/index.ts @@ -39,7 +39,7 @@ export class RuntimeAPI { public waitForBatch(params: { id: string; - runs: string[]; + runCount: number; ctx: TaskRunContext; }): Promise { return usage.pauseAsync(() => this.#getRuntimeManager().waitForBatch(params)); diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index 90eddcd5e7..ed3f4d13e0 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ 
b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -65,17 +65,18 @@ export class ManagedRuntimeManager implements RuntimeManager { async waitForBatch(params: { id: string; - runs: string[]; + runCount: number; ctx: TaskRunContext; }): Promise { - if (!params.runs.length) { + if (!params.runCount) { return Promise.resolve({ id: params.id, items: [] }); } const promise = Promise.all( - params.runs.map((runId) => { + Array.from({ length: params.runCount }, (_, index) => { + const resolverId = `${params.id}_${index}`; return new Promise((resolve, reject) => { - this.resolversByWaitId.set(runId, resolve); + this.resolversByWaitId.set(resolverId, resolve); }); }) ); @@ -99,8 +100,21 @@ export class ManagedRuntimeManager implements RuntimeManager { private completeWaitpoint(waitpoint: CompletedWaitpoint): void { console.log("completeWaitpoint", waitpoint); - const waitId = - waitpoint.completedByTaskRun?.friendlyId ?? this.resolversByWaitpoint.get(waitpoint.id); + let waitId: string | undefined; + + if (waitpoint.completedByTaskRun) { + if (waitpoint.completedByTaskRun.batch) { + waitId = `${waitpoint.completedByTaskRun.batch.friendlyId}_${waitpoint.index}`; + } else { + waitId = waitpoint.completedByTaskRun.friendlyId; + } + } else if (waitpoint.completedByBatch) { + //no waitpoint resolves associated with batch completions + //a batch completion isn't when all the runs from a batch are completed + return; + } else { + waitId = this.resolversByWaitpoint.get(waitpoint.id); + } if (!waitId) { // TODO: Handle failures better @@ -124,10 +138,12 @@ export class ManagedRuntimeManager implements RuntimeManager { } private waitpointToTaskRunExecutionResult(waitpoint: CompletedWaitpoint): TaskRunExecutionResult { + if (!waitpoint.completedByTaskRun?.friendlyId) throw new Error("Missing completedByTaskRun"); + if (waitpoint.outputIsError) { return { ok: false, - id: waitpoint.id, + id: waitpoint.completedByTaskRun.friendlyId, error: waitpoint.output ? 
JSON.parse(waitpoint.output) : { @@ -138,7 +154,7 @@ export class ManagedRuntimeManager implements RuntimeManager { } else { return { ok: true, - id: waitpoint.id, + id: waitpoint.completedByTaskRun.friendlyId, output: waitpoint.output, outputType: waitpoint.outputType ?? "application/json", } satisfies TaskRunSuccessfulExecutionResult; diff --git a/packages/core/src/v3/runtime/manager.ts b/packages/core/src/v3/runtime/manager.ts index 56acfe3cf2..d42e86cfad 100644 --- a/packages/core/src/v3/runtime/manager.ts +++ b/packages/core/src/v3/runtime/manager.ts @@ -11,7 +11,7 @@ export interface RuntimeManager { waitForTask(params: { id: string; ctx: TaskRunContext }): Promise; waitForBatch(params: { id: string; - runs: string[]; + runCount: number; ctx: TaskRunContext; }): Promise; } diff --git a/packages/core/src/v3/runtime/noopRuntimeManager.ts b/packages/core/src/v3/runtime/noopRuntimeManager.ts index 16e96de3e9..30ee5fe788 100644 --- a/packages/core/src/v3/runtime/noopRuntimeManager.ts +++ b/packages/core/src/v3/runtime/noopRuntimeManager.ts @@ -32,7 +32,7 @@ export class NoopRuntimeManager implements RuntimeManager { waitForBatch(params: { id: string; - runs: string[]; + runCount: number; ctx: TaskRunContext; }): Promise { return Promise.resolve({ diff --git a/packages/core/src/v3/runtime/prodRuntimeManager.ts b/packages/core/src/v3/runtime/prodRuntimeManager.ts index 4fd63e04bd..8c209d1e4c 100644 --- a/packages/core/src/v3/runtime/prodRuntimeManager.ts +++ b/packages/core/src/v3/runtime/prodRuntimeManager.ts @@ -80,34 +80,36 @@ export class ProdRuntimeManager implements RuntimeManager { async waitForBatch(params: { id: string; - runs: string[]; + runCount: number; ctx: TaskRunContext; }): Promise { - if (!params.runs.length) { - return Promise.resolve({ id: params.id, items: [] }); - } + throw new Error("Method not implemented."); - const promise = Promise.all( - params.runs.map((runId) => { - return new Promise((resolve, reject) => { - this._taskWaits.set(runId, 
{ resolve }); - }); - }) - ); - - await this.ipc.send("WAIT_FOR_BATCH", { - batchFriendlyId: params.id, - runFriendlyIds: params.runs, - }); + // if (!params.runs.length) { + // return Promise.resolve({ id: params.id, items: [] }); + // } - const results = await promise; + // const promise = Promise.all( + // params.runs.map((runId) => { + // return new Promise((resolve, reject) => { + // this._taskWaits.set(runId, { resolve }); + // }); + // }) + // ); - clock.reset(); + // await this.ipc.send("WAIT_FOR_BATCH", { + // batchFriendlyId: params.id, + // runFriendlyIds: params.runs, + // }); + + // const results = await promise; + + // clock.reset(); - return { - id: params.id, - items: results, - }; + // return { + // id: params.id, + // items: results, + // }; } resumeTask(completion: TaskRunExecutionResult): void { diff --git a/packages/core/src/v3/runtime/unmanagedRuntimeManager.ts b/packages/core/src/v3/runtime/unmanagedRuntimeManager.ts index 88b0350590..19796a1b6d 100644 --- a/packages/core/src/v3/runtime/unmanagedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/unmanagedRuntimeManager.ts @@ -40,27 +40,29 @@ export class UnmanagedRuntimeManager implements RuntimeManager { async waitForBatch(params: { id: string; - runs: string[]; + runCount: number; ctx: TaskRunContext; }): Promise { - if (!params.runs.length) { - return Promise.resolve({ id: params.id, items: [] }); - } - - const promise = Promise.all( - params.runs.map((runId) => { - return new Promise((resolve, reject) => { - this._taskWaits.set(runId, { resolve }); - }); - }) - ); - - const results = await promise; - - return { - id: params.id, - items: results, - }; + throw new Error("Method not implemented."); + + // if (!params.runs.length) { + // return Promise.resolve({ id: params.id, items: [] }); + // } + + // const promise = Promise.all( + // params.runs.map((runId) => { + // return new Promise((resolve, reject) => { + // this._taskWaits.set(runId, { resolve }); + // }); + // }) + // ); + + // 
const results = await promise; + + // return { + // id: params.id, + // items: results, + // }; } async completeWaitpoints(waitpoints: Waitpoint[]): Promise { diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 6c7fde1a65..780bfc9b6b 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -119,6 +119,7 @@ export type TriggerTaskRequestBody = z.infer; export const TriggerTaskResponse = z.object({ id: z.string(), + isCached: z.boolean().optional(), }); export type TriggerTaskResponse = z.infer; @@ -191,6 +192,29 @@ export const BatchTriggerTaskV2Response = z.object({ export type BatchTriggerTaskV2Response = z.infer; +export const BatchTriggerTaskV3RequestBody = z.object({ + items: BatchTriggerTaskItem.array(), + /** + * RunEngine v2 + * If triggered inside another run, the parentRunId is the friendly ID of the parent run. + */ + parentRunId: z.string().optional(), + /** + * RunEngine v2 + * Should be `true` if `triggerAndWait` or `batchTriggerAndWait` + */ + resumeParentOnCompletion: z.boolean().optional(), +}); + +export type BatchTriggerTaskV3RequestBody = z.infer; + +export const BatchTriggerTaskV3Response = z.object({ + id: z.string(), + runCount: z.number(), +}); + +export type BatchTriggerTaskV3Response = z.infer; + export const BatchTriggerTaskResponse = z.object({ batchId: z.string(), runs: z.string().array(), @@ -807,6 +831,18 @@ export const RetrieveBatchResponse = z.object({ export type RetrieveBatchResponse = z.infer; +export const RetrieveBatchV2Response = z.object({ + id: z.string(), + status: BatchStatus, + idempotencyKey: z.string().optional(), + createdAt: z.coerce.date(), + updatedAt: z.coerce.date(), + runCount: z.number(), + runs: z.array(z.string()), +}); + +export type RetrieveBatchV2Response = z.infer; + export const SubscribeRealtimeStreamChunkRawShape = z.object({ id: z.string(), runId: z.string(), diff --git a/packages/core/src/v3/schemas/runEngine.ts 
b/packages/core/src/v3/schemas/runEngine.ts index 57a52095bc..e13f82d9c3 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -43,12 +43,14 @@ export const WaitpointType = { RUN: "RUN", DATETIME: "DATETIME", MANUAL: "MANUAL", + BATCH: "BATCH", } satisfies Enum; export type WaitpointType = (typeof WaitpointType)[keyof typeof WaitpointType]; export const CompletedWaitpoint = z.object({ id: z.string(), + index: z.number().optional(), friendlyId: z.string(), type: z.enum(Object.values(WaitpointType) as [WaitpointType]), completedAt: z.coerce.date(), @@ -58,10 +60,24 @@ export const CompletedWaitpoint = z.object({ .object({ id: z.string(), friendlyId: z.string(), + /** If the run has an associated batch */ + batch: z + .object({ + id: z.string(), + friendlyId: z.string(), + }) + .optional(), }) .optional(), /** For type === "DATETIME" */ completedAfter: z.coerce.date().optional(), + /** For type === "BATCH" */ + completedByBatch: z + .object({ + id: z.string(), + friendlyId: z.string(), + }) + .optional(), output: z.string().optional(), outputType: z.string().optional(), outputIsError: z.boolean(), @@ -164,6 +180,12 @@ export const RunExecutionData = z.object({ version: z.literal("1"), snapshot: ExecutionSnapshot, run: BaseRunMetadata, + batch: z + .object({ + id: z.string(), + friendlyId: z.string(), + }) + .optional(), checkpoint: z .object({ id: z.string(), diff --git a/packages/core/src/v3/semanticInternalAttributes.ts b/packages/core/src/v3/semanticInternalAttributes.ts index 98b14f1aa3..ed765b251d 100644 --- a/packages/core/src/v3/semanticInternalAttributes.ts +++ b/packages/core/src/v3/semanticInternalAttributes.ts @@ -12,6 +12,7 @@ export const SemanticInternalAttributes = { ATTEMPT_NUMBER: "ctx.attempt.number", RUN_ID: "ctx.run.id", RUN_IS_TEST: "ctx.run.isTest", + ORIGINAL_RUN_ID: "$original_run_id", BATCH_ID: "ctx.batch.id", TASK_SLUG: "ctx.task.id", TASK_PATH: "ctx.task.filePath", diff --git 
a/packages/core/src/v3/types/tasks.ts b/packages/core/src/v3/types/tasks.ts index a4d2edf9ee..e2843b764f 100644 --- a/packages/core/src/v3/types/tasks.ts +++ b/packages/core/src/v3/types/tasks.ts @@ -396,9 +396,7 @@ export type AnyBatchedRunHandle = BatchedRunHandle; export type BatchRunHandle = BrandedRun< { batchId: string; - isCached: boolean; - idempotencyKey?: string; - runs: Array>; + runCount: number; publicAccessToken: string; }, TOutput, @@ -777,7 +775,7 @@ export type TriggerOptions = { maxDuration?: number; }; -export type TriggerAndWaitOptions = Omit; +export type TriggerAndWaitOptions = TriggerOptions; export type BatchTriggerOptions = { idempotencyKey?: IdempotencyKey | string | string[]; @@ -796,19 +794,7 @@ export type BatchTriggerOptions = { triggerSequentially?: boolean; }; -export type BatchTriggerAndWaitOptions = { - /** - * When true, triggers tasks sequentially in batch order. This ensures ordering but may be slower, - * especially for large batches. - * - * When false (default), triggers tasks in parallel for better performance, but order is not guaranteed. - * - * Note: This only affects the order of run creation, not the actual task execution. 
- * - * @default false - */ - triggerSequentially?: boolean; -}; +export type BatchTriggerAndWaitOptions = BatchTriggerOptions; export type TaskMetadataWithFunctions = TaskMetadata & { fns: { diff --git a/packages/trigger-sdk/src/v3/shared.ts b/packages/trigger-sdk/src/v3/shared.ts index 86e0175662..13a4500649 100644 --- a/packages/trigger-sdk/src/v3/shared.ts +++ b/packages/trigger-sdk/src/v3/shared.ts @@ -1,4 +1,4 @@ -import { SpanKind } from "@opentelemetry/api"; +import { SpanKind, SpanStatusCode } from "@opentelemetry/api"; import { SerializableJson } from "@trigger.dev/core"; import { accessoryAttributes, @@ -24,6 +24,7 @@ import { TaskRunExecutionResult, TaskRunPromise, TaskFromIdentifier, + flattenIdempotencyKey, } from "@trigger.dev/core/v3"; import { PollOptions, runs } from "./runs.js"; import { tracer } from "./tracer.js"; @@ -584,10 +585,10 @@ export async function batchTriggerById( ): Promise>> { const apiClient = apiClientManager.clientOrThrow(); - const response = await apiClient.batchTriggerV2( + const response = await apiClient.batchTriggerV3( { items: await Promise.all( - items.map(async (item) => { + items.map(async (item, index) => { const taskMetadata = taskCatalog.getTask(item.id); const parsedPayload = taskMetadata?.fns.parsePayload @@ -596,6 +597,10 @@ export async function batchTriggerById( const payloadPacket = await stringifyIO(parsedPayload); + const batchItemIdempotencyKey = await makeIdempotencyKey( + flattenIdempotencyKey([options?.idempotencyKey, `${index}`]) + ); + return { task: item.id, payload: payloadPacket.data, @@ -604,15 +609,15 @@ export async function batchTriggerById( concurrencyKey: item.options?.concurrencyKey, test: taskContext.ctx?.run.isTest, payloadType: payloadPacket.dataType, - idempotencyKey: await makeIdempotencyKey(item.options?.idempotencyKey), - idempotencyKeyTTL: item.options?.idempotencyKeyTTL, delay: item.options?.delay, ttl: item.options?.ttl, tags: item.options?.tags, maxAttempts: 
item.options?.maxAttempts, - parentAttempt: taskContext.ctx?.attempt.id, metadata: item.options?.metadata, maxDuration: item.options?.maxDuration, + idempotencyKey: + (await makeIdempotencyKey(item.options?.idempotencyKey)) ?? batchItemIdempotencyKey, + idempotencyKeyTTL: item.options?.idempotencyKeyTTL ?? options?.idempotencyKeyTTL, }, }; }) @@ -621,8 +626,6 @@ export async function batchTriggerById( }, { spanParentAsLink: true, - idempotencyKey: await makeIdempotencyKey(options?.idempotencyKey), - idempotencyKeyTTL: options?.idempotencyKeyTTL, processingStrategy: options?.triggerSequentially ? "sequential" : undefined, }, { @@ -635,20 +638,8 @@ export async function batchTriggerById( span.setAttribute("batchId", body.id); } - if ("runs" in body && Array.isArray(body.runs)) { - span.setAttribute("runCount", body.runs.length); - } - - if ("isCached" in body && typeof body.isCached === "boolean") { - if (body.isCached) { - console.warn(`Result is a cached response because the request was idempotent.`); - } - - span.setAttribute("isCached", body.isCached); - } - - if ("idempotencyKey" in body && typeof body.idempotencyKey === "string") { - span.setAttribute("idempotencyKey", body.idempotencyKey); + if ("runCount" in body && typeof body.runCount === "number") { + span.setAttribute("runCount", body.runCount); } } }, @@ -658,9 +649,7 @@ export async function batchTriggerById( const handle = { batchId: response.id, - isCached: response.isCached, - idempotencyKey: response.idempotencyKey, - runs: response.runs, + runCount: response.runCount, publicAccessToken: response.publicAccessToken, }; @@ -760,10 +749,10 @@ export async function batchTriggerByIdAndWait( return await tracer.startActiveSpan( "batch.triggerAndWait()", async (span) => { - const response = await apiClient.batchTriggerV2( + const response = await apiClient.batchTriggerV3( { items: await Promise.all( - items.map(async (item) => { + items.map(async (item, index) => { const taskMetadata = 
taskCatalog.getTask(item.id); const parsedPayload = taskMetadata?.fns.parsePayload @@ -772,6 +761,10 @@ export async function batchTriggerByIdAndWait( const payloadPacket = await stringifyIO(parsedPayload); + const batchItemIdempotencyKey = await makeIdempotencyKey( + flattenIdempotencyKey([options?.idempotencyKey, `${index}`]) + ); + return { task: item.id, payload: payloadPacket.data, @@ -787,11 +780,14 @@ export async function batchTriggerByIdAndWait( maxAttempts: item.options?.maxAttempts, metadata: item.options?.metadata, maxDuration: item.options?.maxDuration, + idempotencyKey: + (await makeIdempotencyKey(item.options?.idempotencyKey)) ?? + batchItemIdempotencyKey, + idempotencyKeyTTL: item.options?.idempotencyKeyTTL ?? options?.idempotencyKeyTTL, }, }; }) ), - dependentAttempt: ctx.attempt.id, parentRunId: ctx.run.id, resumeParentOnCompletion: true, }, @@ -802,20 +798,11 @@ export async function batchTriggerByIdAndWait( ); span.setAttribute("batchId", response.id); - span.setAttribute("runCount", response.runs.length); - span.setAttribute("isCached", response.isCached); - - if (response.isCached) { - console.warn(`Result is a cached response because the request was idempotent.`); - } - - if (response.idempotencyKey) { - span.setAttribute("idempotencyKey", response.idempotencyKey); - } + span.setAttribute("runCount", response.runCount); const result = await runtime.waitForBatch({ id: response.id, - runs: response.runs.map((run) => run.id), + runCount: response.runCount, ctx, }); @@ -921,10 +908,10 @@ export async function batchTriggerTasks( ): Promise> { const apiClient = apiClientManager.clientOrThrow(); - const response = await apiClient.batchTriggerV2( + const response = await apiClient.batchTriggerV3( { items: await Promise.all( - items.map(async (item) => { + items.map(async (item, index) => { const taskMetadata = taskCatalog.getTask(item.task.id); const parsedPayload = taskMetadata?.fns.parsePayload @@ -933,6 +920,10 @@ export async function 
batchTriggerTasks( const payloadPacket = await stringifyIO(parsedPayload); + const batchItemIdempotencyKey = await makeIdempotencyKey( + flattenIdempotencyKey([options?.idempotencyKey, `${index}`]) + ); + return { task: item.task.id, payload: payloadPacket.data, @@ -941,15 +932,15 @@ export async function batchTriggerTasks( concurrencyKey: item.options?.concurrencyKey, test: taskContext.ctx?.run.isTest, payloadType: payloadPacket.dataType, - idempotencyKey: await makeIdempotencyKey(item.options?.idempotencyKey), - idempotencyKeyTTL: item.options?.idempotencyKeyTTL, delay: item.options?.delay, ttl: item.options?.ttl, tags: item.options?.tags, maxAttempts: item.options?.maxAttempts, - parentAttempt: taskContext.ctx?.attempt.id, metadata: item.options?.metadata, maxDuration: item.options?.maxDuration, + idempotencyKey: + (await makeIdempotencyKey(item.options?.idempotencyKey)) ?? batchItemIdempotencyKey, + idempotencyKeyTTL: item.options?.idempotencyKeyTTL ?? options?.idempotencyKeyTTL, }, }; }) @@ -958,8 +949,6 @@ export async function batchTriggerTasks( }, { spanParentAsLink: true, - idempotencyKey: await makeIdempotencyKey(options?.idempotencyKey), - idempotencyKeyTTL: options?.idempotencyKeyTTL, processingStrategy: options?.triggerSequentially ? 
"sequential" : undefined, }, { @@ -972,20 +961,8 @@ export async function batchTriggerTasks( span.setAttribute("batchId", body.id); } - if ("runs" in body && Array.isArray(body.runs)) { - span.setAttribute("runCount", body.runs.length); - } - - if ("isCached" in body && typeof body.isCached === "boolean") { - if (body.isCached) { - console.warn(`Result is a cached response because the request was idempotent.`); - } - - span.setAttribute("isCached", body.isCached); - } - - if ("idempotencyKey" in body && typeof body.idempotencyKey === "string") { - span.setAttribute("idempotencyKey", body.idempotencyKey); + if ("runCount" in body && typeof body.runCount === "number") { + span.setAttribute("runCount", body.runCount); } } }, @@ -995,9 +972,7 @@ export async function batchTriggerTasks( const handle = { batchId: response.id, - isCached: response.isCached, - idempotencyKey: response.idempotencyKey, - runs: response.runs, + runCount: response.runCount, publicAccessToken: response.publicAccessToken, }; @@ -1099,10 +1074,10 @@ export async function batchTriggerAndWaitTasks { - const response = await apiClient.batchTriggerV2( + const response = await apiClient.batchTriggerV3( { items: await Promise.all( - items.map(async (item) => { + items.map(async (item, index) => { const taskMetadata = taskCatalog.getTask(item.task.id); const parsedPayload = taskMetadata?.fns.parsePayload @@ -1111,6 +1086,10 @@ export async function batchTriggerAndWaitTasks run.id), + runCount: response.runCount, ctx, }); @@ -1203,7 +1176,6 @@ async function trigger_internal( ttl: options?.ttl, tags: options?.tags, maxAttempts: options?.maxAttempts, - parentAttempt: taskContext.ctx?.attempt.id, metadata: options?.metadata, maxDuration: options?.maxDuration, parentRunId: taskContext.ctx?.run.id, @@ -1243,14 +1215,18 @@ async function batchTrigger_internal( const ctx = taskContext.ctx; - const response = await apiClient.batchTriggerV2( + const response = await apiClient.batchTriggerV3( { items: await 
Promise.all( - items.map(async (item) => { + items.map(async (item, index) => { const parsedPayload = parsePayload ? await parsePayload(item.payload) : item.payload; const payloadPacket = await stringifyIO(parsedPayload); + const batchItemIdempotencyKey = await makeIdempotencyKey( + flattenIdempotencyKey([options?.idempotencyKey, `${index}`]) + ); + return { task: taskIdentifier, payload: payloadPacket.data, @@ -1259,25 +1235,24 @@ async function batchTrigger_internal( concurrencyKey: item.options?.concurrencyKey, test: taskContext.ctx?.run.isTest, payloadType: payloadPacket.dataType, - idempotencyKey: await makeIdempotencyKey(item.options?.idempotencyKey), - idempotencyKeyTTL: item.options?.idempotencyKeyTTL, delay: item.options?.delay, ttl: item.options?.ttl, tags: item.options?.tags, maxAttempts: item.options?.maxAttempts, - parentAttempt: taskContext.ctx?.attempt.id, metadata: item.options?.metadata, maxDuration: item.options?.maxDuration, parentRunId: ctx?.run.id, + idempotencyKey: + (await makeIdempotencyKey(item.options?.idempotencyKey)) ?? batchItemIdempotencyKey, + idempotencyKeyTTL: item.options?.idempotencyKeyTTL ?? options?.idempotencyKeyTTL, }, }; }) ), + parentRunId: ctx?.run.id, }, { spanParentAsLink: true, - idempotencyKey: await makeIdempotencyKey(options?.idempotencyKey), - idempotencyKeyTTL: options?.idempotencyKeyTTL, processingStrategy: options?.triggerSequentially ? 
"sequential" : undefined, }, { @@ -1290,20 +1265,8 @@ async function batchTrigger_internal( span.setAttribute("batchId", body.id); } - if ("runs" in body && Array.isArray(body.runs)) { - span.setAttribute("runCount", body.runs.length); - } - - if ("isCached" in body && typeof body.isCached === "boolean") { - if (body.isCached) { - console.warn(`Result is a cached response because the request was idempotent.`); - } - - span.setAttribute("isCached", body.isCached); - } - - if ("idempotencyKey" in body && typeof body.idempotencyKey === "string") { - span.setAttribute("idempotencyKey", body.idempotencyKey); + if ("runCount" in body && Array.isArray(body.runCount)) { + span.setAttribute("runCount", body.runCount); } } }, @@ -1313,9 +1276,7 @@ async function batchTrigger_internal( const handle = { batchId: response.id, - isCached: response.isCached, - idempotencyKey: response.idempotencyKey, - runs: response.runs, + runCount: response.runCount, publicAccessToken: response.publicAccessToken, }; @@ -1364,6 +1325,8 @@ async function triggerAndWait_internal { - const response = await apiClient.batchTriggerV2( + const response = await apiClient.batchTriggerV3( { items: await Promise.all( - items.map(async (item) => { + items.map(async (item, index) => { const parsedPayload = parsePayload ? 
await parsePayload(item.payload) : item.payload; const payloadPacket = await stringifyIO(parsedPayload); + const batchItemIdempotencyKey = await makeIdempotencyKey( + flattenIdempotencyKey([options?.idempotencyKey, `${index}`]) + ); + return { task: id, payload: payloadPacket.data, @@ -1440,11 +1407,14 @@ async function batchTriggerAndWait_internal run.id), + runCount: response.runCount, ctx, }); diff --git a/references/hello-world/src/trigger/example.ts b/references/hello-world/src/trigger/example.ts index 18cb388b25..abedf0b1e3 100644 --- a/references/hello-world/src/trigger/example.ts +++ b/references/hello-world/src/trigger/example.ts @@ -50,7 +50,10 @@ export const batchParentTask = task({ logger.log("Results 3", { results3 }); const results4 = await batch.triggerByTaskAndWait([ - { task: childTask, payload: { message: "Hello, world !" } }, + { + task: childTask, + payload: { message: "Hello, world !" }, + }, { task: childTask, payload: { message: "Hello, world 2!" } }, ]); logger.log("Results 4", { results4 }); diff --git a/references/hello-world/src/trigger/idempotency.ts b/references/hello-world/src/trigger/idempotency.ts index 9136399cc4..6ea7c1c5ea 100644 --- a/references/hello-world/src/trigger/idempotency.ts +++ b/references/hello-world/src/trigger/idempotency.ts @@ -4,73 +4,281 @@ import { childTask } from "./example.js"; export const idempotency = task({ id: "idempotency", + maxDuration: 60, run: async (payload: any, { ctx }) => { logger.log("Hello, world from the parent", { payload }); - const child1Key = await idempotencyKeys.create("a", { scope: "global" }); + const successfulKey = await idempotencyKeys.create("a", { scope: "global" }); const child1 = await childTask.triggerAndWait( - { message: "Hello, world!", duration: 10_000 }, - { idempotencyKey: child1Key, idempotencyKeyTTL: "60s" } + { message: "Hello, world!", duration: 500, failureChance: 0 }, + { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" } ); logger.log("Child 1", { child1 
}); - - ctx.attempt.id; - const child2 = await childTask.triggerAndWait( - { message: "Hello, world!", duration: 10_000 }, - { idempotencyKey: child1Key, idempotencyKeyTTL: "60s" } + { message: "Hello, world!", duration: 500 }, + { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" } ); logger.log("Child 2", { child2 }); + await childTask.trigger( + { message: "Hello, world!", duration: 500, failureChance: 0 }, + { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" } + ); + + const failureKey = await idempotencyKeys.create("b", { scope: "global" }); + + const child3 = await childTask.triggerAndWait( + { message: "Hello, world!", duration: 500, failureChance: 1 }, + { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" } + ); + logger.log("Child 3", { child3 }); + const child4 = await childTask.triggerAndWait( + { message: "Hello, world!", duration: 500, failureChance: 1 }, + { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" } + ); + logger.log("Child 4", { child4 }); + + const anotherKey = await idempotencyKeys.create("c", { scope: "global" }); + + const batch1 = await childTask.batchTriggerAndWait([ + { + payload: { message: "Hello, world!" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 3", duration: 500, failureChance: 0 }, + options: { idempotencyKey: anotherKey, idempotencyKeyTTL: "120s" }, + }, + ]); + logger.log("Batch 1", { batch1 }); + + await childTask.batchTrigger([ + { + payload: { message: "Hello, world!" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" }, + }, + ]); + + const results2 = await batch.triggerAndWait([ + { + id: "child", + payload: { message: "Hello, world !" 
}, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + id: "child", + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 2", { results2 }); + + const results3 = await batch.triggerByTask([ + { + task: childTask, + payload: { message: "Hello, world !" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + task: childTask, + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 3", { results3 }); + + const results4 = await batch.triggerByTaskAndWait([ + { + task: childTask, + payload: { message: "Hello, world !" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + task: childTask, + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 4", { results4 }); + }, +}); + +export const idempotencyBatch = task({ + id: "idempotency-batch", + maxDuration: 60, + run: async ({ additionalItems }: { additionalItems?: 2 }) => { + const successfulKey = await idempotencyKeys.create("a", { scope: "global" }); + const failureKey = await idempotencyKeys.create("b", { scope: "global" }); + const anotherKey = await idempotencyKeys.create("c", { scope: "global" }); + const batchKey = await idempotencyKeys.create("batch", { scope: "global" }); + + const moreItems = Array.from({ length: additionalItems ?? 0 }, (_, i) => ({ + payload: { message: `Hello, world ${i}!` }, + options: { idempotencyKey: `key-${i}`, idempotencyKeyTTL: "120s" }, + })); + + const batch1 = await childTask.batchTriggerAndWait( + [ + { + payload: { message: "Hello, world!" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 2!" 
}, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 3", duration: 500, failureChance: 0 }, + }, + // Include runs in the same batch with the same idempotencyKeys + // I'm sure people will do this, even though it doesn't make sense + { + payload: { message: "Hello, world!" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" }, + }, + ...moreItems, + ], + { + idempotencyKey: batchKey, + idempotencyKeyTTL: "120s", + } + ); + logger.log("Batch 1", { batch1 }); + + const b = await batch.retrieve(batch1.id); + logger.log("Batch retrieve", { ...b }); + + const batch2 = await childTask.batchTriggerAndWait( + [ + { + payload: { message: "Hello, world!" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 3", duration: 500, failureChance: 0 }, + }, + ...moreItems, + ], + { + idempotencyKey: batchKey, + idempotencyKeyTTL: "120s", + } + ); + logger.log("Batch 1", { batch1 }); + + await childTask.batchTrigger([ + { + payload: { message: "Hello, world!" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "120s" }, + }, + { + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "120s" }, + }, + ]); + + await childTask.batchTrigger([ + { + payload: { message: "Hello, world!" }, + }, + { + payload: { message: "Hello, world 2!" }, + }, + ]); + + const results2 = await batch.triggerAndWait([ + { + id: "child", + payload: { message: "Hello, world !" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + id: "child", + payload: { message: "Hello, world 2!" 
}, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 2", { results2 }); + + const results3 = await batch.triggerByTask([ + { + task: childTask, + payload: { message: "Hello, world !" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + task: childTask, + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 3", { results3 }); + + const results4 = await batch.triggerByTaskAndWait([ + { + task: childTask, + payload: { message: "Hello, world !" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + task: childTask, + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 4", { results4 }); + }, +}); + +export const idempotencyTriggerByTaskAndWait = task({ + id: "idempotency-trigger-by-task-and-wait", + maxDuration: 60, + run: async () => { + const successfulKey = await idempotencyKeys.create("a", { scope: "global" }); + const failureKey = await idempotencyKeys.create("b", { scope: "global" }); + + const results1 = await batch.triggerByTaskAndWait([ + { + task: childTask, + payload: { message: "Hello, world !" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + task: childTask, + payload: { message: "Hello, world 2!" }, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 1", { results1 }); - // const results = await childTask.batchTriggerAndWait([ - // { - // payload: { message: "Hello, world!" }, - // //@ts-ignore - // options: { idempotencyKey: "1", idempotencyKeyTTL: "60s" }, - // }, - // { - // payload: { message: "Hello, world 2!" 
}, - // //@ts-ignore - // options: { idempotencyKey: "2", idempotencyKeyTTL: "60s" }, - // }, - // ]); - // logger.log("Results", { results }); - - // const results2 = await batch.triggerAndWait([ - // { - // id: "child", - // payload: { message: "Hello, world !" }, - // //@ts-ignore - // options: { idempotencyKey: "1", idempotencyKeyTTL: "60s" }, - // }, - // { - // id: "child", - // payload: { message: "Hello, world 2!" }, - // //@ts-ignore - // options: { idempotencyKey: "2", idempotencyKeyTTL: "60s" }, - // }, - // ]); - // logger.log("Results 2", { results2 }); - - // const results3 = await batch.triggerByTask([ - // { - // task: childTask, - // payload: { message: "Hello, world !" }, - // options: { idempotencyKey: "1", idempotencyKeyTTL: "60s" }, - // }, - // { - // task: childTask, - // payload: { message: "Hello, world 2!" }, - // options: { idempotencyKey: "2", idempotencyKeyTTL: "60s" }, - // }, - // ]); - // logger.log("Results 3", { results3 }); - - // const results4 = await batch.triggerByTaskAndWait([ - // { task: childTask, payload: { message: "Hello, world !" } }, - // { task: childTask, payload: { message: "Hello, world 2!" } }, - // ]); - // logger.log("Results 4", { results4 }); + const results2 = await batch.triggerByTaskAndWait([ + { + task: childTask, + payload: { message: "Hello, world !" }, + options: { idempotencyKey: successfulKey, idempotencyKeyTTL: "60s" }, + }, + { + task: childTask, + payload: { message: "Hello, world 2!" 
}, + options: { idempotencyKey: failureKey, idempotencyKeyTTL: "60s" }, + }, + ]); + logger.log("Results 2", { results2 }); }, }); From e942c040f16bfcd6d302005df58a59d2e05d4f36 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 17 Jan 2025 18:15:52 +0000 Subject: [PATCH 342/485] Latest lockfile --- pnpm-lock.yaml | 481 +++++++++++++++++++------------------------------ 1 file changed, 183 insertions(+), 298 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7e67336f7a..51d52bdd87 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -14,6 +14,9 @@ patchedDependencies: graphile-worker@0.16.6: hash: hdpetta7btqcc7xb5wfkcnanoa path: patches/graphile-worker@0.16.6.patch + redlock@5.0.0-beta.2: + hash: rwyegdki7iserrd7fgjwxkhnlu + path: patches/redlock@5.0.0-beta.2.patch importers: @@ -60,7 +63,7 @@ importers: specifier: ^1.10.3 version: 1.10.3 typescript: - specifier: ^5.5.4 + specifier: 5.5.4 version: 5.5.4 vite: specifier: ^4.1.1 @@ -90,9 +93,6 @@ importers: specifier: ^0.3.0 version: 0.3.0 devDependencies: - '@types/node': - specifier: ^18 - version: 18.17.1 dotenv: specifier: ^16.4.2 version: 16.4.4 @@ -102,9 +102,6 @@ importers: tsx: specifier: ^4.7.0 version: 4.7.1 - typescript: - specifier: ^5.3.3 - version: 5.3.3 apps/docker-provider: dependencies: @@ -115,9 +112,6 @@ importers: specifier: ^8.0.1 version: 8.0.1 devDependencies: - '@types/node': - specifier: ^18.19.8 - version: 18.19.20 dotenv: specifier: ^16.4.2 version: 16.4.4 @@ -127,9 +121,6 @@ importers: tsx: specifier: ^4.7.0 version: 4.7.1 - typescript: - specifier: ^5.3.3 - version: 5.3.3 apps/kubernetes-provider: dependencies: @@ -152,9 +143,6 @@ importers: tsx: specifier: ^4.7.0 version: 4.7.1 - typescript: - specifier: ^5.3.3 - version: 5.3.3 apps/proxy: dependencies: @@ -177,9 +165,6 @@ importers: '@cloudflare/workers-types': specifier: ^4.20240512.0 version: 4.20240512.0 - typescript: - specifier: ^5.0.4 - version: 5.2.2 wrangler: specifier: ^3.57.1 version: 
3.57.1(@cloudflare/workers-types@4.20240512.0) @@ -240,6 +225,9 @@ importers: '@heroicons/react': specifier: ^2.0.12 version: 2.0.13(react@18.2.0) + '@internal/run-engine': + specifier: workspace:* + version: link:../../internal-packages/run-engine '@internal/zod-worker': specifier: workspace:* version: link:../../internal-packages/zod-worker @@ -335,22 +323,22 @@ importers: version: 3.7.1(react@18.2.0) '@remix-run/express': specifier: 2.1.0 - version: 2.1.0(express@4.18.2)(typescript@5.2.2) + version: 2.1.0(express@4.18.2)(typescript@5.5.4) '@remix-run/node': specifier: 2.1.0 - version: 2.1.0(typescript@5.2.2) + version: 2.1.0(typescript@5.5.4) '@remix-run/react': specifier: 2.1.0 - version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/router': specifier: ^1.15.3 version: 1.15.3 '@remix-run/serve': specifier: 2.1.0 - version: 2.1.0(typescript@5.2.2) + version: 2.1.0(typescript@5.5.4) '@remix-run/server-runtime': specifier: 2.1.0 - version: 2.1.0(typescript@5.2.2) + version: 2.1.0(typescript@5.5.4) '@remix-run/v1-meta': specifier: ^0.1.3 version: 0.1.3(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) @@ -422,7 +410,7 @@ importers: version: 1.0.18 class-variance-authority: specifier: ^0.5.2 - version: 0.5.2(typescript@5.2.2) + version: 0.5.2(typescript@5.5.4) clsx: specifier: ^1.2.1 version: 1.2.1 @@ -464,7 +452,7 @@ importers: version: 10.12.11(react-dom@18.2.0)(react@18.2.0) graphile-worker: specifier: 0.16.6 - version: 0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.2.2) + version: 0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.5.4) highlight.run: specifier: ^7.3.4 version: 7.3.4 @@ -657,13 +645,13 @@ importers: version: link:../../internal-packages/testcontainers '@remix-run/dev': specifier: 2.1.0 - version: 2.1.0(@remix-run/serve@2.1.0)(@types/node@18.11.18)(ts-node@10.9.1)(typescript@5.2.2) + version: 
2.1.0(@remix-run/serve@2.1.0)(@types/node@18.11.18)(ts-node@10.9.1)(typescript@5.5.4) '@remix-run/eslint-config': specifier: 2.1.0 - version: 2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.2.2) + version: 2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/testing': specifier: ^2.1.0 - version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + version: 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@swc/core': specifier: ^1.3.4 version: 1.3.26 @@ -709,9 +697,6 @@ importers: '@types/morgan': specifier: ^1.9.3 version: 1.9.4 - '@types/node': - specifier: ^18.11.15 - version: 18.11.18 '@types/node-fetch': specifier: ^2.6.2 version: 2.6.2 @@ -756,10 +741,10 @@ importers: version: 8.5.4 '@typescript-eslint/eslint-plugin': specifier: ^5.59.6 - version: 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2) + version: 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4) '@typescript-eslint/parser': specifier: ^5.59.6 - version: 5.59.6(eslint@8.31.0)(typescript@5.2.2) + version: 5.59.6(eslint@8.31.0)(typescript@5.5.4) autoprefixer: specifier: ^10.4.13 version: 10.4.13(postcss@8.4.44) @@ -804,7 +789,7 @@ importers: version: 16.0.1(postcss@8.4.44) postcss-loader: specifier: ^8.1.1 - version: 8.1.1(postcss@8.4.44)(typescript@5.2.2)(webpack@5.88.2) + version: 8.1.1(postcss@8.4.44)(typescript@5.5.4)(webpack@5.88.2) prettier: specifier: ^2.8.8 version: 2.8.8 @@ -831,16 +816,13 @@ importers: version: 3.4.1(ts-node@10.9.1) ts-node: specifier: ^10.7.0 - version: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2) + version: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.5.4) tsconfig-paths: specifier: ^3.14.1 version: 3.14.1 - typescript: - specifier: ^5.1.6 - version: 5.2.2 vite-tsconfig-paths: specifier: ^4.0.5 - version: 4.0.5(typescript@5.2.2) + version: 4.0.5(typescript@5.5.4) vitest: specifier: ^1.4.0 version: 1.4.0(@types/node@18.11.18) @@ -852,9 +834,6 @@ importers: 
'@prisma/client': specifier: 5.4.1 version: 5.4.1(prisma@5.4.1) - typescript: - specifier: ^4.8.4 - version: 4.9.5 devDependencies: prisma: specifier: 5.4.1 @@ -890,18 +869,12 @@ importers: specifier: 3.23.8 version: 3.23.8 devDependencies: - '@types/node': - specifier: ^18 - version: 18.19.20 '@types/nodemailer': specifier: ^6.4.17 version: 6.4.17 '@types/react': specifier: 18.2.69 version: 18.2.69 - typescript: - specifier: ^4.9.4 - version: 4.9.5 internal-packages/otlp-importer: dependencies: @@ -921,9 +894,6 @@ importers: ts-proto: specifier: ^1.167.3 version: 1.167.3 - typescript: - specifier: ^5.5.0 - version: 5.5.4 internal-packages/redis-worker: dependencies: @@ -942,9 +912,6 @@ importers: nanoid: specifier: ^5.0.7 version: 5.0.7 - typescript: - specifier: ^5.5.4 - version: 5.5.4 zod: specifier: 3.23.8 version: 3.23.8 @@ -959,6 +926,46 @@ importers: specifier: ^1.4.0 version: 1.6.0(@types/node@20.14.14) + internal-packages/run-engine: + dependencies: + '@internal/redis-worker': + specifier: workspace:* + version: link:../redis-worker + '@opentelemetry/api': + specifier: ^1.9.0 + version: 1.9.0 + '@opentelemetry/semantic-conventions': + specifier: ^1.27.0 + version: 1.28.0 + '@trigger.dev/core': + specifier: workspace:* + version: link:../../packages/core + '@trigger.dev/database': + specifier: workspace:* + version: link:../database + assert-never: + specifier: ^1.2.1 + version: 1.2.1 + ioredis: + specifier: ^5.3.2 + version: 5.3.2 + nanoid: + specifier: ^3.3.4 + version: 3.3.7 + redlock: + specifier: 5.0.0-beta.2 + version: 5.0.0-beta.2(patch_hash=rwyegdki7iserrd7fgjwxkhnlu) + zod: + specifier: 3.23.8 + version: 3.23.8 + devDependencies: + '@internal/testcontainers': + specifier: workspace:* + version: link:../testcontainers + vitest: + specifier: ^1.4.0 + version: 1.6.0(@types/node@20.14.14) + internal-packages/testcontainers: dependencies: '@opentelemetry/api': @@ -970,9 +977,6 @@ importers: ioredis: specifier: ^5.3.2 version: 5.3.2 - typescript: - 
specifier: ^4.8.4 - version: 4.9.5 devDependencies: '@testcontainers/postgresql': specifier: ^10.13.1 @@ -980,6 +984,9 @@ importers: '@testcontainers/redis': specifier: ^10.13.1 version: 10.13.1 + '@trigger.dev/core': + specifier: workspace:* + version: link:../../packages/core testcontainers: specifier: ^10.13.1 version: 10.13.1 @@ -1007,9 +1014,6 @@ importers: lodash.omit: specifier: ^4.5.0 version: 4.5.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 zod: specifier: 3.23.8 version: 3.23.8 @@ -1042,9 +1046,6 @@ importers: '@arethetypeswrong/cli': specifier: ^0.15.4 version: 0.15.4 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 esbuild: specifier: ^0.23.0 version: 0.23.0 @@ -1057,9 +1058,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages/cli-v3: dependencies: @@ -1189,6 +1187,9 @@ importers: signal-exit: specifier: ^4.1.0 version: 4.1.0 + socket.io-client: + specifier: 4.7.5 + version: 4.7.5 source-map-support: specifier: 0.5.21 version: 0.5.21 @@ -1226,9 +1227,6 @@ importers: '@types/gradient-string': specifier: ^1.1.2 version: 1.1.2 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 '@types/object-hash': specifier: 3.0.6 version: 3.0.6 @@ -1271,15 +1269,15 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 vitest: specifier: ^2.0.5 version: 2.0.5(@types/node@20.14.14) packages/core: dependencies: + '@bugsnag/cuid': + specifier: ^3.1.1 + version: 3.1.1 '@electric-sql/client': specifier: 1.0.0-beta.1 version: 1.0.0-beta.1 @@ -1340,12 +1338,18 @@ importers: nanoid: specifier: ^3.3.4 version: 3.3.7 + socket.io: + specifier: 4.7.4 + version: 4.7.4 socket.io-client: specifier: 4.7.5 version: 4.7.5 superjson: specifier: ^2.2.1 version: 2.2.1 + tinyexec: + specifier: ^0.3.2 + version: 0.3.2 zod: specifier: 3.23.8 version: 3.23.8 @@ -1365,12 +1369,12 @@ importers: '@epic-web/test-server': specifier: ^0.1.0 version: 0.1.0 + 
'@trigger.dev/database': + specifier: workspace:* + version: link:../../internal-packages/database '@types/humanize-duration': specifier: ^3.27.1 version: 3.27.1 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 '@types/readable-stream': specifier: ^4.0.14 version: 4.0.14 @@ -1386,9 +1390,6 @@ importers: rimraf: specifier: ^3.0.2 version: 3.0.2 - socket.io: - specifier: 4.7.4 - version: 4.7.4 ts-essentials: specifier: 10.0.1 version: 10.0.1(typescript@5.5.4) @@ -1398,9 +1399,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 vitest: specifier: ^1.6.0 version: 1.6.0(@types/node@20.14.14) @@ -1423,9 +1421,6 @@ importers: '@arethetypeswrong/cli': specifier: ^0.15.4 version: 0.15.4 - '@types/node': - specifier: ^20.14.14 - version: 20.14.14 '@types/react': specifier: '*' version: 18.3.1 @@ -1441,9 +1436,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages/rsc: dependencies: @@ -1484,9 +1476,6 @@ importers: tsx: specifier: 4.17.0 version: 4.17.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages/trigger-sdk: dependencies: @@ -1536,9 +1525,6 @@ importers: '@types/debug': specifier: ^4.1.7 version: 4.1.7 - '@types/node': - specifier: 20.14.14 - version: 20.14.14 '@types/slug': specifier: ^5.0.3 version: 5.0.3 @@ -1566,9 +1552,6 @@ importers: typed-emitter: specifier: ^2.1.0 version: 2.1.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 zod: specifier: 3.23.8 version: 3.23.8 @@ -1585,9 +1568,6 @@ importers: trigger.dev: specifier: workspace:* version: link:../../packages/cli-v3 - typescript: - specifier: ^5.5.4 - version: 5.5.4 references/hello-world: dependencies: @@ -1598,18 +1578,12 @@ importers: trigger.dev: specifier: workspace:* version: link:../../packages/cli-v3 - typescript: - specifier: ^5.5.4 - version: 5.5.4 references/init-shell: devDependencies: trigger.dev: specifier: workspace:* version: link:../../packages/cli-v3 - 
typescript: - specifier: ^5.5.4 - version: 5.5.4 references/init-shell-js: devDependencies: @@ -1707,9 +1681,6 @@ importers: '@trigger.dev/rsc': specifier: workspace:^3 version: link:../../packages/rsc - '@types/node': - specifier: ^20 - version: 20.14.14 '@types/react': specifier: ^18 version: 18.3.1 @@ -1725,9 +1696,6 @@ importers: trigger.dev: specifier: workspace:^3 version: link:../../packages/cli-v3 - typescript: - specifier: ^5 - version: 5.5.4 references/v3-catalog: dependencies: @@ -1900,9 +1868,6 @@ importers: '@types/fluent-ffmpeg': specifier: ^2.1.26 version: 2.1.26 - '@types/node': - specifier: 20.4.2 - version: 20.4.2 '@types/react': specifier: ^18.3.1 version: 18.3.1 @@ -1924,9 +1889,6 @@ importers: tsconfig-paths: specifier: ^4.2.0 version: 4.2.0 - typescript: - specifier: ^5.5.4 - version: 5.5.4 packages: @@ -4965,6 +4927,10 @@ packages: resolution: {integrity: sha512-QDdVFLoN93Zjg36NoQPZfsVH9tZew7wKDKyV5qRdj8ntT4wQCOradQjRaTdwMhWUYsgKsvCINKKm87FdEk96Ag==} dev: false + /@bugsnag/cuid@3.1.1: + resolution: {integrity: sha512-d2z4b0rEo3chI07FNN1Xds8v25CNeekecU6FC/2Fs9MxY2EipkZTThVcV2YinMn8dvRUlViKOyC50evoUxg8tw==} + dev: false + /@bundled-es-modules/cookie@2.0.0: resolution: {integrity: sha512-Or6YHg/kamKHpxULAdSqhGqnWFneIXu1NKvvfBBzKGwpVsYuFIQ5aBPHDnnoR3ghW1nvSkALd+EF9iMtY7Vjxw==} dependencies: @@ -9172,11 +9138,6 @@ packages: resolution: {integrity: sha512-ZDjMJJQRlyk8A1KZFCc+bCbsyrn1wTwdNt56F7twdfUfnHUZUq77/WfONCj8p72NZOyP7pNTdUWSTYC3GTbuuQ==} engines: {node: '>=14'} - /@opentelemetry/semantic-conventions@1.27.0: - resolution: {integrity: sha512-sAay1RrB+ONOem0OZanAR1ZI/k7yDpnOQSQmTMuGImUQb2y8EbSaCJ94FQluM74xoU03vlb2d2U90hZluL6nQg==} - engines: {node: '>=14'} - dev: false - /@opentelemetry/semantic-conventions@1.28.0: resolution: {integrity: sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==} engines: {node: '>=14'} @@ -14601,7 +14562,7 @@ packages: - encoding dev: false - 
/@remix-run/dev@2.1.0(@remix-run/serve@2.1.0)(@types/node@18.11.18)(ts-node@10.9.1)(typescript@5.2.2): + /@remix-run/dev@2.1.0(@remix-run/serve@2.1.0)(@types/node@18.11.18)(ts-node@10.9.1)(typescript@5.5.4): resolution: {integrity: sha512-Hn5lw46F+a48dp5uHKe68ckaHgdStW4+PmLod+LMFEqrMbkF0j4XD1ousebxlv989o0Uy/OLgfRMgMy4cBOvHg==} engines: {node: '>=18.0.0'} hasBin: true @@ -14623,8 +14584,8 @@ packages: '@babel/traverse': 7.22.17 '@mdx-js/mdx': 2.3.0 '@npmcli/package-json': 4.0.1 - '@remix-run/serve': 2.1.0(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/serve': 2.1.0(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) '@types/mdx': 2.0.5 '@vanilla-extract/integration': 6.2.1(@types/node@18.11.18) arg: 5.0.2 @@ -14662,7 +14623,7 @@ packages: semver: 7.6.3 tar-fs: 2.1.1 tsconfig-paths: 4.2.0 - typescript: 5.2.2 + typescript: 5.5.4 ws: 7.5.9 transitivePeerDependencies: - '@types/node' @@ -14680,7 +14641,7 @@ packages: - utf-8-validate dev: true - /@remix-run/eslint-config@2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.2.2): + /@remix-run/eslint-config@2.1.0(eslint@8.31.0)(react@18.2.0)(typescript@5.5.4): resolution: {integrity: sha512-yfeUnHpUG+XveujMi6QODKMGhs5CvKWCKzASU397BPXiPWbMv6r2acfODSWK64ZdBMu9hcLbOb42GBFydVQeHA==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14695,28 +14656,28 @@ packages: '@babel/eslint-parser': 7.21.8(@babel/core@7.22.17)(eslint@8.31.0) '@babel/preset-react': 7.18.6(@babel/core@7.22.17) '@rushstack/eslint-patch': 1.2.0 - '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2) - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) eslint: 8.31.0 eslint-import-resolver-node: 0.3.7 eslint-import-resolver-typescript: 
3.5.5(@typescript-eslint/parser@5.59.6)(eslint-import-resolver-node@0.3.7)(eslint-plugin-import@2.29.1)(eslint@8.31.0) eslint-plugin-import: 2.29.1(@typescript-eslint/parser@5.59.6)(eslint-import-resolver-typescript@3.5.5)(eslint@8.31.0) - eslint-plugin-jest: 26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.2.2) + eslint-plugin-jest: 26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.5.4) eslint-plugin-jest-dom: 4.0.3(eslint@8.31.0) eslint-plugin-jsx-a11y: 6.7.1(eslint@8.31.0) eslint-plugin-node: 11.1.0(eslint@8.31.0) eslint-plugin-react: 7.32.2(eslint@8.31.0) eslint-plugin-react-hooks: 4.6.2(eslint@8.31.0) - eslint-plugin-testing-library: 5.11.0(eslint@8.31.0)(typescript@5.2.2) + eslint-plugin-testing-library: 5.11.0(eslint@8.31.0)(typescript@5.5.4) react: 18.2.0 - typescript: 5.2.2 + typescript: 5.5.4 transitivePeerDependencies: - eslint-import-resolver-webpack - jest - supports-color dev: true - /@remix-run/express@2.1.0(express@4.18.2)(typescript@5.2.2): + /@remix-run/express@2.1.0(express@4.18.2)(typescript@5.5.4): resolution: {integrity: sha512-R5myPowQx6LYWY3+EqP42q19MOCT3+ZGwb2f0UKNs9a34R8U3nFpGWL7saXryC+To+EasujEScc8rTQw5Pftog==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14726,11 +14687,11 @@ packages: typescript: optional: true dependencies: - '@remix-run/node': 2.1.0(typescript@5.2.2) + '@remix-run/node': 2.1.0(typescript@5.5.4) express: 4.18.2 - typescript: 5.2.2 + typescript: 5.5.4 - /@remix-run/node@2.1.0(typescript@5.2.2): + /@remix-run/node@2.1.0(typescript@5.5.4): resolution: {integrity: sha512-TeSgjXnZUUlmw5FVpBVnXY7MLpracjdnwFNwoJE5NQkiUEFnGD/Yhvk4F2fOCkszqc2Z25KRclc5noweyiFu6Q==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14739,7 +14700,7 @@ packages: typescript: optional: true dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) '@remix-run/web-fetch': 4.4.1 '@remix-run/web-file': 3.1.0 
'@remix-run/web-stream': 1.1.0 @@ -14747,9 +14708,9 @@ packages: cookie-signature: 1.2.0 source-map-support: 0.5.21 stream-slice: 0.1.2 - typescript: 5.2.2 + typescript: 5.5.4 - /@remix-run/react@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2): + /@remix-run/react@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4): resolution: {integrity: sha512-DeYgfsvNxHqNn29sGA3XsZCciMKo2EFTQ9hHkuVPTsJXC4ipHr6Dja1j6UzZYPe/ZuKppiuTjueWCQlE2jOe1w==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14761,11 +14722,11 @@ packages: optional: true dependencies: '@remix-run/router': 1.10.0 - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) react: 18.2.0 react-dom: 18.2.0(react@18.2.0) react-router-dom: 6.17.0(react-dom@18.2.0)(react@18.2.0) - typescript: 5.2.2 + typescript: 5.5.4 /@remix-run/router@1.10.0: resolution: {integrity: sha512-Lm+fYpMfZoEucJ7cMxgt4dYt8jLfbpwRCzAjm9UgSLOkmlqo9gupxt6YX3DY0Fk155NT9l17d/ydi+964uS9Lw==} @@ -14776,13 +14737,13 @@ packages: engines: {node: '>=14.0.0'} dev: false - /@remix-run/serve@2.1.0(typescript@5.2.2): + /@remix-run/serve@2.1.0(typescript@5.5.4): resolution: {integrity: sha512-XHI+vPYz217qrg1QcV38TTPlEBTzMJzAt0SImPutyF0S2IBrZGZIFMEsspI0i0wNvdcdQz1IqmSx+mTghzW8eQ==} engines: {node: '>=18.0.0'} hasBin: true dependencies: - '@remix-run/express': 2.1.0(express@4.18.2)(typescript@5.2.2) - '@remix-run/node': 2.1.0(typescript@5.2.2) + '@remix-run/express': 2.1.0(express@4.18.2)(typescript@5.5.4) + '@remix-run/node': 2.1.0(typescript@5.5.4) chokidar: 3.6.0 compression: 1.7.4 express: 4.18.2 @@ -14793,7 +14754,7 @@ packages: - supports-color - typescript - /@remix-run/server-runtime@2.1.0(typescript@5.2.2): + /@remix-run/server-runtime@2.1.0(typescript@5.5.4): resolution: {integrity: sha512-Uz69yF4Gu6F3VYQub3JgDo9godN8eDMeZclkadBTAWN7bYLonu0ChR/GlFxS35OLeF7BDgudxOSZob0nE1WHNg==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14808,9 +14769,9 @@ packages: cookie: 0.4.2 
set-cookie-parser: 2.6.0 source-map: 0.7.4 - typescript: 5.2.2 + typescript: 5.5.4 - /@remix-run/testing@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2): + /@remix-run/testing@2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4): resolution: {integrity: sha512-eLPx4Bmjt243kyRpQTong1eFo6nkvSfCr65bb5PfoF172DKnsSSCYWAmBmB72VwtAPESHxBm3g6AUbhwphkU6A==} engines: {node: '>=18.0.0'} peerDependencies: @@ -14820,12 +14781,12 @@ packages: typescript: optional: true dependencies: - '@remix-run/node': 2.1.0(typescript@5.2.2) - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + '@remix-run/node': 2.1.0(typescript@5.5.4) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/router': 1.10.0 react: 18.2.0 react-router-dom: 6.17.0(react-dom@18.2.0)(react@18.2.0) - typescript: 5.2.2 + typescript: 5.5.4 transitivePeerDependencies: - react-dom dev: true @@ -14836,8 +14797,8 @@ packages: '@remix-run/react': ^1.15.0 || ^2.0.0 '@remix-run/server-runtime': ^1.15.0 || ^2.0.0 dependencies: - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) dev: false /@remix-run/web-blob@3.1.0: @@ -15950,6 +15911,7 @@ packages: /@socket.io/component-emitter@3.1.0: resolution: {integrity: sha512-+9jVqKhRSpsc591z5vX+X5Yyw+he/HCB4iQ/RYxw35CEPaY1gnsNE43nf9n9AaYjAQrTiI/mOwKUKdUs9vf7Xg==} + dev: false /@socket.io/redis-adapter@8.3.0(socket.io-adapter@2.5.4): resolution: {integrity: sha512-ly0cra+48hDmChxmIpnESKrc94LjRL80TEmZVscuQ/WWkRP81nNj8W8cCGMqbI4L6NCuAaPRSzZF1a9GlAxxnA==} @@ -16406,7 +16368,7 @@ packages: dependencies: '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.4.1) '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.4.1) - '@opentelemetry/semantic-conventions': 1.27.0 + 
'@opentelemetry/semantic-conventions': 1.28.0 '@traceloop/ai-semantic-conventions': 0.10.0 js-tiktoken: 1.0.14 tslib: 2.6.2 @@ -16807,10 +16769,6 @@ packages: /@types/node@18.11.18: resolution: {integrity: sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==} - /@types/node@18.17.1: - resolution: {integrity: sha512-xlR1jahfizdplZYRU59JlUx9uzF1ARa8jbhM11ccpCJya8kvos5jwdm2ZAgxSCwOl0fq21svP18EVwPBXMQudw==} - dev: true - /@types/node@18.19.20: resolution: {integrity: sha512-SKXZvI375jkpvAj8o+5U2518XQv76mAsixqfXiVyWyXZbVWQK25RurFovYpVIxVzul0rZoH58V/3SkEnm7s3qA==} dependencies: @@ -17126,7 +17084,7 @@ packages: - '@types/json-schema' dev: false - /@typescript-eslint/eslint-plugin@5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/eslint-plugin@5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-sXtOgJNEuRU5RLwPUb1jxtToZbgvq3M6FPpY4QENxoOggK+UpTxUBpj6tD8+Qh2g46Pi9We87E+eHnUw8YcGsw==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -17138,23 +17096,23 @@ packages: optional: true dependencies: '@eslint-community/regexpp': 4.5.1 - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) '@typescript-eslint/scope-manager': 5.59.6 - '@typescript-eslint/type-utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/type-utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 4.3.4 eslint: 8.31.0 grapheme-splitter: 1.0.4 ignore: 5.2.4 natural-compare-lite: 1.4.0 semver: 7.6.3 - tsutils: 3.21.0(typescript@5.2.2) - typescript: 5.2.2 + tsutils: 3.21.0(typescript@5.5.4) + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true - 
/@typescript-eslint/parser@5.59.6(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/parser@5.59.6(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-7pCa6al03Pv1yf/dUg/s1pXz/yGMUBAw5EeWqNTFiSueKvRNonze3hma3lhdsOrQcaOXhbk5gKu2Fludiho9VA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -17166,10 +17124,10 @@ packages: dependencies: '@typescript-eslint/scope-manager': 5.59.6 '@typescript-eslint/types': 5.59.6 - '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.2.2) + '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) debug: 4.3.4 eslint: 8.31.0 - typescript: 5.2.2 + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true @@ -17182,7 +17140,7 @@ packages: '@typescript-eslint/visitor-keys': 5.59.6 dev: true - /@typescript-eslint/type-utils@5.59.6(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/type-utils@5.59.6(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-A4tms2Mp5yNvLDlySF+kAThV9VTBPCvGf0Rp8nl/eoDX9Okun8byTKoj3fJ52IJitjWOk0fKPNQhXEB++eNozQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -17192,12 +17150,12 @@ packages: typescript: optional: true dependencies: - '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.2.2) - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 4.3.7 eslint: 8.31.0 - tsutils: 3.21.0(typescript@5.2.2) - typescript: 5.2.2 + tsutils: 3.21.0(typescript@5.5.4) + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true @@ -17207,7 +17165,7 @@ packages: engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true - /@typescript-eslint/typescript-estree@5.59.6(typescript@5.2.2): + /@typescript-eslint/typescript-estree@5.59.6(typescript@5.5.4): resolution: {integrity: 
sha512-vW6JP3lMAs/Tq4KjdI/RiHaaJSO7IUsbkz17it/Rl9Q+WkQ77EOuOnlbaU8kKfVIOJxMhnRiBG+olE7f3M16DA==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -17222,13 +17180,13 @@ packages: globby: 11.1.0 is-glob: 4.0.3 semver: 7.6.3 - tsutils: 3.21.0(typescript@5.2.2) - typescript: 5.2.2 + tsutils: 3.21.0(typescript@5.5.4) + typescript: 5.5.4 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/utils@5.59.6(eslint@8.31.0)(typescript@5.2.2): + /@typescript-eslint/utils@5.59.6(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-vzaaD6EXbTS29cVH0JjXBdzMt6VBlv+hE31XktDRMX1j3462wZCJa7VzO2AxXEXcIl8GQqZPcOPuW/Z1tZVogg==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -17239,7 +17197,7 @@ packages: '@types/semver': 7.5.1 '@typescript-eslint/scope-manager': 5.59.6 '@typescript-eslint/types': 5.59.6 - '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.2.2) + '@typescript-eslint/typescript-estree': 5.59.6(typescript@5.5.4) eslint: 8.31.0 eslint-scope: 5.1.1 semver: 7.6.3 @@ -19105,7 +19063,7 @@ packages: assertion-error: 1.1.0 check-error: 1.0.3 deep-eql: 4.1.3 - get-func-name: 2.0.0 + get-func-name: 2.0.2 loupe: 2.3.7 pathval: 1.1.1 type-detect: 4.0.8 @@ -19292,7 +19250,7 @@ packages: /cjs-module-lexer@1.2.3: resolution: {integrity: sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==} - /class-variance-authority@0.5.2(typescript@5.2.2): + /class-variance-authority@0.5.2(typescript@5.5.4): resolution: {integrity: sha512-j7Qqw3NPbs4IpO80gvdACWmVvHiLLo5MECacUBLnJG17CrLpWaQ7/4OaWX6P0IO1j2nvZ7AuSfBS/ImtEUZJGA==} peerDependencies: typescript: '>= 4.5.5 < 6' @@ -19300,7 +19258,7 @@ packages: typescript: optional: true dependencies: - typescript: 5.2.2 + typescript: 5.5.4 dev: false /class-variance-authority@0.7.0: @@ -19703,22 +19661,6 @@ packages: yaml: 1.10.2 dev: true - /cosmiconfig@8.3.6(typescript@5.2.2): - resolution: {integrity: 
sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} - engines: {node: '>=14'} - peerDependencies: - typescript: '>=4.9.5' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - import-fresh: 3.3.0 - js-yaml: 4.1.0 - parse-json: 5.2.0 - path-type: 4.0.0 - typescript: 5.2.2 - dev: false - /cosmiconfig@8.3.6(typescript@5.5.4): resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} engines: {node: '>=14'} @@ -19735,22 +19677,6 @@ packages: typescript: 5.5.4 dev: false - /cosmiconfig@9.0.0(typescript@5.2.2): - resolution: {integrity: sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==} - engines: {node: '>=14'} - peerDependencies: - typescript: '>=4.9.5' - peerDependenciesMeta: - typescript: - optional: true - dependencies: - env-paths: 2.2.1 - import-fresh: 3.3.0 - js-yaml: 4.1.0 - parse-json: 5.2.0 - typescript: 5.2.2 - dev: true - /cosmiconfig@9.0.0(typescript@5.5.4): resolution: {integrity: sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==} engines: {node: '>=14'} @@ -19765,7 +19691,6 @@ packages: js-yaml: 4.1.0 parse-json: 5.2.0 typescript: 5.5.4 - dev: false /cp-file@10.0.0: resolution: {integrity: sha512-vy2Vi1r2epK5WqxOLnskeKeZkdZvTKfFZQCplE3XWsP+SUJyd5XAUFC9lFgTjjXJF2GMne/UML14iEmkAaDfFg==} @@ -21420,7 +21345,7 @@ packages: eslint-import-resolver-webpack: optional: true dependencies: - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 3.2.7 eslint: 8.31.0 eslint-import-resolver-node: 0.3.7 @@ -21450,7 +21375,7 @@ packages: eslint-import-resolver-webpack: optional: true dependencies: - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) debug: 3.2.7 eslint: 8.31.0 
eslint-import-resolver-node: 0.3.9 @@ -21480,7 +21405,7 @@ packages: '@typescript-eslint/parser': optional: true dependencies: - '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/parser': 5.59.6(eslint@8.31.0)(typescript@5.5.4) array-includes: 3.1.8 array.prototype.findlastindex: 1.2.5 array.prototype.flat: 1.3.2 @@ -21517,7 +21442,7 @@ packages: requireindex: 1.2.0 dev: true - /eslint-plugin-jest@26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.2.2): + /eslint-plugin-jest@26.9.0(@typescript-eslint/eslint-plugin@5.59.6)(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-TWJxWGp1J628gxh2KhaH1H1paEdgE2J61BBF1I59c6xWeL5+D1BzMxGDN/nXAfX+aSkR5u80K+XhskK6Gwq9ng==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} peerDependencies: @@ -21530,8 +21455,8 @@ packages: jest: optional: true dependencies: - '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.2.2) - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/eslint-plugin': 5.59.6(@typescript-eslint/parser@5.59.6)(eslint@8.31.0)(typescript@5.5.4) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) eslint: 8.31.0 transitivePeerDependencies: - supports-color @@ -21611,13 +21536,13 @@ packages: string.prototype.matchall: 4.0.8 dev: true - /eslint-plugin-testing-library@5.11.0(eslint@8.31.0)(typescript@5.2.2): + /eslint-plugin-testing-library@5.11.0(eslint@8.31.0)(typescript@5.5.4): resolution: {integrity: sha512-ELY7Gefo+61OfXKlQeXNIDVVLPcvKTeiQOoMZG9TeuWa7Ln4dUNRv8JdRWBQI9Mbb427XGlVB1aa1QPZxBJM8Q==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0, npm: '>=6'} peerDependencies: eslint: ^7.5.0 || ^8.0.0 dependencies: - '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.2.2) + '@typescript-eslint/utils': 5.59.6(eslint@8.31.0)(typescript@5.5.4) eslint: 8.31.0 transitivePeerDependencies: - supports-color @@ -22585,10 +22510,6 
@@ packages: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} - /get-func-name@2.0.0: - resolution: {integrity: sha512-Hm0ixYtaSZ/V7C8FJrtZIuBBI+iSgL+1Aq82zSu8VQNB4S3Gk8e7Qs3VwBDJAhmRZcFqkl3tQu36g/Foh5I5ig==} - dev: true - /get-func-name@2.0.2: resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} dev: true @@ -22909,27 +22830,6 @@ packages: - supports-color dev: false - /graphile-worker@0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.2.2): - resolution: {integrity: sha512-e7gGYDmGqzju2l83MpzX8vNG/lOtVJiSzI3eZpAFubSxh/cxs7sRrRGBGjzBP1kNG0H+c95etPpNRNlH65PYhw==} - engines: {node: '>=14.0.0'} - hasBin: true - dependencies: - '@graphile/logger': 0.2.0 - '@types/debug': 4.1.12 - '@types/pg': 8.11.6 - cosmiconfig: 8.3.6(typescript@5.2.2) - graphile-config: 0.0.1-beta.8 - json5: 2.2.3 - pg: 8.11.5 - tslib: 2.6.2 - yargs: 17.7.2 - transitivePeerDependencies: - - pg-native - - supports-color - - typescript - dev: false - patched: true - /graphile-worker@0.16.6(patch_hash=hdpetta7btqcc7xb5wfkcnanoa)(typescript@5.5.4): resolution: {integrity: sha512-e7gGYDmGqzju2l83MpzX8vNG/lOtVJiSzI3eZpAFubSxh/cxs7sRrRGBGjzBP1kNG0H+c95etPpNRNlH65PYhw==} engines: {node: '>=14.0.0'} @@ -25597,6 +25497,10 @@ packages: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} dev: true + /node-abort-controller@3.1.1: + resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==} + dev: false + /node-domexception@1.0.0: resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} engines: {node: '>=10.5.0'} @@ -26836,7 +26740,7 @@ packages: dependencies: lilconfig: 2.1.0 postcss: 8.4.29 - ts-node: 
10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2) + ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.5.4) yaml: 2.3.1 dev: true @@ -26854,10 +26758,10 @@ packages: dependencies: lilconfig: 2.1.0 postcss: 8.4.44 - ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2) + ts-node: 10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.5.4) yaml: 2.3.1 - /postcss-loader@8.1.1(postcss@8.4.44)(typescript@5.2.2)(webpack@5.88.2): + /postcss-loader@8.1.1(postcss@8.4.44)(typescript@5.5.4)(webpack@5.88.2): resolution: {integrity: sha512-0IeqyAsG6tYiDRCYKQJLAmgQr47DX6N7sFSWvQxt6AcupX8DIdmykuk/o/tx0Lze3ErGHJEp5OSRxrelC6+NdQ==} engines: {node: '>= 18.12.0'} peerDependencies: @@ -26870,7 +26774,7 @@ packages: webpack: optional: true dependencies: - cosmiconfig: 9.0.0(typescript@5.2.2) + cosmiconfig: 9.0.0(typescript@5.5.4) jiti: 1.21.0 postcss: 8.4.44 semver: 7.6.3 @@ -28317,6 +28221,14 @@ packages: redis-errors: 1.2.0 dev: false + /redlock@5.0.0-beta.2(patch_hash=rwyegdki7iserrd7fgjwxkhnlu): + resolution: {integrity: sha512-2RDWXg5jgRptDrB1w9O/JgSZC0j7y4SlaXnor93H/UJm/QyDiFgBKNtrh0TI6oCXqYSaSoXxFh6Sd3VtYfhRXw==} + engines: {node: '>=12'} + dependencies: + node-abort-controller: 3.1.1 + dev: false + patched: true + /reduce-css-calc@2.1.8: resolution: {integrity: sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg==} dependencies: @@ -28489,7 +28401,7 @@ packages: '@remix-run/server-runtime': ^1.1.1 remix-auth: ^3.2.1 dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) crypto-js: 4.1.1 remix-auth: 3.6.0(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) dev: false @@ -28500,7 +28412,7 @@ packages: '@remix-run/server-runtime': ^1.0.0 remix-auth: ^3.4.0 dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) remix-auth: 
3.6.0(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) remix-auth-oauth2: 1.11.0(@remix-run/server-runtime@2.1.0)(remix-auth@3.6.0) transitivePeerDependencies: @@ -28513,7 +28425,7 @@ packages: '@remix-run/server-runtime': ^1.0.0 || ^2.0.0 remix-auth: ^3.6.0 dependencies: - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) debug: 4.3.7 remix-auth: 3.6.0(@remix-run/react@2.1.0)(@remix-run/server-runtime@2.1.0) transitivePeerDependencies: @@ -28526,8 +28438,8 @@ packages: '@remix-run/react': ^1.0.0 || ^2.0.0 '@remix-run/server-runtime': ^1.0.0 || ^2.0.0 dependencies: - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) uuid: 8.3.2 dev: false @@ -28538,8 +28450,8 @@ packages: '@remix-run/server-runtime': ^1.16.0 || ^2.0 react: ^17.0.2 || ^18.0.0 dependencies: - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) - '@remix-run/server-runtime': 2.1.0(typescript@5.2.2) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) + '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) react: 18.2.0 dev: false @@ -28579,8 +28491,8 @@ packages: zod: optional: true dependencies: - '@remix-run/node': 2.1.0(typescript@5.2.2) - '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.2.2) + '@remix-run/node': 2.1.0(typescript@5.5.4) + '@remix-run/react': 2.1.0(react-dom@18.2.0)(react@18.2.0)(typescript@5.5.4) '@remix-run/router': 1.15.3 intl-parse-accept-language: 1.0.0 react: 18.2.0 @@ -29236,6 +29148,7 @@ packages: - bufferutil - supports-color - utf-8-validate + dev: false /socket.io-client@4.7.3: resolution: {integrity: sha512-nU+ywttCyBitXIl9Xe0RSEfek4LneYkJxCeNnKCuhwoH4jGXO1ipIUw/VA/+Vvv2G1MTym11fzFC0SxkrcfXDw==} @@ -29256,7 +29169,7 @@ 
packages: engines: {node: '>=10.0.0'} dependencies: '@socket.io/component-emitter': 3.1.0 - debug: 4.3.4 + debug: 4.3.7 engine.io-client: 6.5.3 socket.io-parser: 4.2.4 transitivePeerDependencies: @@ -29273,6 +29186,7 @@ packages: debug: 4.3.7 transitivePeerDependencies: - supports-color + dev: false /socket.io@4.7.3: resolution: {integrity: sha512-SE+UIQXBQE+GPG2oszWMlsEmWtHVqw/h1VrYJGK5/MC7CH5p58N448HwIrtREcvR4jfdOJAY4ieQfxMr55qbbw==} @@ -29306,6 +29220,7 @@ packages: - bufferutil - supports-color - utf-8-validate + dev: false /socket.io@4.7.5: resolution: {integrity: sha512-DmeAkF6cwM9jSfmp6Dr/5/mfMwb5Z5qRrSXLpo3Fq5SqyU8CMF15jIN4ZhfSwu35ksM1qmHZDQ/DK5XTccSTvA==} @@ -30418,6 +30333,10 @@ packages: resolution: {integrity: sha512-WiCJLEECkO18gwqIp6+hJg0//p23HXp4S+gGtAKu3mI2F2/sXC4FvHvXvB0zJVVaTPhx1/tOwdbRsa1sOBIKqQ==} dev: false + /tinyexec@0.3.2: + resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + dev: false + /tinyglobby@0.2.2: resolution: {integrity: sha512-mZ2sDMaySvi1PkTp4lTo1In2zjU+cY8OvZsfwrDrx3YGRbXPX1/cbPwCR9zkm3O/Fz9Jo0F1HNgIQ1b8BepqyQ==} engines: {node: '>=12.0.0'} @@ -30579,7 +30498,7 @@ packages: /ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} - /ts-node@10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.2.2): + /ts-node@10.9.1(@swc/core@1.3.26)(@types/node@18.11.18)(typescript@5.5.4): resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} hasBin: true peerDependencies: @@ -30606,7 +30525,7 @@ packages: create-require: 1.1.1 diff: 4.0.2 make-error: 1.3.6 - typescript: 5.2.2 + typescript: 5.5.4 v8-compile-cache-lib: 3.0.1 yn: 3.1.1 @@ -30671,19 +30590,6 @@ packages: resolution: {integrity: sha512-3IDBalvf6SyvHFS14UiwCWzqdSdo+Q0k2J7DZyJYaHW/iraW9DJpaBKDJpry3yQs3o/t/A+oGaRW3iVt2lKxzA==} dev: false - 
/tsconfck@2.1.2(typescript@5.2.2): - resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==} - engines: {node: ^14.13.1 || ^16 || >=18} - hasBin: true - peerDependencies: - typescript: ^4.3.5 || ^5.0.0 - peerDependenciesMeta: - typescript: - optional: true - dependencies: - typescript: 5.2.2 - dev: true - /tsconfck@2.1.2(typescript@5.5.4): resolution: {integrity: sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==} engines: {node: ^14.13.1 || ^16 || >=18} @@ -30768,14 +30674,14 @@ packages: /tslib@2.6.2: resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} - /tsutils@3.21.0(typescript@5.2.2): + /tsutils@3.21.0(typescript@5.5.4): resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} engines: {node: '>= 6'} peerDependencies: typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta' dependencies: tslib: 1.14.1 - typescript: 5.2.2 + typescript: 5.5.4 dev: true /tsx@3.12.2: @@ -31104,22 +31010,12 @@ packages: - supports-color dev: false - /typescript@4.9.5: - resolution: {integrity: sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==} - engines: {node: '>=4.2.0'} - hasBin: true - /typescript@5.1.6: resolution: {integrity: sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==} engines: {node: '>=14.17'} hasBin: true dev: false - /typescript@5.2.2: - resolution: {integrity: sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==} - engines: {node: '>=14.17'} - hasBin: true - /typescript@5.3.3: resolution: {integrity: sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==} 
engines: {node: '>=14.17'} @@ -31764,17 +31660,6 @@ packages: - terser dev: true - /vite-tsconfig-paths@4.0.5(typescript@5.2.2): - resolution: {integrity: sha512-/L/eHwySFYjwxoYt1WRJniuK/jPv+WGwgRGBYx3leciR5wBeqntQpUE6Js6+TJemChc+ter7fDBKieyEWDx4yQ==} - dependencies: - debug: 4.3.7 - globrex: 0.1.2 - tsconfck: 2.1.2(typescript@5.2.2) - transitivePeerDependencies: - - supports-color - - typescript - dev: true - /vite-tsconfig-paths@4.0.5(typescript@5.5.4): resolution: {integrity: sha512-/L/eHwySFYjwxoYt1WRJniuK/jPv+WGwgRGBYx3leciR5wBeqntQpUE6Js6+TJemChc+ter7fDBKieyEWDx4yQ==} dependencies: From 68813bbb9bda88d2dbf05ee49f5db1f8d915055b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 17 Jan 2025 18:16:03 +0000 Subject: [PATCH 343/485] Trigger with a machine (old run engine) --- apps/webapp/app/v3/services/triggerTaskV1.server.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/webapp/app/v3/services/triggerTaskV1.server.ts b/apps/webapp/app/v3/services/triggerTaskV1.server.ts index 51c4e4cffc..21b56f0825 100644 --- a/apps/webapp/app/v3/services/triggerTaskV1.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV1.server.ts @@ -399,6 +399,7 @@ export class TriggerTaskServiceV1 extends BaseService { : undefined, runTags: bodyTags, oneTimeUseToken: options.oneTimeUseToken, + machinePreset: body.options?.machine, }, }); From a37b528e49f0e44112c041220dcfee94e9641792 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Fri, 17 Jan 2025 18:27:07 +0000 Subject: [PATCH 344/485] RE2, allow setting machine when triggering --- apps/webapp/app/v3/services/triggerTaskV2.server.ts | 1 + internal-packages/run-engine/src/engine/index.ts | 10 +++++++--- .../run-engine/src/engine/machinePresets.ts | 13 ++++++++++++- internal-packages/run-engine/src/engine/types.ts | 1 + 4 files changed, 21 insertions(+), 4 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index b0633640fa..5a662a4d0c 100644 --- 
a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -366,6 +366,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { maxDurationInSeconds: body.options?.maxDuration ? clampMaxDuration(body.options.maxDuration) : undefined, + machine: body.options?.machine, }, this._prisma ); diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 0464bd7577..d84b264e14 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -56,7 +56,7 @@ import { runStatusFromError } from "./errors"; import { EventBusEvents } from "./eventBus"; import { executionResultFromSnapshot, getLatestExecutionSnapshot } from "./executionSnapshots"; import { RunLocker } from "./locking"; -import { machinePresetFromConfig } from "./machinePresets"; +import { getMachinePreset } from "./machinePresets"; import { isCheckpointable, isDequeueableExecutionStatus, @@ -247,6 +247,7 @@ export class RunEngine { seedMetadataType, oneTimeUseToken, maxDurationInSeconds, + machine, }: TriggerParams, tx?: PrismaClientOrTransaction ): Promise { @@ -322,6 +323,7 @@ export class RunEngine { seedMetadata, seedMetadataType, maxDurationInSeconds, + machinePreset: machine, executionSnapshots: { create: { engine: "V2", @@ -670,10 +672,11 @@ export class RunEngine { } } - const machinePreset = machinePresetFromConfig({ + const machinePreset = getMachinePreset({ machines: this.options.machines.machines, defaultMachine: this.options.machines.defaultMachine, config: result.task.machineConfig ?? 
{}, + run: result.run, }); //increment the consumed resources @@ -1124,10 +1127,11 @@ export class RunEngine { const { run, snapshot } = result; - const machinePreset = machinePresetFromConfig({ + const machinePreset = getMachinePreset({ machines: this.options.machines.machines, defaultMachine: this.options.machines.defaultMachine, config: taskRun.lockedBy.machineConfig ?? {}, + run: taskRun, }); const metadata = await parsePacket({ diff --git a/internal-packages/run-engine/src/engine/machinePresets.ts b/internal-packages/run-engine/src/engine/machinePresets.ts index 7e794fdcf1..4c526942a7 100644 --- a/internal-packages/run-engine/src/engine/machinePresets.ts +++ b/internal-packages/run-engine/src/engine/machinePresets.ts @@ -3,15 +3,26 @@ import { Logger } from "@trigger.dev/core/logger"; const logger = new Logger("machinePresetFromConfig"); -export function machinePresetFromConfig({ +export function getMachinePreset({ defaultMachine, machines, config, + run, }: { defaultMachine: MachinePresetName; machines: Record; config: unknown; + run: { machinePreset: string | null }; }): MachinePreset { + if (run.machinePreset) { + const preset = MachinePresetName.safeParse(run.machinePreset); + if (preset.error) { + logger.error("Failed to parse machine preset", { machinePreset: run.machinePreset }); + return machinePresetFromName(machines, defaultMachine); + } + return machinePresetFromName(machines, preset.data); + } + const parsedConfig = MachineConfig.safeParse(config); if (!parsedConfig.success) { diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index 9ee63b7744..0014454324 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -77,4 +77,5 @@ export type TriggerParams = { seedMetadataType?: string; oneTimeUseToken?: string; maxDurationInSeconds?: number; + machine?: MachinePresetName; }; From 46e069811162b52f8988af8719f3c1eef624260e Mon Sep 
17 00:00:00 2001 From: Matt Aitken Date: Fri, 17 Jan 2025 19:03:56 +0000 Subject: [PATCH 345/485] Fix for new glob patterns --- packages/cli-v3/src/build/bundle.ts | 36 ------------------------ packages/cli-v3/src/build/entryPoints.ts | 28 ++++++++++++++---- references/hello-world/trigger.config.ts | 1 + 3 files changed, 24 insertions(+), 41 deletions(-) diff --git a/packages/cli-v3/src/build/bundle.ts b/packages/cli-v3/src/build/bundle.ts index 2cf3e95926..a4e1c10cf4 100644 --- a/packages/cli-v3/src/build/bundle.ts +++ b/packages/cli-v3/src/build/bundle.ts @@ -265,42 +265,6 @@ export async function getBundleResultFromBuild( }; } -async function getEntryPoints(target: BuildTarget, config: ResolvedConfig) { - const projectEntryPoints = config.dirs.flatMap((dir) => dirToEntryPointGlob(dir)); - - if (config.configFile) { - projectEntryPoints.push(config.configFile); - } - - switch (target) { - case "dev": { - projectEntryPoints.push(...devEntryPoints); - break; - } - case "deploy": { - projectEntryPoints.push(...deployEntryPoints); - break; - } - case "managed": { - projectEntryPoints.push(...managedEntryPoints); - break; - } - case "unmanaged": { - projectEntryPoints.push(...unmanagedEntryPoints); - break; - } - default: { - assertExhaustive(target); - } - } - - if (config.instrumentedPackageNames?.length ?? 
0 > 0) { - projectEntryPoints.push(telemetryEntryPoint); - } - - return projectEntryPoints; -} - // Converts a directory to a glob that matches all the entry points in that function dirToEntryPointGlob(dir: string): string[] { return [ diff --git a/packages/cli-v3/src/build/entryPoints.ts b/packages/cli-v3/src/build/entryPoints.ts index 95ca763764..29c88ef2dc 100644 --- a/packages/cli-v3/src/build/entryPoints.ts +++ b/packages/cli-v3/src/build/entryPoints.ts @@ -3,7 +3,13 @@ import { ResolvedConfig } from "@trigger.dev/core/v3/build"; import * as chokidar from "chokidar"; import { glob } from "tinyglobby"; import { logger } from "../utilities/logger.js"; -import { deployEntryPoints, devEntryPoints, telemetryEntryPoint } from "./packageModules.js"; +import { + deployEntryPoints, + devEntryPoints, + managedEntryPoints, + telemetryEntryPoint, + unmanagedEntryPoints, +} from "./packageModules.js"; type EntryPointManager = { entryPoints: string[]; @@ -52,10 +58,22 @@ export async function createEntryPointManager( entryPoints.push(config.configFile); } - if (target === "dev") { - entryPoints.push(...devEntryPoints); - } else { - entryPoints.push(...deployEntryPoints); + switch (target) { + case "dev": { + entryPoints.push(...devEntryPoints); + break; + } + case "managed": { + entryPoints.push(...managedEntryPoints); + break; + } + case "unmanaged": { + entryPoints.push(...unmanagedEntryPoints); + break; + } + default: { + entryPoints.push(...deployEntryPoints); + } } if (config.instrumentedPackageNames?.length ?? 
0 > 0) { diff --git a/references/hello-world/trigger.config.ts b/references/hello-world/trigger.config.ts index ca95e06685..9d31dd312a 100644 --- a/references/hello-world/trigger.config.ts +++ b/references/hello-world/trigger.config.ts @@ -1,6 +1,7 @@ import { defineConfig } from "@trigger.dev/sdk/v3"; export default defineConfig({ + compatibilityFlags: ["run_engine_v2"], project: "proj_rrkpdguyagvsoktglnod", logLevel: "log", maxDuration: 60, From 41cfaa47561f7d93263d2bafbb044ce944f36353 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 12 Jan 2025 20:30:23 +0000 Subject: [PATCH 346/485] add max run count to dequeue from version route --- ...ns.deployments.$deploymentFriendlyId.dequeue.ts | 14 ++++++++++++-- .../core/src/v3/runEngineWorker/supervisor/http.ts | 4 ++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts index d0b5d773cf..abb300598e 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts @@ -10,8 +10,15 @@ export const loader = createLoaderWorkerApiRoute( params: z.object({ deploymentFriendlyId: z.string(), }), + searchParams: z.object({ + maxRunCount: z.number().optional(), + }), }, - async ({ authenticatedWorker, params }): Promise> => { + async ({ + authenticatedWorker, + params, + searchParams, + }): Promise> => { const deployment = await $replica.workerDeployment.findUnique({ where: { friendlyId: params.deploymentFriendlyId, @@ -34,7 +41,10 @@ export const loader = createLoaderWorkerApiRoute( deployment.worker.id, deployment.environmentId ) - : await authenticatedWorker.dequeueFromVersion(deployment.worker.id); + : await authenticatedWorker.dequeueFromVersion( + 
deployment.worker.id, + searchParams.maxRunCount + ); return json(dequeuedMessages); } diff --git a/packages/core/src/v3/runEngineWorker/supervisor/http.ts b/packages/core/src/v3/runEngineWorker/supervisor/http.ts index 6bc1910ac4..4a83cedf8a 100644 --- a/packages/core/src/v3/runEngineWorker/supervisor/http.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/http.ts @@ -77,10 +77,10 @@ export class SupervisorHttpClient { ); } - async dequeueFromVersion(deploymentId: string) { + async dequeueFromVersion(deploymentId: string, maxRunCount = 1) { return wrapZodFetch( WorkerApiDequeueResponseBody, - `${this.apiUrl}/api/v1/worker-actions/deployments/${deploymentId}/dequeue`, + `${this.apiUrl}/api/v1/worker-actions/deployments/${deploymentId}/dequeue?maxRunCount=${maxRunCount}`, { headers: { ...this.defaultHeaders, From 6ff189336b34afadb99351317af33bef63845ba7 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 12 Jan 2025 20:30:45 +0000 Subject: [PATCH 347/485] add worker instance name env var and header --- packages/cli-v3/src/entryPoints/managed-run-controller.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index daf68a34ee..6ffa504390 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -42,6 +42,7 @@ const Env = z.object({ TRIGGER_WARM_START_URL: z.string().optional(), TRIGGER_MACHINE_CPU: z.string().default("0"), TRIGGER_MACHINE_MEMORY: z.string().default("0"), + TRIGGER_WORKER_INSTANCE_NAME: z.string(), }); const env = Env.parse(stdEnv); @@ -363,6 +364,7 @@ class ManagedRunController { "x-trigger-deployment-version": env.TRIGGER_DEPLOYMENT_VERSION, "x-trigger-machine-cpu": env.TRIGGER_MACHINE_CPU, "x-trigger-machine-memory": env.TRIGGER_MACHINE_MEMORY, + "x-trigger-worker-instance-name": 
env.TRIGGER_WORKER_INSTANCE_NAME, }, }, { From 35ab2bd8f77f14db286b09a04fa55b1144ad6e07 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Sun, 12 Jan 2025 20:31:17 +0000 Subject: [PATCH 348/485] queue consumer pre skip callback --- .../v3/runEngineWorker/supervisor/queueConsumer.ts | 12 +++++++++++- .../src/v3/runEngineWorker/supervisor/session.ts | 4 +++- .../core/src/v3/runEngineWorker/supervisor/types.ts | 2 ++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/packages/core/src/v3/runEngineWorker/supervisor/queueConsumer.ts b/packages/core/src/v3/runEngineWorker/supervisor/queueConsumer.ts index 21f13e725f..059e999be1 100644 --- a/packages/core/src/v3/runEngineWorker/supervisor/queueConsumer.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/queueConsumer.ts @@ -1,17 +1,19 @@ import { SupervisorHttpClient } from "./http.js"; import { WorkerApiDequeueResponseBody } from "./schemas.js"; -import { PreDequeueFn } from "./types.js"; +import { PreDequeueFn, PreSkipFn } from "./types.js"; type RunQueueConsumerOptions = { client: SupervisorHttpClient; intervalMs?: number; preDequeue?: PreDequeueFn; + preSkip?: PreSkipFn; onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; }; export class RunQueueConsumer { private readonly client: SupervisorHttpClient; private readonly preDequeue?: PreDequeueFn; + private readonly preSkip?: PreSkipFn; private readonly onDequeue: (messages: WorkerApiDequeueResponseBody) => Promise; private intervalMs: number; @@ -64,6 +66,14 @@ export class RunQueueConsumer { preDequeueResult?.maxResources?.cpu === 0 || preDequeueResult?.maxResources?.memory === 0 ) { + if (this.preSkip) { + try { + await this.preSkip(); + } catch (preSkipError) { + console.error("[RunQueueConsumer] preSkip error", { error: preSkipError }); + } + } + return this.scheduleNextDequeue(); } diff --git a/packages/core/src/v3/runEngineWorker/supervisor/session.ts 
b/packages/core/src/v3/runEngineWorker/supervisor/session.ts index 02bf712b5f..eb0b297235 100644 --- a/packages/core/src/v3/runEngineWorker/supervisor/session.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/session.ts @@ -1,5 +1,5 @@ import { SupervisorHttpClient } from "./http.js"; -import { PreDequeueFn, SupervisorClientCommonOptions } from "./types.js"; +import { PreDequeueFn, PreSkipFn, SupervisorClientCommonOptions } from "./types.js"; import { WorkerApiDequeueResponseBody, WorkerApiHeartbeatRequestBody } from "./schemas.js"; import { RunQueueConsumer } from "./queueConsumer.js"; import { WorkerEvents } from "./events.js"; @@ -14,6 +14,7 @@ type SupervisorSessionOptions = SupervisorClientCommonOptions & { heartbeatIntervalSeconds?: number; dequeueIntervalMs?: number; preDequeue?: PreDequeueFn; + preSkip?: PreSkipFn; }; export class SupervisorSession extends EventEmitter { @@ -32,6 +33,7 @@ export class SupervisorSession extends EventEmitter { this.queueConsumer = new RunQueueConsumer({ client: this.httpClient, preDequeue: opts.preDequeue, + preSkip: opts.preSkip, onDequeue: this.onDequeue.bind(this), intervalMs: opts.dequeueIntervalMs, }); diff --git a/packages/core/src/v3/runEngineWorker/supervisor/types.ts b/packages/core/src/v3/runEngineWorker/supervisor/types.ts index 80678b6f18..7e94f2ca32 100644 --- a/packages/core/src/v3/runEngineWorker/supervisor/types.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/types.ts @@ -12,3 +12,5 @@ export type PreDequeueFn = () => Promise<{ maxResources?: MachineResources; skipDequeue?: boolean; }>; + +export type PreSkipFn = () => Promise; From 05c0ee91e4bf9b4ae6fe35d8425f1c4b9d14241e Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 14 Jan 2025 16:05:07 +0000 Subject: [PATCH 349/485] poll for more runs after final execution errors --- .../src/entryPoints/managed-run-controller.ts | 54 +++++++++++++++---- 1 file changed, 43 insertions(+), 11 deletions(-) diff 
--git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 6ffa504390..af321a5a68 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -5,9 +5,12 @@ import { z } from "zod"; import { randomUUID } from "crypto"; import { readJSONFile } from "../utilities/fileSystem.js"; import { + CompleteRunAttemptResult, DequeuedMessage, HeartbeatService, RunExecutionData, + TaskRunExecutionResult, + TaskRunFailedExecutionResult, WorkerManifest, } from "@trigger.dev/core/v3"; import { @@ -105,6 +108,7 @@ class ManagedRunController { private enterWarmStartPhase() { this.state = { phase: "WARM_START" }; + this.snapshotPollService.stop(); } private get runFriendlyId() { @@ -321,17 +325,17 @@ class ManagedRunController { updatedSnapshotId: this.snapshotFriendlyId, }); + const completion = { + id: execution.run.id, + ok: false, + retry: undefined, + error: TaskRunProcess.parseExecuteError(error), + } satisfies TaskRunFailedExecutionResult; + const completionResult = await this.httpClient.completeRunAttempt( this.runFriendlyId, this.snapshotFriendlyId, - { - completion: { - id: execution.run.id, - ok: false, - retry: undefined, - error: TaskRunProcess.parseExecuteError(error), - }, - } + { completion } ); if (!completionResult.success) { @@ -342,10 +346,19 @@ class ManagedRunController { } logger.log("Attempt completion submitted after error", completionResult.data.result); + + try { + await this.handleCompletionResult(completion, completionResult.data.result); + } catch (error) { + console.error("Failed to handle completion result after error", { error }); + process.exit(1); + } } } private async waitForNextRun() { + logger.debug("[ManagedRunController] Waiting for next run"); + this.enterWarmStartPhase(); try { @@ -448,6 +461,8 @@ class ManagedRunController { envVars, execution, }: WorkloadRunAttemptStartResponseBody) { + 
this.snapshotPollService.start(); + this.taskRunProcess = new TaskRunProcess({ workerManifest: this.workerManifest, env: envVars, @@ -510,7 +525,21 @@ class ManagedRunController { logger.log("Attempt completion submitted", completionResult.data.result); - const { attemptStatus, snapshot: completionSnapshot } = completionResult.data.result; + try { + await this.handleCompletionResult(completion, completionResult.data.result); + } catch (error) { + console.error("Failed to handle completion result", { error }); + process.exit(1); + } + } + + private async handleCompletionResult( + completion: TaskRunExecutionResult, + result: CompleteRunAttemptResult + ) { + logger.debug("[ManagedRunController] Handling completion result", { completion, result }); + + const { attemptStatus, snapshot: completionSnapshot } = result; this.updateSnapshot(completionSnapshot); @@ -603,7 +632,6 @@ class ManagedRunController { this.createSocket(); this.startAndExecuteRunAttempt(); - this.snapshotPollService.start(); } async stop() { @@ -614,6 +642,8 @@ class ManagedRunController { } this.heartbeatService.stop(); + this.snapshotPollService.stop(); + this.socket?.close(); } } @@ -661,6 +691,8 @@ const longPoll = async ( error: string; } > => { + logger.debug("Long polling", { url, requestInit, timeoutMs, totalDurationMs }); + const endTime = Date.now() + totalDurationMs; while (Date.now() < endTime) { @@ -690,7 +722,7 @@ const longPoll = async ( } } catch (error) { if (error instanceof Error && error.name === "AbortError") { - console.log("Request timed out, retrying..."); + console.log("Long poll request timed out, retrying..."); continue; } else { console.error("Error during fetch, retrying...", error); From 6b8bd66ecbdb2d2039f2a3f2ebb79e26de7051f9 Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Tue, 14 Jan 2025 18:22:07 +0000 Subject: [PATCH 350/485] fix dequeue search param schema --- ....worker-actions.deployments.$deploymentFriendlyId.dequeue.ts | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts index abb300598e..fbfa194662 100644 --- a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts +++ b/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts @@ -11,7 +11,7 @@ export const loader = createLoaderWorkerApiRoute( deploymentFriendlyId: z.string(), }), searchParams: z.object({ - maxRunCount: z.number().optional(), + maxRunCount: z.coerce.number().optional(), }), }, async ({ From 8aa07e7a8f3753ee1239df85b6b0997f955d0f9b Mon Sep 17 00:00:00 2001 From: nicktrn <55853254+nicktrn@users.noreply.github.com> Date: Thu, 16 Jan 2025 08:08:39 +0000 Subject: [PATCH 351/485] add shortcut to debug switch --- .../app/components/primitives/Switch.tsx | 23 ++++++++++++++++++- .../route.tsx | 1 + 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/apps/webapp/app/components/primitives/Switch.tsx b/apps/webapp/app/components/primitives/Switch.tsx index 5cb860910b..e5d89f20dd 100644 --- a/apps/webapp/app/components/primitives/Switch.tsx +++ b/apps/webapp/app/components/primitives/Switch.tsx @@ -3,6 +3,7 @@ import * as React from "react"; import * as SwitchPrimitives from "@radix-ui/react-switch"; import { cn } from "~/utils/cn"; +import { ShortcutDefinition, useShortcutKeys } from "~/hooks/useShortcutKeys"; const variations = { large: { @@ -23,14 +24,34 @@ const variations = { type SwitchProps = React.ComponentPropsWithoutRef & { label?: React.ReactNode; variant: keyof typeof variations; + shortcut?: ShortcutDefinition; }; export const Switch = React.forwardRef, SwitchProps>( ({ className, variant, label, ...props }, ref) => { + const innerRef = React.useRef(null); + React.useImperativeHandle(ref, () => innerRef.current as HTMLButtonElement); + const { 
container, root, thumb, text } = variations[variant]; + if (props.shortcut) { + useShortcutKeys({ + shortcut: props.shortcut, + action: () => { + if (innerRef.current) { + innerRef.current.click(); + } + }, + disabled: props.disabled, + }); + } + return ( - + {label ? (
{runParam && closePanel && ( @@ -385,14 +385,7 @@ function SpanBody({ )} {span.events.length > 0 && } - {span.properties !== undefined && ( - - )} +
)}
@@ -1217,3 +1210,81 @@ function SpanLinkElement({ link }: { link: SpanLink }) { return null; } + +function SpanEntity({ span }: { span: Span }) { + switch (span.entityType) { + case "waitpoint": { + if (!span.waitpoint) { + return No waitpoint found: {span.entity.id}; + } + + return ( + <> + + + Waitpoint ID + + {span.waitpoint?.friendlyId} + + + + Waitpoint status + + + + + + Waitpoint idempotency key + + {span.waitpoint.userProvidedIdempotencyKey ? span.waitpoint.idempotencyKey : "–"} + + + + Waitpoint idempotency key expires at + + {span.waitpoint.idempotencyKeyExpiresAt ? ( + + ) : ( + "–" + )} + + + + {span.waitpoint.status === "PENDING" ? ( +
Manually complete waitpoint
+ ) : span.waitpoint.output ? ( + + ) : ( + "No output" + )} + + ); + } + default: { + if (span.properties !== undefined) + return ( + + ); + } + } + + return <>; +} diff --git a/apps/webapp/app/v3/eventRepository.server.ts b/apps/webapp/app/v3/eventRepository.server.ts index da469c9d8f..907b8975ab 100644 --- a/apps/webapp/app/v3/eventRepository.server.ts +++ b/apps/webapp/app/v3/eventRepository.server.ts @@ -624,6 +624,14 @@ export class EventRepository { SemanticInternalAttributes.ORIGINAL_RUN_ID ); + const entity = { + type: rehydrateAttribute( + spanEvent.properties, + SemanticInternalAttributes.ENTITY_TYPE + ), + id: rehydrateAttribute(spanEvent.properties, SemanticInternalAttributes.ENTITY_ID), + }; + return { ...spanEvent, ...span.data, @@ -634,6 +642,7 @@ export class EventRepository { show, links, originalRun, + entity, }; }); } diff --git a/packages/core/src/v3/semanticInternalAttributes.ts b/packages/core/src/v3/semanticInternalAttributes.ts index 1ece571489..9231e261e2 100644 --- a/packages/core/src/v3/semanticInternalAttributes.ts +++ b/packages/core/src/v3/semanticInternalAttributes.ts @@ -25,6 +25,8 @@ export const SemanticInternalAttributes = { MACHINE_PRESET_CENTS_PER_MS: "ctx.machine.centsPerMs", SPAN_PARTIAL: "$span.partial", SPAN_ID: "$span.span_id", + ENTITY_TYPE: "$entity.type", + ENTITY_ID: "$entity.id", OUTPUT: "$output", OUTPUT_TYPE: "$mime_type_output", STYLE: "$style", diff --git a/packages/trigger-sdk/src/v3/wait.ts b/packages/trigger-sdk/src/v3/wait.ts index 2b17b26e30..8cfa1d2839 100644 --- a/packages/trigger-sdk/src/v3/wait.ts +++ b/packages/trigger-sdk/src/v3/wait.ts @@ -206,6 +206,8 @@ export const wait = { { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "wait-token", + [SemanticInternalAttributes.ENTITY_TYPE]: "waitpoint", + [SemanticInternalAttributes.ENTITY_ID]: tokenId, id: tokenId, ...accessoryAttributes({ items: [ From 26d1017e48efdae6f8fa0916f39acd86be30d932 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 
Feb 2025 11:57:06 +0000 Subject: [PATCH 453/485] WIP on complete waitpoint form --- .../route.tsx | 193 ++++++++++++++++++ .../route.tsx | 3 +- .../hello-world/src/trigger/wait-tokens.ts | 29 ++- 3 files changed, 214 insertions(+), 11 deletions(-) create mode 100644 apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx new file mode 100644 index 0000000000..03b557d1b8 --- /dev/null +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -0,0 +1,193 @@ +import { conform, useForm } from "@conform-to/react"; +import { parse } from "@conform-to/zod"; +import { CheckIcon, XMarkIcon } from "@heroicons/react/20/solid"; +import { Form, useActionData, useLocation, useNavigation, useSubmit } from "@remix-run/react"; +import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; +import { useVirtualizer } from "@tanstack/react-virtual"; +import { parseExpression } from "cron-parser"; +import cronstrue from "cronstrue"; +import { useCallback, useRef, useState } from "react"; +import { + environmentTextClassName, + environmentTitle, +} from "~/components/environments/EnvironmentLabel"; +import { Button, LinkButton } from "~/components/primitives/Buttons"; +import { CheckboxWithLabel } from "~/components/primitives/Checkbox"; +import { DateTime } from "~/components/primitives/DateTime"; +import { Fieldset } from "~/components/primitives/Fieldset"; +import { FormError } from "~/components/primitives/FormError"; +import { Header2, Header3 } from "~/components/primitives/Headers"; +import { Hint } from "~/components/primitives/Hint"; +import { 
Input } from "~/components/primitives/Input"; +import { InputGroup } from "~/components/primitives/InputGroup"; +import { Label } from "~/components/primitives/Label"; +import { Paragraph } from "~/components/primitives/Paragraph"; +import { Select, SelectItem } from "~/components/primitives/Select"; +import { + Table, + TableBody, + TableCell, + TableHeader, + TableHeaderCell, + TableRow, +} from "~/components/primitives/Table"; +import { TextLink } from "~/components/primitives/TextLink"; +import { prisma } from "~/db.server"; +import { useOrganization } from "~/hooks/useOrganizations"; +import { useProject } from "~/hooks/useProject"; +import { redirectWithErrorMessage, redirectWithSuccessMessage } from "~/models/message.server"; +import { EditableScheduleElements } from "~/presenters/v3/EditSchedulePresenter.server"; +import { requireUserId } from "~/services/session.server"; +import { cn } from "~/utils/cn"; +import { ProjectParamSchema, docsPath, v3SchedulesPath } from "~/utils/pathBuilder"; +import { CronPattern, UpsertSchedule } from "~/v3/schedules"; +import { UpsertTaskScheduleService } from "~/v3/services/upsertTaskSchedule.server"; +import { AIGeneratedCronField } from "../resources.orgs.$organizationSlug.projects.$projectParam.schedules.new.natural-language"; +import { TimezoneList } from "~/components/scheduled/timezones"; +import { logger } from "~/services/logger.server"; +import { Waitpoint } from "@trigger.dev/database"; +import { z } from "zod"; +import { JSONEditor } from "~/components/code/JSONEditor"; + +const CompleteWaitpointFormData = z.discriminatedUnion("type", [ + z.object({ + type: z.literal("MANUAL"), + payload: z.string(), + }), + z.object({ + type: z.literal("DATETIME"), + }), +]); + +export const action = async ({ request, params }: ActionFunctionArgs) => { + const userId = await requireUserId(request); + const { organizationSlug, projectParam } = ProjectParamSchema.parse(params); + + const formData = await request.formData(); + 
const submission = parse(formData, { schema: UpsertSchedule }); + + if (!submission.value) { + return json(submission); + } + + try { + //first check that the user has access to the project + const project = await prisma.project.findUnique({ + where: { + slug: projectParam, + organization: { + members: { + some: { + userId, + }, + }, + }, + }, + select: { id: true }, + }); + + if (!project) { + throw new Error("Project not found"); + } + + const createSchedule = new UpsertTaskScheduleService(); + const result = await createSchedule.call(project.id, submission.value); + + return redirectWithSuccessMessage( + v3SchedulesPath({ slug: organizationSlug }, { slug: projectParam }), + request, + submission.value?.friendlyId === result.id ? "Schedule updated" : "Schedule created" + ); + } catch (error: any) { + logger.error("Failed to create schedule", error); + + const errorMessage = `Something went wrong. Please try again.`; + return redirectWithErrorMessage( + v3SchedulesPath({ slug: organizationSlug }, { slug: projectParam }), + request, + errorMessage + ); + } +}; + +type FormWaitpoint = Pick; + +export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint }) { + const navigation = useNavigation(); + const submit = useSubmit(); + const isLoading = navigation.state !== "idle"; + const organization = useOrganization(); + const project = useProject(); + const currentJson = useRef(""); + const formAction = `/resources/orgs/${organization.slug}/projects/${project.slug}/waitpoints/${waitpoint.friendlyId}/complete`; + + const submitForm = useCallback( + (e: React.FormEvent) => { + const formData = new FormData(e.currentTarget); + const data: Record = { + type: formData.get("type") as string, + failedRedirect: formData.get("failedRedirect") as string, + successRedirect: formData.get("failedRedirect") as string, + }; + + data.payload = currentJson.current; + + submit(data, { + action: formAction, + method: "post", + }); + e.preventDefault(); + }, + 
[currentJson] + ); + + return ( +
submitForm(e)} + className="grid h-full max-h-full grid-rows-[2.5rem_1fr_3.25rem] overflow-hidden bg-background-bright" + > +
+ Complete waitpoint +
+
+
+ +
+ + + { + currentJson.current = v; + }} + showClearButton={false} + showCopyButton={false} + height="100%" + min-height="100%" + max-height="100%" + /> + +
+
+
+
+
+ +
+
+
+ ); +} diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 263e9c84a5..852ddc877d 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -66,6 +66,7 @@ import { v3TraceSpanPath, } from "~/utils/pathBuilder"; import { SpanLink } from "~/v3/eventRepository.server"; +import { CompleteWaitpointForm } from "../resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route"; export const loader = async ({ request, params }: LoaderFunctionArgs) => { const userId = await requireUserId(request); @@ -1260,7 +1261,7 @@ function SpanEntity({ span }: { span: Span }) { {span.waitpoint.status === "PENDING" ? ( -
Manually complete waitpoint
+ ) : span.waitpoint.output ? ( { + run: async ({ + completeBeforeWaiting = false, + idempotencyKey, + idempotencyKeyTTL, + completionDelay, + }: { + completeBeforeWaiting?: boolean; + idempotencyKey?: string; + idempotencyKeyTTL?: string; + completionDelay?: number; + }) => { logger.log("Hello, world", { completeBeforeWaiting }); const token = await wait.createToken({ idempotencyKey, idempotencyKeyTTL, - timeout: new Date(Date.now() + 10_000), + timeout: completionDelay ? undefined : new Date(Date.now() + 10_000), }); logger.log("Token", token); const token2 = await wait.createToken({ idempotencyKey, idempotencyKeyTTL, - timeout: "10s" }); + timeout: "10s", + }); logger.log("Token2", token2); if (completeBeforeWaiting) { await wait.completeToken(token.id, { status: "approved" }); - await wait.for({ seconds: 10 }); + await wait.for({ seconds: 5 }); } else { - await completeWaitToken.trigger({ token: token.id, delay: 4 }); + await completeWaitToken.trigger({ token: token.id, delay: completionDelay }); } - //wait for the token const result = await wait.forToken<{ foo: string }>(token); if (!result.ok) { @@ -37,14 +47,13 @@ export const waitToken = task({ } else { logger.log("Token completed", result); } - }, -}) +}); export const completeWaitToken = task({ id: "wait-token-complete", - run: async (payload: { token: string; delay: number }) => { - await wait.for({ seconds: payload.delay }); + run: async (payload: { token: string; delay?: number }) => { + await wait.for({ seconds: payload.delay ?? 
10 }); await wait.completeToken(payload.token, { status: "approved" }); }, }); From 23c77d462b98331ce4e7d481130bdcf91c9b9ab7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 Feb 2025 15:09:08 +0000 Subject: [PATCH 454/485] Span overview panel can be changed based on the entity type --- .../route.tsx | 48 +--- .../route.tsx | 242 +++++++----------- 2 files changed, 104 insertions(+), 186 deletions(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 03b557d1b8..45c651bd05 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -1,53 +1,25 @@ -import { conform, useForm } from "@conform-to/react"; import { parse } from "@conform-to/zod"; -import { CheckIcon, XMarkIcon } from "@heroicons/react/20/solid"; -import { Form, useActionData, useLocation, useNavigation, useSubmit } from "@remix-run/react"; +import { Form, useNavigation, useSubmit } from "@remix-run/react"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; -import { useVirtualizer } from "@tanstack/react-virtual"; -import { parseExpression } from "cron-parser"; -import cronstrue from "cronstrue"; -import { useCallback, useRef, useState } from "react"; -import { - environmentTextClassName, - environmentTitle, -} from "~/components/environments/EnvironmentLabel"; -import { Button, LinkButton } from "~/components/primitives/Buttons"; -import { CheckboxWithLabel } from "~/components/primitives/Checkbox"; -import { DateTime } from "~/components/primitives/DateTime"; +import { Waitpoint } from "@trigger.dev/database"; +import { useCallback, 
useRef } from "react"; +import { z } from "zod"; +import { JSONEditor } from "~/components/code/JSONEditor"; +import { Button } from "~/components/primitives/Buttons"; import { Fieldset } from "~/components/primitives/Fieldset"; -import { FormError } from "~/components/primitives/FormError"; -import { Header2, Header3 } from "~/components/primitives/Headers"; -import { Hint } from "~/components/primitives/Hint"; -import { Input } from "~/components/primitives/Input"; +import { Header2 } from "~/components/primitives/Headers"; import { InputGroup } from "~/components/primitives/InputGroup"; import { Label } from "~/components/primitives/Label"; -import { Paragraph } from "~/components/primitives/Paragraph"; -import { Select, SelectItem } from "~/components/primitives/Select"; -import { - Table, - TableBody, - TableCell, - TableHeader, - TableHeaderCell, - TableRow, -} from "~/components/primitives/Table"; -import { TextLink } from "~/components/primitives/TextLink"; import { prisma } from "~/db.server"; import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; import { redirectWithErrorMessage, redirectWithSuccessMessage } from "~/models/message.server"; -import { EditableScheduleElements } from "~/presenters/v3/EditSchedulePresenter.server"; +import { logger } from "~/services/logger.server"; import { requireUserId } from "~/services/session.server"; import { cn } from "~/utils/cn"; -import { ProjectParamSchema, docsPath, v3SchedulesPath } from "~/utils/pathBuilder"; -import { CronPattern, UpsertSchedule } from "~/v3/schedules"; +import { ProjectParamSchema, v3SchedulesPath } from "~/utils/pathBuilder"; +import { UpsertSchedule } from "~/v3/schedules"; import { UpsertTaskScheduleService } from "~/v3/services/upsertTaskSchedule.server"; -import { AIGeneratedCronField } from "../resources.orgs.$organizationSlug.projects.$projectParam.schedules.new.natural-language"; -import { TimezoneList } from 
"~/components/scheduled/timezones"; -import { logger } from "~/services/logger.server"; -import { Waitpoint } from "@trigger.dev/database"; -import { z } from "zod"; -import { JSONEditor } from "~/components/code/JSONEditor"; const CompleteWaitpointFormData = z.discriminatedUnion("type", [ z.object({ diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 852ddc877d..6b9571c925 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -288,104 +288,6 @@ function SpanBody({ ) : (
- {span.level === "TRACE" ? ( - <> -
- -
- - - ) : ( -
- } - state="complete" - /> -
- )} - - - Message - {span.message} - - {span.triggeredRuns.length > 0 && ( - -
- Triggered runs - - - - Run # - Task - Version - Created at - - - - {span.triggeredRuns.map((run) => { - const path = v3RunSpanPath( - organization, - project, - { friendlyId: run.friendlyId }, - { spanId: run.spanId } - ); - return ( - - - {run.number} - - - {run.taskIdentifier} - - - {run.lockedToVersion?.version ?? "–"} - - - - - - ); - })} - -
-
-
- )} -
- {span.events.length > 0 && }
)} @@ -1159,21 +1061,6 @@ function SpanTimeline({ startTime, duration, inProgress, isError }: TimelineProp ); } -function VerticalBar({ state }: { state: TimelineState }) { - return
; -} - -function DottedLine() { - return ( -
-
-
-
-
-
- ); -} - function classNameForState(state: TimelineState) { switch (state) { case "pending": { @@ -1188,30 +1075,6 @@ function classNameForState(state: TimelineState) { } } -function SpanLinkElement({ link }: { link: SpanLink }) { - const organization = useOrganization(); - const project = useProject(); - - switch (link.type) { - case "run": { - return ( - - {link.title} - - ); - } - case "span": { - return ( - - {link.title} - - ); - } - } - - return null; -} - function SpanEntity({ span }: { span: Span }) { switch (span.entityType) { case "waitpoint": { @@ -1275,17 +1138,100 @@ function SpanEntity({ span }: { span: Span }) { ); } default: { - if (span.properties !== undefined) - return ( - - ); + return ( + <> + {span.level === "TRACE" ? ( + <> +
+ +
+ + + ) : ( +
+ } + state="complete" + /> +
+ )} + + + Message + {span.message} + + {span.triggeredRuns.length > 0 && ( + +
+ Triggered runs + + + + Run # + Task + Version + Created at + + + + {span.triggeredRuns.map((run) => { + const path = v3RunSpanPath( + organization, + project, + { friendlyId: run.friendlyId }, + { spanId: run.spanId } + ); + return ( + + + {run.number} + + + {run.taskIdentifier} + + + {run.lockedToVersion?.version ?? "–"} + + + + + + ); + })} + +
+
+
+ )} +
+ {span.events.length > 0 && } + {span.properties !== undefined ? ( + + ) : null} + + ); } } - - return <>; } From 053c650ca99ad39e92d1648b994c4af0c9da84fd Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 Feb 2025 15:28:36 +0000 Subject: [PATCH 455/485] Improved the waitpoint display --- .../route.tsx | 42 +++++++++++-------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 6b9571c925..940e4b6f00 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -56,6 +56,7 @@ import { requireUserId } from "~/services/session.server"; import { cn } from "~/utils/cn"; import { formatCurrencyAccurate } from "~/utils/numberFormatter"; import { + docsPath, v3BatchPath, v3RunDownloadLogsPath, v3RunPath, @@ -1076,6 +1077,9 @@ function classNameForState(state: TimelineState) { } function SpanEntity({ span }: { span: Span }) { + const organization = useOrganization(); + const project = useProject(); + switch (span.entityType) { case "waitpoint": { if (!span.waitpoint) { @@ -1084,15 +1088,16 @@ function SpanEntity({ span }: { span: Span }) { return ( <> +
+ Waitpoint + + A waitpoint pauses your code from continuing until the conditions are met.{" "} + View docs. + +
- Waitpoint ID - - {span.waitpoint?.friendlyId} - - - - Waitpoint status + Status - Waitpoint idempotency key - - {span.waitpoint.userProvidedIdempotencyKey ? span.waitpoint.idempotencyKey : "–"} + ID + + {span.waitpoint?.friendlyId} - Waitpoint idempotency key expires at + Idempotency key - {span.waitpoint.idempotencyKeyExpiresAt ? ( - - ) : ( - "–" - )} +
+ {span.waitpoint.userProvidedIdempotencyKey ? span.waitpoint.idempotencyKey : "–"} + {span.waitpoint.idempotencyKeyExpiresAt ? ( + <> + Expires at: + + ) : null} +
@@ -1127,7 +1135,7 @@ function SpanEntity({ span }: { span: Span }) { ) : span.waitpoint.output ? ( From 39101897a41332e69f0282d1ce981b6e81871577 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 Feb 2025 16:22:17 +0000 Subject: [PATCH 456/485] WIP on completing waitpoint form --- .../webapp/app/assets/icons/WaitTokenIcon.tsx | 26 ++--- ...ts.tokens.$waitpointFriendlyId.complete.ts | 3 - .../route.tsx | 105 ++++++++++-------- 3 files changed, 73 insertions(+), 61 deletions(-) diff --git a/apps/webapp/app/assets/icons/WaitTokenIcon.tsx b/apps/webapp/app/assets/icons/WaitTokenIcon.tsx index a808c081f7..8b46f1c801 100644 --- a/apps/webapp/app/assets/icons/WaitTokenIcon.tsx +++ b/apps/webapp/app/assets/icons/WaitTokenIcon.tsx @@ -1,34 +1,34 @@ export function WaitTokenIcon({ className }: { className?: string }) { return ( - + diff --git a/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts b/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts index 5ef72790c6..ab3fbe33b9 100644 --- a/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts +++ b/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts @@ -3,7 +3,6 @@ import { CompleteWaitpointTokenRequestBody, CompleteWaitpointTokenResponseBody, conditionallyExportPacket, - CreateWaitpointTokenResponseBody, stringifyIO, } from "@trigger.dev/core/v3"; import { WaitpointId } from "@trigger.dev/core/v3/apps"; @@ -11,8 +10,6 @@ import { z } from "zod"; import { $replica } from "~/db.server"; import { logger } from "~/services/logger.server"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; -import { parseDelay } from "~/utils/delays"; -import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; import { engine } from "~/v3/runEngine.server"; const { action } = createActionApiRoute( diff --git 
a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 45c651bd05..3e055fdb1d 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -1,15 +1,20 @@ import { parse } from "@conform-to/zod"; +import { InformationCircleIcon } from "@heroicons/react/20/solid"; import { Form, useNavigation, useSubmit } from "@remix-run/react"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; import { Waitpoint } from "@trigger.dev/database"; import { useCallback, useRef } from "react"; import { z } from "zod"; +import { CodeBlock } from "~/components/code/CodeBlock"; +import { InlineCode } from "~/components/code/InlineCode"; import { JSONEditor } from "~/components/code/JSONEditor"; import { Button } from "~/components/primitives/Buttons"; +import { Callout } from "~/components/primitives/Callout"; import { Fieldset } from "~/components/primitives/Fieldset"; -import { Header2 } from "~/components/primitives/Headers"; +import { Header3 } from "~/components/primitives/Headers"; import { InputGroup } from "~/components/primitives/InputGroup"; import { Label } from "~/components/primitives/Label"; +import { Paragraph } from "~/components/primitives/Paragraph"; import { prisma } from "~/db.server"; import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; @@ -90,7 +95,7 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint const isLoading = navigation.state !== "idle"; const organization = useOrganization(); const project = useProject(); - const currentJson 
= useRef(""); + const currentJson = useRef("{\n\n}"); const formAction = `/resources/orgs/${organization.slug}/projects/${project.slug}/waitpoints/${waitpoint.friendlyId}/complete`; const submitForm = useCallback( @@ -114,52 +119,62 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint ); return ( -
submitForm(e)} - className="grid h-full max-h-full grid-rows-[2.5rem_1fr_3.25rem] overflow-hidden bg-background-bright" - > -
- Complete waitpoint -
-
-
+
+
submitForm(e)} + className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" + > +
+ Manually complete this waitpoint +
+
-
- - - { - currentJson.current = v; - }} - showClearButton={false} - showCopyButton={false} - height="100%" - min-height="100%" - max-height="100%" - /> - -
+
+ { + currentJson.current = v; + }} + showClearButton={false} + showCopyButton={false} + height="100%" + min-height="100%" + max-height="100%" + /> +
-
-
-
- +
+
+ +
+
+ +
+
+ + To complete this waitpoint in your code use:
+ (tokenId, + output +);`} + className="mt-1 max-w-full border-0 pl-1" + showLineNumbers={false} + />
- +
); } From 9600ee8d08352fcf3c00909f55c1a41c33d27910 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 Feb 2025 16:28:32 +0000 Subject: [PATCH 457/485] Use the existing CodeBlock for the tip --- apps/webapp/app/components/code/CodeBlock.tsx | 2 +- .../route.tsx | 21 +++++++++---------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/apps/webapp/app/components/code/CodeBlock.tsx b/apps/webapp/app/components/code/CodeBlock.tsx index a7050202ea..7865befb66 100644 --- a/apps/webapp/app/components/code/CodeBlock.tsx +++ b/apps/webapp/app/components/code/CodeBlock.tsx @@ -50,7 +50,7 @@ type CodeBlockProps = { fileName?: string; /** title text for the Title row */ - rowTitle?: string; + rowTitle?: ReactNode; }; const dimAmount = 0.5; diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 3e055fdb1d..4cda122d9e 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -161,20 +161,19 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint
-
-
- - To complete this waitpoint in your code use: -
- + + To complete this waitpoint in your code use: +
+ } + code={` await wait.completeToken(tokenId, output );`} - className="mt-1 max-w-full border-0 pl-1" - showLineNumbers={false} - /> -
+ showLineNumbers={false} + />
); } From 197dce318f20df4a3909848fb82ed0a94713eba2 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 Feb 2025 16:34:36 +0000 Subject: [PATCH 458/485] Style improvements --- .../route.tsx | 2 +- .../route.tsx | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 4cda122d9e..b43eb8642d 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -127,7 +127,7 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" >
- Manually complete this waitpoint + Manually complete this waitpoint
diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 940e4b6f00..30160d207f 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -1121,12 +1121,18 @@ function SpanEntity({ span }: { span: Span }) { Idempotency key
- {span.waitpoint.userProvidedIdempotencyKey ? span.waitpoint.idempotencyKey : "–"} - {span.waitpoint.idempotencyKeyExpiresAt ? ( - <> - Expires at: - - ) : null} +
+ {span.waitpoint.userProvidedIdempotencyKey + ? span.waitpoint.idempotencyKey + : "–"} +
+
+ {span.waitpoint.idempotencyKeyExpiresAt ? ( + <> + TTL: + + ) : null} +
From 677077a2abe0c035b567a409e9ed3b784a367d77 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 Feb 2025 17:14:57 +0000 Subject: [PATCH 459/485] Complete waitpoint --- .../route.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index b43eb8642d..4377352e29 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -163,10 +163,10 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint + To complete this waitpoint in your code use: -
+ } code={` await wait.completeToken(tokenId, From 05bdbf5c8d3c478e78055a4513af77ab7f1ca050 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Mon, 24 Feb 2025 18:24:01 +0000 Subject: [PATCH 460/485] All waitpoint sidebar variants --- .../assets/icons/AnimatedHourglassIcon.tsx | 27 ++++++++++ .../app/components/runs/v3/LiveTimer.tsx | 34 +++++++++++++ .../route.tsx | 50 ++++++++++++++++--- packages/core/src/v3/utils/durations.ts | 3 +- 4 files changed, 106 insertions(+), 8 deletions(-) create mode 100644 apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx diff --git a/apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx b/apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx new file mode 100644 index 0000000000..f28a7d6870 --- /dev/null +++ b/apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx @@ -0,0 +1,27 @@ +import { useAnimate } from "framer-motion"; +import { HourglassIcon } from "lucide-react"; +import { useEffect } from "react"; + +export function AnimatedHourglassIcon({ + className, + delay, +}: { + className?: string; + delay?: number; +}) { + const [scope, animate] = useAnimate(); + + useEffect(() => { + animate( + [ + [scope.current, { rotate: 0 }, { duration: 0.7 }], + [scope.current, { rotate: 180 }, { duration: 0.3 }], + [scope.current, { rotate: 180 }, { duration: 0.7 }], + [scope.current, { rotate: 360 }, { duration: 0.3 }], + ], + { repeat: Infinity, delay } + ); + }); + + return ; +} diff --git a/apps/webapp/app/components/runs/v3/LiveTimer.tsx b/apps/webapp/app/components/runs/v3/LiveTimer.tsx index 4ff94dfa78..953bfb320b 100644 --- a/apps/webapp/app/components/runs/v3/LiveTimer.tsx +++ b/apps/webapp/app/components/runs/v3/LiveTimer.tsx @@ -66,3 +66,37 @@ export function LiveCountUp({ ); } + +export function LiveCountdown({ + endTime, + updateInterval = 100, +}: { + endTime: Date; + updateInterval?: number; +}) { + const [now, setNow] = useState(); + + useEffect(() => { + const interval = setInterval(() => { + const date = new Date(); + 
setNow(date); + + if (date > endTime) { + clearInterval(interval); + } + }, updateInterval); + + return () => clearInterval(interval); + }, [endTime]); + + return ( + <> + {formatDuration(now, endTime, { + style: "short", + maxDecimalPoints: 0, + units: ["d", "h", "m", "s"], + maxUnits: 4, + })} + + ); +} diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 4377352e29..c62d19cf9a 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -3,25 +3,22 @@ import { InformationCircleIcon } from "@heroicons/react/20/solid"; import { Form, useNavigation, useSubmit } from "@remix-run/react"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; import { Waitpoint } from "@trigger.dev/database"; +import { motion } from "framer-motion"; import { useCallback, useRef } from "react"; import { z } from "zod"; +import { AnimatedHourglassIcon } from "~/assets/icons/AnimatedHourglassIcon"; import { CodeBlock } from "~/components/code/CodeBlock"; -import { InlineCode } from "~/components/code/InlineCode"; import { JSONEditor } from "~/components/code/JSONEditor"; import { Button } from "~/components/primitives/Buttons"; -import { Callout } from "~/components/primitives/Callout"; -import { Fieldset } from "~/components/primitives/Fieldset"; -import { Header3 } from "~/components/primitives/Headers"; -import { InputGroup } from "~/components/primitives/InputGroup"; -import { Label } from "~/components/primitives/Label"; +import { DateTime } from "~/components/primitives/DateTime"; import { Paragraph } from 
"~/components/primitives/Paragraph"; +import { LiveCountdown } from "~/components/runs/v3/LiveTimer"; import { prisma } from "~/db.server"; import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; import { redirectWithErrorMessage, redirectWithSuccessMessage } from "~/models/message.server"; import { logger } from "~/services/logger.server"; import { requireUserId } from "~/services/session.server"; -import { cn } from "~/utils/cn"; import { ProjectParamSchema, v3SchedulesPath } from "~/utils/pathBuilder"; import { UpsertSchedule } from "~/v3/schedules"; import { UpsertTaskScheduleService } from "~/v3/services/upsertTaskSchedule.server"; @@ -118,6 +115,8 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint [currentJson] ); + const endTime = new Date(Date.now() + 60_000 * 113); + return (
(tokenId, );`} showLineNumbers={false} /> + submitForm(e)} + className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" + > +
+ Manually skip this waitpoint +
+
+ +
+
+ + + + +
+ +
+
+
+
+ +
+
+
); } diff --git a/packages/core/src/v3/utils/durations.ts b/packages/core/src/v3/utils/durations.ts index c2baf52693..7bec968fe6 100644 --- a/packages/core/src/v3/utils/durations.ts +++ b/packages/core/src/v3/utils/durations.ts @@ -8,6 +8,7 @@ type DurationOptions = { style?: "long" | "short"; maxDecimalPoints?: number; units?: Unit[]; + maxUnits?: number; }; export function formatDuration( @@ -48,7 +49,7 @@ export function formatDurationMilliseconds( ? belowOneSecondUnits : aboveOneSecondUnits, maxDecimalPoints: options?.maxDecimalPoints ?? 1, - largest: 2, + largest: options?.maxUnits ?? 2, }); if (!options) { From 8aa47a90eb9383d895ce9c9b9da2673b6487177d Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 25 Feb 2025 19:04:51 +0000 Subject: [PATCH 461/485] Waits now use a pause icon --- apps/webapp/app/assets/icons/PauseIcon.tsx | 12 ++++++ .../webapp/app/assets/icons/WaitTokenIcon.tsx | 41 ------------------- .../webapp/app/components/runs/v3/RunIcon.tsx | 6 +-- 3 files changed, 14 insertions(+), 45 deletions(-) create mode 100644 apps/webapp/app/assets/icons/PauseIcon.tsx delete mode 100644 apps/webapp/app/assets/icons/WaitTokenIcon.tsx diff --git a/apps/webapp/app/assets/icons/PauseIcon.tsx b/apps/webapp/app/assets/icons/PauseIcon.tsx new file mode 100644 index 0000000000..ccbc1d9a4d --- /dev/null +++ b/apps/webapp/app/assets/icons/PauseIcon.tsx @@ -0,0 +1,12 @@ +export function PauseIcon({ className }: { className?: string }) { + return ( + + + + ); +} diff --git a/apps/webapp/app/assets/icons/WaitTokenIcon.tsx b/apps/webapp/app/assets/icons/WaitTokenIcon.tsx deleted file mode 100644 index 8b46f1c801..0000000000 --- a/apps/webapp/app/assets/icons/WaitTokenIcon.tsx +++ /dev/null @@ -1,41 +0,0 @@ -export function WaitTokenIcon({ className }: { className?: string }) { - return ( - - - - - - - - - - - - - - ); -} diff --git a/apps/webapp/app/components/runs/v3/RunIcon.tsx b/apps/webapp/app/components/runs/v3/RunIcon.tsx index e88a013226..57f1b05dbe 100644 --- 
a/apps/webapp/app/components/runs/v3/RunIcon.tsx +++ b/apps/webapp/app/components/runs/v3/RunIcon.tsx @@ -10,7 +10,7 @@ import { TaskIcon } from "~/assets/icons/TaskIcon"; import { TaskCachedIcon } from "~/assets/icons/TaskCachedIcon"; import { NamedIcon } from "~/components/primitives/NamedIcon"; import { cn } from "~/utils/cn"; -import { WaitTokenIcon } from "~/assets/icons/WaitTokenIcon"; +import { PauseIcon } from "~/assets/icons/PauseIcon"; type TaskIconProps = { name: string | undefined; @@ -50,13 +50,11 @@ export function RunIcon({ name, className, spanName }: TaskIconProps) { case "attempt": return ; case "wait": - return ; + return ; case "trace": return ; case "tag": return ; - case "wait-token": - return ; //log levels case "debug": case "log": From d3b095be802fb872e373dac91f3b99e7fa2ef8e9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 25 Feb 2025 19:09:10 +0000 Subject: [PATCH 462/485] Durations waits use the API to create/block with a waitpoint, not the runtime --- ...shots.$snapshotFriendlyId.wait.duration.ts | 63 ----- ...shots.$snapshotFriendlyId.wait.duration.ts | 32 --- ...ne.v1.runs.$runFriendlyId.wait.duration.ts | 75 +++++ ...oints.tokens.$waitpointFriendlyId.wait.ts} | 1 + .../worker/workerGroupTokenService.server.ts | 18 -- .../run-engine/src/engine/index.ts | 263 ++++++++---------- packages/cli-v3/src/apiClient.ts | 23 -- .../src/v3/runEngineWorker/supervisor/http.ts | 23 -- .../v3/runEngineWorker/supervisor/schemas.ts | 13 - .../src/v3/runEngineWorker/workload/http.ts | 21 -- .../v3/runEngineWorker/workload/schemas.ts | 10 - packages/core/src/v3/runtime/index.ts | 17 +- .../src/v3/runtime/managedRuntimeManager.ts | 34 +-- packages/core/src/v3/runtime/manager.ts | 11 +- .../core/src/v3/runtime/noopRuntimeManager.ts | 24 +- packages/core/src/v3/schemas/api.ts | 29 ++ packages/core/src/v3/schemas/runEngine.ts | 14 - packages/trigger-sdk/src/v3/retry.ts | 23 +- packages/trigger-sdk/src/v3/wait.ts | 79 ++++-- 
.../src/trigger/{wait-tokens.ts => waits.ts} | 21 +- 20 files changed, 337 insertions(+), 457 deletions(-) delete mode 100644 apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts delete mode 100644 apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts create mode 100644 apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts rename apps/webapp/app/routes/{api.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts => engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts} (97%) rename references/hello-world/src/trigger/{wait-tokens.ts => waits.ts} (74%) diff --git a/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts b/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts deleted file mode 100644 index 801eac8312..0000000000 --- a/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { json, TypedResponse } from "@remix-run/server-runtime"; -import { assertExhaustive } from "@trigger.dev/core"; -import { RunId, SnapshotId } from "@trigger.dev/core/v3/apps"; -import { - WorkerApiDebugLogBody, - WorkerApiRunAttemptStartResponseBody, - WorkerApiWaitForDurationRequestBody, - WorkerApiWaitForDurationResponseBody, - WorkloadHeartbeatResponseBody, -} from "@trigger.dev/core/v3/workers"; -import { z } from "zod"; -import { prisma } from "~/db.server"; -import { logger } from "~/services/logger.server"; -import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; -import { recordRunDebugLog } from "~/v3/eventRepository.server"; -import { engine } from "~/v3/runEngine.server"; - -const { action } = createActionApiRoute( - { - body: WorkerApiWaitForDurationRequestBody, - params: z.object({ - runFriendlyId: z.string(), - 
snapshotFriendlyId: z.string(), - }), - method: "POST", - }, - async ({ - authentication, - body, - params, - }): Promise> => { - const { runFriendlyId, snapshotFriendlyId } = params; - - try { - const run = await prisma.taskRun.findFirst({ - where: { - friendlyId: params.runFriendlyId, - runtimeEnvironmentId: authentication.environment.id, - }, - }); - - if (!run) { - throw new Response("You don't have permissions for this run", { status: 401 }); - } - - const waitResult = await engine.waitForDuration({ - runId: RunId.toId(runFriendlyId), - snapshotId: SnapshotId.toId(snapshotFriendlyId), - date: body.date, - }); - - return json(waitResult); - } catch (error) { - logger.error("Failed to wait for duration dev", { - environmentId: authentication.environment.id, - error, - }); - throw error; - } - } -); - -export { action }; diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts b/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts deleted file mode 100644 index 8f4630149a..0000000000 --- a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.wait.duration.ts +++ /dev/null @@ -1,32 +0,0 @@ -import { json, TypedResponse } from "@remix-run/server-runtime"; -import { - WorkerApiWaitForDurationRequestBody, - WorkerApiWaitForDurationResponseBody, -} from "@trigger.dev/core/v3/workers"; -import { z } from "zod"; -import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; - -export const action = createActionWorkerApiRoute( - { - body: WorkerApiWaitForDurationRequestBody, - params: z.object({ - runFriendlyId: z.string(), - snapshotFriendlyId: z.string(), - }), - }, - async ({ - authenticatedWorker, - body, - params, - }): Promise> => { - const { runFriendlyId, snapshotFriendlyId } = params; - - const waitResult = await authenticatedWorker.waitForDuration({ - runFriendlyId, - 
snapshotFriendlyId, - date: body.date, - }); - - return json(waitResult); - } -); diff --git a/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts new file mode 100644 index 0000000000..04ef8c5aae --- /dev/null +++ b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts @@ -0,0 +1,75 @@ +import { json, TypedResponse } from "@remix-run/server-runtime"; +import { WaitForDurationRequestBody, WaitForDurationResponseBody } from "@trigger.dev/core/v3"; +import { RunId } from "@trigger.dev/core/v3/apps"; + +import { z } from "zod"; +import { prisma } from "~/db.server"; +import { logger } from "~/services/logger.server"; +import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; +import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; +import { engine } from "~/v3/runEngine.server"; + +const { action } = createActionApiRoute( + { + body: WaitForDurationRequestBody, + params: z.object({ + runFriendlyId: z.string(), + }), + method: "POST", + }, + async ({ authentication, body, params }): Promise> => { + const { runFriendlyId } = params; + const runId = RunId.toId(runFriendlyId); + + try { + const run = await prisma.taskRun.findFirst({ + where: { + id: runId, + runtimeEnvironmentId: authentication.environment.id, + }, + }); + + if (!run) { + throw new Response("You don't have permissions for this run", { status: 401 }); + } + + const idempotencyKeyExpiresAt = body.idempotencyKeyTTL + ? 
resolveIdempotencyKeyTTL(body.idempotencyKeyTTL) + : undefined; + + const { waitpoint } = await engine.createDateTimeWaitpoint({ + projectId: authentication.environment.project.id, + environmentId: authentication.environment.id, + completedAfter: body.date, + idempotencyKey: body.idempotencyKey, + idempotencyKeyExpiresAt: idempotencyKeyExpiresAt, + }); + + const waitResult = await engine.blockRunWithWaitpoint({ + runId: run.id, + waitpoints: waitpoint.id, + environmentId: authentication.environment.id, + projectId: authentication.environment.project.id, + organizationId: authentication.environment.organization.id, + releaseConcurrency: { + releaseQueue: true, + }, + }); + + return json({ + waitUntil: body.date, + waitpoint: { + id: waitpoint.friendlyId, + }, + }); + } catch (error) { + logger.error("Failed to wait for duration dev", { + environmentId: authentication.environment.id, + error, + }); + throw error; + } + } +); + +export { action }; diff --git a/apps/webapp/app/routes/api.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts similarity index 97% rename from apps/webapp/app/routes/api.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts rename to apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts index 47da58bd7f..6adee9cb66 100644 --- a/apps/webapp/app/routes/api.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts +++ b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts @@ -52,6 +52,7 @@ const { action } = createActionApiRoute( waitpoints: [waitpointId], environmentId: authentication.environment.id, projectId: authentication.environment.project.id, + organizationId: authentication.environment.organization.id, failAfter: timeout, }); diff --git 
a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 78231148ef..ef0bf2e7be 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -718,24 +718,6 @@ export class AuthenticatedWorkerInstance extends WithRunEngine { }); } - async waitForDuration({ - runFriendlyId, - snapshotFriendlyId, - date, - }: { - runFriendlyId: string; - snapshotFriendlyId: string; - date: Date; - }): Promise { - return await this._engine.waitForDuration({ - runId: fromFriendlyId(runFriendlyId), - snapshotId: fromFriendlyId(snapshotFriendlyId), - date, - workerId: this.workerInstanceId, - runnerId: this.runnerId, - }); - } - async getLatestSnapshot({ runFriendlyId }: { runFriendlyId: string }) { return await this._engine.getRunExecutionData({ runId: fromFriendlyId(runFriendlyId), diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 2e5be2e9c6..2de5f59300 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -22,7 +22,6 @@ import { TaskRunFailedExecutionResult, TaskRunInternalError, TaskRunSuccessfulExecutionResult, - WaitForDurationResult, WAITPOINT_TIMEOUT_ERROR_CODE, } from "@trigger.dev/core/v3"; import { @@ -412,6 +411,7 @@ export class RunEngine { waitpoints: associatedWaitpoint.id, environmentId: associatedWaitpoint.environmentId, projectId: associatedWaitpoint.projectId, + organizationId: environment.organization.id, batch, workerId, runnerId, @@ -497,17 +497,18 @@ export class RunEngine { } if (taskRun.delayUntil) { - const delayWaitpoint = await this.#createDateTimeWaitpoint(prisma, { + const delayWaitpointResult = await this.createDateTimeWaitpoint({ projectId: environment.project.id, environmentId: environment.id, completedAfter: taskRun.delayUntil, + tx: 
prisma, }); await prisma.taskRunWaitpoint.create({ data: { taskRunId: taskRun.id, - waitpointId: delayWaitpoint.id, - projectId: delayWaitpoint.projectId, + waitpointId: delayWaitpointResult.waitpoint.id, + projectId: delayWaitpointResult.waitpoint.projectId, }, }); } @@ -1391,110 +1392,6 @@ export class RunEngine { } } - async waitForDuration({ - runId, - snapshotId, - date, - releaseConcurrency = true, - idempotencyKey, - workerId, - runnerId, - tx, - }: { - runId: string; - snapshotId: string; - date: Date; - releaseConcurrency?: boolean; - idempotencyKey?: string; - workerId?: string; - runnerId?: string; - tx?: PrismaClientOrTransaction; - }): Promise { - const prisma = tx ?? this.prisma; - - return await this.runLock.lock([runId], 5_000, async (signal) => { - const snapshot = await getLatestExecutionSnapshot(prisma, runId); - - if (snapshot.id !== snapshotId) { - throw new ServiceValidationError("Snapshot ID doesn't match the latest snapshot", 400); - } - - const run = await prisma.taskRun.findFirst({ - select: { - runtimeEnvironment: { - select: { - id: true, - organizationId: true, - }, - }, - projectId: true, - }, - where: { id: runId }, - }); - - if (!run) { - throw new ServiceValidationError("TaskRun not found", 404); - } - - let waitpoint = idempotencyKey - ? await prisma.waitpoint.findUnique({ - where: { - environmentId_idempotencyKey: { - environmentId: run.runtimeEnvironment.id, - idempotencyKey, - }, - }, - }) - : undefined; - - if (!waitpoint) { - waitpoint = await this.#createDateTimeWaitpoint(prisma, { - projectId: run.projectId, - environmentId: run.runtimeEnvironment.id, - completedAfter: date, - idempotencyKey, - }); - } - - //waitpoint already completed, so we don't need to wait - if (waitpoint.status === "COMPLETED") { - return { - waitUntil: waitpoint.completedAt ?? 
new Date(), - waitpoint: { - id: waitpoint.id, - }, - ...executionResultFromSnapshot(snapshot), - }; - } - - //block the run - const blockResult = await this.blockRunWithWaitpoint({ - runId, - waitpoints: waitpoint.id, - environmentId: waitpoint.environmentId, - projectId: waitpoint.projectId, - workerId, - runnerId, - tx: prisma, - }); - - //release concurrency - await this.runQueue.releaseConcurrency( - run.runtimeEnvironment.organizationId, - runId, - releaseConcurrency - ); - - return { - waitUntil: date, - waitpoint: { - id: waitpoint.id, - }, - ...executionResultFromSnapshot(blockResult), - }; - }); - } - /** Call this to cancel a run. If the run is in-progress it will change it's state to PENDING_CANCEL and notify the worker. @@ -1729,6 +1626,91 @@ export class RunEngine { return this.runQueue.lengthOfEnvQueue(environment); } + /** + * This creates a DATETIME waitpoint, that will be completed automatically when the specified date is reached. + * If you pass an `idempotencyKey`, the waitpoint will be created only if it doesn't already exist. + */ + async createDateTimeWaitpoint({ + projectId, + environmentId, + completedAfter, + idempotencyKey, + idempotencyKeyExpiresAt, + tx, + }: { + projectId: string; + environmentId: string; + completedAfter: Date; + idempotencyKey?: string; + idempotencyKeyExpiresAt?: Date; + tx?: PrismaClientOrTransaction; + }) { + const prisma = tx ?? this.prisma; + + const existingWaitpoint = idempotencyKey + ? 
await prisma.waitpoint.findUnique({ + where: { + environmentId_idempotencyKey: { + environmentId, + idempotencyKey, + }, + }, + }) + : undefined; + + if (existingWaitpoint) { + if ( + existingWaitpoint.idempotencyKeyExpiresAt && + new Date() > existingWaitpoint.idempotencyKeyExpiresAt + ) { + //the idempotency key has expired + //remove the waitpoint idempotencyKey + await prisma.waitpoint.update({ + where: { + id: existingWaitpoint.id, + }, + data: { + idempotencyKey: nanoid(24), + inactiveIdempotencyKey: existingWaitpoint.idempotencyKey, + }, + }); + + //let it fall through to create a new waitpoint + } else { + return { waitpoint: existingWaitpoint, isCached: true }; + } + } + + const waitpoint = await prisma.waitpoint.upsert({ + where: { + environmentId_idempotencyKey: { + environmentId, + idempotencyKey: idempotencyKey ?? nanoid(24), + }, + }, + create: { + ...WaitpointId.generate(), + type: "DATETIME", + idempotencyKey: idempotencyKey ?? nanoid(24), + idempotencyKeyExpiresAt, + userProvidedIdempotencyKey: !!idempotencyKey, + environmentId, + projectId, + completedAfter, + }, + update: {}, + }); + + await this.worker.enqueue({ + id: `finishWaitpoint.${waitpoint.id}`, + job: "finishWaitpoint", + payload: { waitpointId: waitpoint.id }, + availableAt: completedAfter, + }); + + return { waitpoint, isCached: false }; + } + /** This creates a MANUAL waitpoint, that can be explicitly completed (or failed). * If you pass an `idempotencyKey` and it already exists, it will return the existing waitpoint. */ @@ -1826,12 +1808,14 @@ export class RunEngine { batchId, environmentId, projectId, + organizationId, tx, }: { runId: string; batchId: string; environmentId: string; projectId: string; + organizationId: string; tx?: PrismaClientOrTransaction; }): Promise { const prisma = tx ?? 
this.prisma; @@ -1854,6 +1838,7 @@ export class RunEngine { waitpoints: waitpoint.id, environmentId, projectId, + organizationId, batch: { id: batchId }, tx: prisma, }); @@ -1960,6 +1945,8 @@ export class RunEngine { runId, waitpoints, projectId, + organizationId, + releaseConcurrency, failAfter, spanIdToComplete, batch, @@ -1971,6 +1958,10 @@ export class RunEngine { waitpoints: string | string[]; environmentId: string; projectId: string; + organizationId: string; + releaseConcurrency?: { + releaseQueue: boolean; + }; failAfter?: Date; spanIdToComplete?: string; batch?: { id: string; index?: number }; @@ -2070,6 +2061,15 @@ export class RunEngine { //in the near future availableAt: new Date(Date.now() + 50), }); + } else { + if (releaseConcurrency) { + //release concurrency + await this.runQueue.releaseConcurrency( + organizationId, + runId, + releaseConcurrency.releaseQueue === true + ); + } } return snapshot; @@ -2107,8 +2107,7 @@ export class RunEngine { // 2. Update the waitpoint to completed (only if it's pending) let waitpoint: Waitpoint | null = null; try { - waitpoint = await tx.waitpoint - .update({ + waitpoint = await tx.waitpoint.update({ where: { id, status: "PENDING" }, data: { status: "COMPLETED", @@ -2117,22 +2116,24 @@ export class RunEngine { outputType: output?.type, outputIsError: output?.isError, }, - }) - } catch(error) { + }); + } catch (error) { if (error instanceof Prisma.PrismaClientKnownRequestError && error.code === "P2025") { waitpoint = await tx.waitpoint.findFirst({ where: { id }, }); } else { - this.logger.log('completeWaitpoint: error updating waitpoint:', {error}); + this.logger.log("completeWaitpoint: error updating waitpoint:", { error }); throw error; } - }; + } return { waitpoint, affectedTaskRuns }; }, (error) => { - this.logger.error(`completeWaitpoint: Error completing waitpoint ${id}, retrying`, { error }); + this.logger.error(`completeWaitpoint: Error completing waitpoint ${id}, retrying`, { + error, + }); throw error; } 
); @@ -3484,38 +3485,6 @@ export class RunEngine { }); } - async #createDateTimeWaitpoint( - tx: PrismaClientOrTransaction, - { - projectId, - environmentId, - completedAfter, - idempotencyKey, - }: { projectId: string; environmentId: string; completedAfter: Date; idempotencyKey?: string } - ) { - const waitpoint = await tx.waitpoint.create({ - data: { - ...WaitpointId.generate(), - type: "DATETIME", - status: "PENDING", - idempotencyKey: idempotencyKey ?? nanoid(24), - userProvidedIdempotencyKey: !!idempotencyKey, - projectId, - environmentId, - completedAfter, - }, - }); - - await this.worker.enqueue({ - id: `finishWaitpoint.${waitpoint.id}`, - job: "finishWaitpoint", - payload: { waitpointId: waitpoint.id }, - availableAt: completedAfter, - }); - - return waitpoint; - } - async #rescheduleDateTimeWaitpoint( tx: PrismaClientOrTransaction, waitpointId: string, diff --git a/packages/cli-v3/src/apiClient.ts b/packages/cli-v3/src/apiClient.ts index ac666e2dda..7bc58fb735 100644 --- a/packages/cli-v3/src/apiClient.ts +++ b/packages/cli-v3/src/apiClient.ts @@ -42,8 +42,6 @@ import { WorkloadRunAttemptStartRequestBody, WorkloadRunAttemptStartResponseBody, WorkloadRunLatestSnapshotResponseBody, - WorkloadWaitForDurationRequestBody, - WorkloadWaitForDurationResponseBody, } from "@trigger.dev/core/v3/workers"; export class CliApiClient { @@ -399,7 +397,6 @@ export class CliApiClient { heartbeatRun: this.devHeartbeatRun.bind(this), startRunAttempt: this.devStartRunAttempt.bind(this), completeRunAttempt: this.devCompleteRunAttempt.bind(this), - waitForDuration: this.devWaitForDuration.bind(this), }; } @@ -613,26 +610,6 @@ export class CliApiClient { } ); } - - private async devWaitForDuration( - runId: string, - snapshotId: string, - body: WorkloadWaitForDurationRequestBody - ) { - return wrapZodFetch( - WorkloadWaitForDurationResponseBody, - `${this.apiURL}/api/v1/dev/runs/${runId}/snapshots/${snapshotId}/wait/duration`, - { - method: "POST", - headers: { - Authorization: 
`Bearer ${this.accessToken}`, - Accept: "application/json", - "Content-Type": "application/json", - }, - body: JSON.stringify(body), - } - ); - } } type ApiResult = diff --git a/packages/core/src/v3/runEngineWorker/supervisor/http.ts b/packages/core/src/v3/runEngineWorker/supervisor/http.ts index 6139c295fd..4116a5d135 100644 --- a/packages/core/src/v3/runEngineWorker/supervisor/http.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/http.ts @@ -14,8 +14,6 @@ import { WorkerApiRunHeartbeatRequestBody, WorkerApiRunHeartbeatResponseBody, WorkerApiRunLatestSnapshotResponseBody, - WorkerApiWaitForDurationRequestBody, - WorkerApiWaitForDurationResponseBody, WorkerApiDebugLogBody, } from "./schemas.js"; import { SupervisorClientCommonOptions } from "./types.js"; @@ -208,27 +206,6 @@ export class SupervisorHttpClient { } } - async waitForDuration( - runId: string, - snapshotId: string, - body: WorkerApiWaitForDurationRequestBody, - runnerId?: string - ) { - return wrapZodFetch( - WorkerApiWaitForDurationResponseBody, - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/wait/duration`, - { - method: "POST", - headers: { - ...this.defaultHeaders, - ...this.runnerIdHeader(runnerId), - "Content-Type": "application/json", - }, - body: JSON.stringify(body), - } - ); - } - async continueRunExecution(runId: string, snapshotId: string, runnerId?: string) { return wrapZodFetch( WorkerApiContinueRunExecutionRequestBody, diff --git a/packages/core/src/v3/runEngineWorker/supervisor/schemas.ts b/packages/core/src/v3/runEngineWorker/supervisor/schemas.ts index b49b3b703b..270dcc9e8a 100644 --- a/packages/core/src/v3/runEngineWorker/supervisor/schemas.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/schemas.ts @@ -6,7 +6,6 @@ import { StartRunAttemptResult, CompleteRunAttemptResult, RunExecutionData, - WaitForDurationResult, CheckpointInput, ExecutionResult, } from "../../schemas/runEngine.js"; @@ -126,18 +125,6 @@ export type 
WorkerApiDequeueFromVersionResponseBody = z.infer< typeof WorkerApiDequeueFromVersionResponseBody >; -export const WorkerApiWaitForDurationRequestBody = z.object({ - date: z.coerce.date(), -}); -export type WorkerApiWaitForDurationRequestBody = z.infer< - typeof WorkerApiWaitForDurationRequestBody ->; - -export const WorkerApiWaitForDurationResponseBody = WaitForDurationResult; -export type WorkerApiWaitForDurationResponseBody = z.infer< - typeof WorkerApiWaitForDurationResponseBody ->; - const AttributeValue = z.union([ z.string(), z.number(), diff --git a/packages/core/src/v3/runEngineWorker/workload/http.ts b/packages/core/src/v3/runEngineWorker/workload/http.ts index 6ec9d97256..cc36e89682 100644 --- a/packages/core/src/v3/runEngineWorker/workload/http.ts +++ b/packages/core/src/v3/runEngineWorker/workload/http.ts @@ -8,8 +8,6 @@ import { WorkloadRunLatestSnapshotResponseBody, WorkloadDequeueFromVersionResponseBody, WorkloadRunAttemptStartRequestBody, - WorkloadWaitForDurationRequestBody, - WorkloadWaitForDurationResponseBody, WorkloadSuspendRunResponseBody, WorkloadContinueRunExecutionResponseBody, WorkloadDebugLogRequestBody, @@ -152,25 +150,6 @@ export class WorkloadHttpClient { } } - async waitForDuration( - runId: string, - snapshotId: string, - body: WorkloadWaitForDurationRequestBody - ) { - return wrapZodFetch( - WorkloadWaitForDurationResponseBody, - `${this.apiUrl}/api/v1/workload-actions/runs/${runId}/snapshots/${snapshotId}/wait/duration`, - { - method: "POST", - headers: { - ...this.defaultHeaders, - "Content-Type": "application/json", - }, - body: JSON.stringify(body), - } - ); - } - async dequeue() { return wrapZodFetch( WorkloadDequeueFromVersionResponseBody, diff --git a/packages/core/src/v3/runEngineWorker/workload/schemas.ts b/packages/core/src/v3/runEngineWorker/workload/schemas.ts index 54b919033a..93c4252d37 100644 --- a/packages/core/src/v3/runEngineWorker/workload/schemas.ts +++ b/packages/core/src/v3/runEngineWorker/workload/schemas.ts 
@@ -8,8 +8,6 @@ import { WorkerApiRunAttemptStartResponseBody, WorkerApiRunLatestSnapshotResponseBody, WorkerApiDequeueFromVersionResponseBody, - WorkerApiWaitForDurationRequestBody, - WorkerApiWaitForDurationResponseBody, WorkerApiContinueRunExecutionRequestBody, WorkerApiDebugLogBody, } from "../supervisor/schemas.js"; @@ -66,11 +64,3 @@ export const WorkloadDequeueFromVersionResponseBody = WorkerApiDequeueFromVersio export type WorkloadDequeueFromVersionResponseBody = z.infer< typeof WorkloadDequeueFromVersionResponseBody >; - -export const WorkloadWaitForDurationRequestBody = WorkerApiWaitForDurationRequestBody; -export type WorkloadWaitForDurationRequestBody = z.infer; - -export const WorkloadWaitForDurationResponseBody = WorkerApiWaitForDurationResponseBody; -export type WorkloadWaitForDurationResponseBody = z.infer< - typeof WorkloadWaitForDurationResponseBody ->; diff --git a/packages/core/src/v3/runtime/index.ts b/packages/core/src/v3/runtime/index.ts index 1e73571490..c53767e2ad 100644 --- a/packages/core/src/v3/runtime/index.ts +++ b/packages/core/src/v3/runtime/index.ts @@ -31,24 +31,19 @@ export class RuntimeAPI { return this._instance; } - public waitForDuration(ms: number): Promise { - return usage.pauseAsync(() => this.#getRuntimeManager().waitForDuration(ms)); - } - - public waitUntil(date: Date): Promise { - return usage.pauseAsync(() => this.#getRuntimeManager().waitUntil(date)); + public waitUntil(waitpointFriendlyId: string, finishDate?: Date): Promise { + return usage.pauseAsync(() => + this.#getRuntimeManager().waitForWaitpoint({ waitpointFriendlyId, finishDate }) + ); } public waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { return usage.pauseAsync(() => this.#getRuntimeManager().waitForTask(params)); } - public waitForToken( - waitpointFriendlyId: string, - options?: WaitForWaitpointTokenRequestBody - ): Promise { + public waitForToken(waitpointFriendlyId: string): Promise { return usage.pauseAsync(() => - 
this.#getRuntimeManager().waitForToken(waitpointFriendlyId, options) + this.#getRuntimeManager().waitForWaitpoint({ waitpointFriendlyId }) ); } diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index e1b74742d9..f2c0c5b816 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -36,27 +36,6 @@ export class ManagedRuntimeManager implements RuntimeManager { // do nothing } - async waitForDuration(ms: number): Promise { - const wait = { - type: "DATETIME", - id: crypto.randomUUID(), - date: new Date(Date.now() + ms), - } satisfies RuntimeWait; - - const promise = new Promise((resolve) => { - this.resolversByWaitId.set(wait.id, resolve); - }); - - // Send wait to parent process - this.ipc.send("WAIT", { wait }); - - await promise; - } - - async waitUntil(date: Date): Promise { - return this.waitForDuration(date.getTime() - Date.now()); - } - async waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { const promise = new Promise((resolve) => { this.resolversByWaitId.set(params.id, resolve); @@ -94,10 +73,13 @@ export class ManagedRuntimeManager implements RuntimeManager { }; } - async waitForToken( - waitpointFriendlyId: string, - options?: WaitForWaitpointTokenRequestBody - ): Promise { + async waitForWaitpoint({ + waitpointFriendlyId, + finishDate, + }: { + waitpointFriendlyId: string; + finishDate?: Date; + }): Promise { const promise = new Promise((resolve) => { this.resolversByWaitId.set(waitpointFriendlyId, resolve); }); @@ -134,7 +116,7 @@ export class ManagedRuntimeManager implements RuntimeManager { //no waitpoint resolves associated with batch completions //a batch completion isn't when all the runs from a batch are completed return; - } else if (waitpoint.type === "MANUAL") { + } else if (waitpoint.type === "MANUAL" || waitpoint.type === "DATETIME") { waitId = waitpoint.friendlyId; } else { waitId = 
this.resolversByWaitpoint.get(waitpoint.id); diff --git a/packages/core/src/v3/runtime/manager.ts b/packages/core/src/v3/runtime/manager.ts index a51dcf130d..ec7b5f1c18 100644 --- a/packages/core/src/v3/runtime/manager.ts +++ b/packages/core/src/v3/runtime/manager.ts @@ -2,22 +2,19 @@ import { BatchTaskRunExecutionResult, TaskRunContext, TaskRunExecutionResult, - WaitForWaitpointTokenRequestBody, WaitpointTokenResult, } from "../schemas/index.js"; export interface RuntimeManager { disable(): void; - waitUntil(date: Date): Promise; - waitForDuration(ms: number): Promise; waitForTask(params: { id: string; ctx: TaskRunContext }): Promise; waitForBatch(params: { id: string; runCount: number; ctx: TaskRunContext; }): Promise; - waitForToken( - waitpointFriendlyId: string, - options?: WaitForWaitpointTokenRequestBody - ): Promise; + waitForWaitpoint(params: { + waitpointFriendlyId: string; + finishDate?: Date; + }): Promise; } diff --git a/packages/core/src/v3/runtime/noopRuntimeManager.ts b/packages/core/src/v3/runtime/noopRuntimeManager.ts index 0a0addd23c..0650cf4e0d 100644 --- a/packages/core/src/v3/runtime/noopRuntimeManager.ts +++ b/packages/core/src/v3/runtime/noopRuntimeManager.ts @@ -3,7 +3,6 @@ import { TaskRunContext, TaskRunErrorCodes, TaskRunExecutionResult, - WaitForWaitpointTokenRequestBody, WaitpointTokenResult, } from "../schemas/index.js"; import { RuntimeManager } from "./manager.js"; @@ -13,12 +12,13 @@ export class NoopRuntimeManager implements RuntimeManager { // do nothing } - waitForDuration(ms: number): Promise { - return Promise.resolve(); - } - - waitUntil(date: Date): Promise { - return Promise.resolve(); + waitForWaitpoint(params: { + waitpointFriendlyId: string; + finishDate?: Date; + }): Promise { + return Promise.resolve({ + ok: true, + }); } waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { @@ -42,14 +42,4 @@ export class NoopRuntimeManager implements RuntimeManager { items: [], }); } - - waitForToken( - 
waitpointFriendlyId: string, - options?: WaitForWaitpointTokenRequestBody - ): Promise { - return Promise.resolve({ - ok: true, - outputType: "application/json", - }); - } } diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 9088bea9ce..5c05a881e9 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -944,6 +944,35 @@ export const WaitForWaitpointTokenResponseBody = z.object({ }); export type WaitForWaitpointTokenResponseBody = z.infer; +export const WaitForDurationRequestBody = z.object({ + /** + * An optional idempotency key for the waitpoint. + * If you use the same key twice (and the key hasn't expired), you will get the original waitpoint back. + * + * Note: This waitpoint may already be complete, in which case when you wait for it, it will immediately continue. + */ + idempotencyKey: z.string().optional(), + /** + * When set, this means the passed in idempotency key will expire after this time. + * This means after that time if you pass the same idempotency key again, you will get a new waitpoint. + */ + idempotencyKeyTTL: z.string().optional(), + date: z.coerce.date(), +}); +export type WaitForDurationRequestBody = z.infer; + +export const WaitForDurationResponseBody = z.object({ + /** + If you pass an idempotencyKey, you may actually not need to wait. + Use this date to determine when to continue. 
+ */ + waitUntil: z.coerce.date(), + waitpoint: z.object({ + id: z.string(), + }), +}); +export type WaitForDurationResponseBody = z.infer; + export const WAITPOINT_TIMEOUT_ERROR_CODE = "TRIGGER_WAITPOINT_TIMEOUT"; export function isWaitpointOutputTimeout(output: string): boolean { diff --git a/packages/core/src/v3/schemas/runEngine.ts b/packages/core/src/v3/schemas/runEngine.ts index abb0da6f7c..a93ca96a13 100644 --- a/packages/core/src/v3/schemas/runEngine.ts +++ b/packages/core/src/v3/schemas/runEngine.ts @@ -247,20 +247,6 @@ export const RunExecutionData = z.object({ }); export type RunExecutionData = z.infer; -export const WaitForDurationResult = z - .object({ - /** - If you pass an idempotencyKey, you may actually not need to wait. - Use this date to determine when to continue. - */ - waitUntil: z.coerce.date(), - waitpoint: z.object({ - id: z.string(), - }), - }) - .and(ExecutionResult); -export type WaitForDurationResult = z.infer; - export const CreateCheckpointResult = z.discriminatedUnion("ok", [ z .object({ diff --git a/packages/trigger-sdk/src/v3/retry.ts b/packages/trigger-sdk/src/v3/retry.ts index ef8b81d887..65e3203f37 100644 --- a/packages/trigger-sdk/src/v3/retry.ts +++ b/packages/trigger-sdk/src/v3/retry.ts @@ -23,6 +23,7 @@ import { runtime, } from "@trigger.dev/core/v3"; import { tracer } from "./tracer.js"; +import { wait } from "./wait.js"; export type { RetryOptions }; @@ -97,7 +98,7 @@ function onThrow( innerSpan.setAttribute(SemanticInternalAttributes.RETRY_DELAY, `${nextRetryDelay}ms`); innerSpan.end(); - await runtime.waitForDuration(nextRetryDelay); + await wait.until({ date: new Date(Date.now() + nextRetryDelay) }); } finally { attempt++; } @@ -216,16 +217,14 @@ async function retryFetch( } if (nextRetry.type === "delay") { - span.setAttribute( - SemanticInternalAttributes.RETRY_AT, - new Date(Date.now() + nextRetry.value).toISOString() - ); + const continueDate = new Date(Date.now() + nextRetry.value); + 
span.setAttribute(SemanticInternalAttributes.RETRY_AT, continueDate.toISOString()); span.setAttribute(SemanticInternalAttributes.RETRY_COUNT, attempt); span.setAttribute(SemanticInternalAttributes.RETRY_DELAY, `${nextRetry.value}ms`); span.end(); - await runtime.waitForDuration(nextRetry.value); + await wait.until({ date: continueDate }); } else { const now = Date.now(); const nextRetryDate = new Date(nextRetry.value); @@ -246,7 +245,7 @@ async function retryFetch( span.end(); - await runtime.waitUntil(new Date(nextRetry.value)); + await wait.until({ date: new Date(nextRetry.value) }); } } catch (e) { if (e instanceof FetchErrorWithSpan && e.originalError instanceof Error) { @@ -266,16 +265,14 @@ async function retryFetch( throw e; } - e.span.setAttribute( - SemanticInternalAttributes.RETRY_AT, - new Date(Date.now() + nextRetryDelay).toISOString() - ); + const continueDate = new Date(Date.now() + nextRetryDelay); + e.span.setAttribute(SemanticInternalAttributes.RETRY_AT, continueDate.toISOString()); e.span.setAttribute(SemanticInternalAttributes.RETRY_COUNT, attempt); e.span.setAttribute(SemanticInternalAttributes.RETRY_DELAY, `${nextRetryDelay}ms`); e.span.end(); - await runtime.waitForDuration(nextRetryDelay); + await wait.until({ date: continueDate }); continue; // Move to the next attempt } else if ( @@ -311,7 +308,7 @@ async function retryFetch( e.span.end(); - await runtime.waitForDuration(nextRetryDelay); + await wait.until({ date: new Date(Date.now() + nextRetryDelay) }); continue; // Move to the next attempt } diff --git a/packages/trigger-sdk/src/v3/wait.ts b/packages/trigger-sdk/src/v3/wait.ts index 8cfa1d2839..52760d2f33 100644 --- a/packages/trigger-sdk/src/v3/wait.ts +++ b/packages/trigger-sdk/src/v3/wait.ts @@ -75,10 +75,27 @@ async function completeToken( requestOptions ); - return apiClient.completeResumeToken(tokenId, { data }, $requestOptions); + return apiClient.completeWaitpointToken(tokenId, { data }, $requestOptions); } -export type 
WaitOptions = +export type CommonWaitOptions = { + /** + * An optional idempotency key for the waitpoint. + * If you use the same key twice (and the key hasn't expired), you will get the original waitpoint back. + * + * Note: This waitpoint may already be complete, in which case when you wait for it, it will immediately continue. + */ + idempotencyKey?: string; + /** + * When set, this means the passed in idempotency key will expire after this time. + * This means after that time if you pass the same idempotency key again, you will get a new waitpoint. + */ + idempotencyKeyTTL?: string; +}; + +export type WaitForOptions = WaitPeriod & CommonWaitOptions; + +type WaitPeriod = | { seconds: number; } @@ -102,18 +119,33 @@ export type WaitOptions = }; export const wait = { - for: async (options: WaitOptions) => { + for: async (options: WaitForOptions) => { + const ctx = taskContext.ctx; + if (!ctx) { + throw new Error("wait.forToken can only be used from inside a task.run()"); + } + + const apiClient = apiClientManager.clientOrThrow(); + + const start = Date.now(); + const durationInMs = calculateDurationInMs(options); + const date = new Date(start + durationInMs); + const result = await apiClient.waitForDuration(ctx.run.id, { + date: date, + idempotencyKey: options.idempotencyKey, + idempotencyKeyTTL: options.idempotencyKeyTTL, + }); + return tracer.startActiveSpan( `wait.for()`, async (span) => { - const start = Date.now(); - const durationInMs = calculateDurationInMs(options); - - await runtime.waitForDuration(durationInMs); + await runtime.waitUntil(result.waitpoint.id, date); }, { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "wait", + [SemanticInternalAttributes.ENTITY_TYPE]: "waitpoint", + [SemanticInternalAttributes.ENTITY_ID]: result.waitpoint.id, ...accessoryAttributes({ items: [ { @@ -127,23 +159,34 @@ export const wait = { } ); }, - until: async (options: { date: Date; throwIfInThePast?: boolean }) => { + until: async (options: { date: Date; 
throwIfInThePast?: boolean } & CommonWaitOptions) => { + const ctx = taskContext.ctx; + if (!ctx) { + throw new Error("wait.forToken can only be used from inside a task.run()"); + } + + const apiClient = apiClientManager.clientOrThrow(); + + const result = await apiClient.waitForDuration(ctx.run.id, { + date: options.date, + idempotencyKey: options.idempotencyKey, + idempotencyKeyTTL: options.idempotencyKeyTTL, + }); + return tracer.startActiveSpan( `wait.until()`, async (span) => { - const start = Date.now(); - if (options.throwIfInThePast && options.date < new Date()) { throw new Error("Date is in the past"); } - const durationInMs = options.date.getTime() - start; - - await runtime.waitForDuration(durationInMs); + await runtime.waitUntil(result.waitpoint.id, options.date); }, { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "wait", + [SemanticInternalAttributes.ENTITY_TYPE]: "waitpoint", + [SemanticInternalAttributes.ENTITY_ID]: result.waitpoint.id, ...accessoryAttributes({ items: [ { @@ -182,7 +225,7 @@ export const wait = { throw new Error(`Failed to wait for wait token ${tokenId}`); } - const result = await runtime.waitForToken(tokenId, options); + const result = await runtime.waitUntil(tokenId); const data = result.output ? await conditionallyImportAndParsePacket( @@ -205,7 +248,7 @@ export const wait = { }, { attributes: { - [SemanticInternalAttributes.STYLE_ICON]: "wait-token", + [SemanticInternalAttributes.STYLE_ICON]: "wait", [SemanticInternalAttributes.ENTITY_TYPE]: "waitpoint", [SemanticInternalAttributes.ENTITY_ID]: tokenId, id: tokenId, @@ -224,7 +267,7 @@ export const wait = { }, }; -function nameForWaitOptions(options: WaitOptions): string { +function nameForWaitOptions(options: WaitForOptions): string { if ("seconds" in options) { return options.seconds === 1 ? 
`1 second` : `${options.seconds} seconds`; } @@ -256,7 +299,7 @@ function nameForWaitOptions(options: WaitOptions): string { return "NaN"; } -function calculateDurationInMs(options: WaitOptions): number { +function calculateDurationInMs(options: WaitForOptions): number { if ("seconds" in options) { return options.seconds * 1000; } @@ -290,5 +333,5 @@ function calculateDurationInMs(options: WaitOptions): number { type RequestOptions = { to: (url: string) => Promise; - timeout: WaitOptions; + timeout: WaitForOptions; }; diff --git a/references/hello-world/src/trigger/wait-tokens.ts b/references/hello-world/src/trigger/waits.ts similarity index 74% rename from references/hello-world/src/trigger/wait-tokens.ts rename to references/hello-world/src/trigger/waits.ts index f9c79f69e3..4014dc2156 100644 --- a/references/hello-world/src/trigger/wait-tokens.ts +++ b/references/hello-world/src/trigger/waits.ts @@ -1,4 +1,4 @@ -import { logger, wait, task } from "@trigger.dev/sdk/v3"; +import { logger, wait, task, retry } from "@trigger.dev/sdk/v3"; type Token = { status: "approved" | "pending" | "rejected"; @@ -57,3 +57,22 @@ export const completeWaitToken = task({ await wait.completeToken(payload.token, { status: "approved" }); }, }); + +export const waitForDuration = task({ + id: "wait-duration", + run: async ({ duration = 4 }: { duration?: number }) => { + await wait.for({ seconds: duration }); + await wait.until({ date: new Date(Date.now() + duration * 1000) }); + + await retry.fetch("https://example.com/404/", { method: "GET" }); + + await retry.onThrow( + async () => { + throw new Error("This is an error"); + }, + { + maxAttempts: 3, + } + ); + }, +}); From 3db4a2917900e4465219f1a01b83a9a3a49c0d50 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 25 Feb 2025 19:33:57 +0000 Subject: [PATCH 463/485] Fix for engine.blockRunWithWaitpoint required org id --- .../app/v3/services/triggerTaskV2.server.ts | 1 + .../src/engine/tests/triggerAndWait.test.ts | 1 + 
.../src/engine/tests/waitpoints.test.ts | 126 +++++++++++------- packages/core/src/v3/apiClient/index.ts | 23 +++- 4 files changed, 98 insertions(+), 53 deletions(-) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 16a7737d02..e9447a99c2 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -160,6 +160,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { : undefined, environmentId: environment.id, projectId: environment.projectId, + organizationId: environment.organizationId, tx: this._prisma, }); } diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index e2baf33872..92c2cb12bc 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -373,6 +373,7 @@ describe("RunEngine triggerAndWait", () => { waitpoints: childRunWithWaitpoint.associatedWaitpoint!.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.project.id, + organizationId: authenticatedEnvironment.organizationId, tx: prisma, }); expect(blockedResult.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index b207baeff3..0692ba35c0 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -90,28 +90,38 @@ describe("RunEngine Waitpoints", () => { //waitForDuration const date = new Date(Date.now() + durationMs); - const result = await engine.waitForDuration({ + const { waitpoint } = await engine.createDateTimeWaitpoint({ + projectId: authenticatedEnvironment.project.id, + environmentId: 
authenticatedEnvironment.id, + completedAfter: date, + }); + expect(waitpoint.completedAfter!.toISOString()).toBe(date.toISOString()); + + const result = await engine.blockRunWithWaitpoint({ runId: run.id, - snapshotId: attemptResult.snapshot.id, - date, - releaseConcurrency: false, + waitpoints: [waitpoint.id], + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.project.id, + organizationId: authenticatedEnvironment.organization.id, + releaseConcurrency: { + releaseQueue: true, + }, }); - expect(result.waitUntil.toISOString()).toBe(date.toISOString()); - expect(result.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - expect(result.run.status).toBe("EXECUTING"); + expect(result.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + expect(result.runStatus).toBe("EXECUTING"); const executionData = await engine.getRunExecutionData({ runId: run.id }); expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); await setTimeout(2_000); - const waitpoint = await prisma.waitpoint.findFirst({ + const waitpoint2 = await prisma.waitpoint.findFirst({ where: { - id: result.waitpoint.id, + id: waitpoint.id, }, }); - expect(waitpoint?.status).toBe("COMPLETED"); - expect(waitpoint?.completedAt?.getTime()).toBeLessThanOrEqual(date.getTime() + 200); + expect(waitpoint2?.status).toBe("COMPLETED"); + expect(waitpoint2?.completedAt?.getTime()).toBeLessThanOrEqual(date.getTime() + 200); const executionDataAfter = await engine.getRunExecutionData({ runId: run.id }); expect(executionDataAfter?.snapshot.executionStatus).toBe("EXECUTING"); @@ -199,11 +209,19 @@ describe("RunEngine Waitpoints", () => { //waitForDuration const date = new Date(Date.now() + 60_000); - const result = await engine.waitForDuration({ + const { waitpoint } = await engine.createDateTimeWaitpoint({ + projectId: authenticatedEnvironment.project.id, + environmentId: authenticatedEnvironment.id, + completedAfter: date, + }); + 
expect(waitpoint.completedAfter!.toISOString()).toBe(date.toISOString()); + + const result = await engine.blockRunWithWaitpoint({ runId: run.id, - snapshotId: attemptResult.snapshot.id, - date, - releaseConcurrency: false, + waitpoints: [waitpoint.id], + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.project.id, + organizationId: authenticatedEnvironment.organization.id, }); const executionData = await engine.getRunExecutionData({ runId: run.id }); @@ -335,18 +353,19 @@ describe("RunEngine Waitpoints", () => { expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); //create a manual waitpoint - const waitpoint = await engine.createManualWaitpoint({ + const result = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, }); - expect(waitpoint.status).toBe("PENDING"); + expect(result.waitpoint.status).toBe("PENDING"); //block the run await engine.blockRunWithWaitpoint({ runId: run.id, - waitpoints: waitpoint.id, + waitpoints: result.waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, }); const executionData = await engine.getRunExecutionData({ runId: run.id }); @@ -361,7 +380,7 @@ describe("RunEngine Waitpoints", () => { waitpoint: true, }, }); - expect(runWaitpointBefore?.waitpointId).toBe(waitpoint.id); + expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; engine.eventBus.on("workerNotification", (result) => { @@ -370,7 +389,7 @@ describe("RunEngine Waitpoints", () => { //complete the waitpoint await engine.completeWaitpoint({ - id: waitpoint.id, + id: result.waitpoint.id, }); await setTimeout(200); @@ -476,7 +495,7 @@ describe("RunEngine Waitpoints", () => { expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); //create a manual 
waitpoint - const waitpoint = await engine.createManualWaitpoint({ + const result = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, }); @@ -484,9 +503,10 @@ describe("RunEngine Waitpoints", () => { //block the run await engine.blockRunWithWaitpoint({ runId: run.id, - waitpoints: waitpoint.id, + waitpoints: result.waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, //fail after 200ms failAfter: new Date(Date.now() + 200), }); @@ -600,7 +620,7 @@ describe("RunEngine Waitpoints", () => { const waitpointCount = 5; //create waitpoints - const waitpoints = await Promise.all( + const results = await Promise.all( Array.from({ length: waitpointCount }).map(() => engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, @@ -611,12 +631,13 @@ describe("RunEngine Waitpoints", () => { //block the run with them await Promise.all( - waitpoints.map((waitpoint) => + results.map((result) => engine.blockRunWithWaitpoint({ runId: run.id, - waitpoints: waitpoint.id, + waitpoints: result.waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, }) ) ); @@ -637,9 +658,9 @@ describe("RunEngine Waitpoints", () => { //complete the waitpoints await Promise.all( - waitpoints.map((waitpoint) => + results.map((result) => engine.completeWaitpoint({ - id: waitpoint.id, + id: result.waitpoint.id, }) ) ); @@ -746,20 +767,21 @@ describe("RunEngine Waitpoints", () => { //create a manual waitpoint with timeout const timeout = new Date(Date.now() + 1_000); - const waitpoint = await engine.createManualWaitpoint({ + const result = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, timeout, }); - 
expect(waitpoint.status).toBe("PENDING"); - expect(waitpoint.completedAfter).toStrictEqual(timeout); + expect(result.waitpoint.status).toBe("PENDING"); + expect(result.waitpoint.completedAfter).toStrictEqual(timeout); //block the run await engine.blockRunWithWaitpoint({ runId: run.id, - waitpoints: waitpoint.id, + waitpoints: result.waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, }); const executionData = await engine.getRunExecutionData({ runId: run.id }); @@ -774,7 +796,7 @@ describe("RunEngine Waitpoints", () => { waitpoint: true, }, }); - expect(runWaitpointBefore?.waitpointId).toBe(waitpoint.id); + expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; engine.eventBus.on("workerNotification", (result) => { @@ -803,7 +825,7 @@ describe("RunEngine Waitpoints", () => { const waitpoint2 = await prisma.waitpoint.findUnique({ where: { - id: waitpoint.id, + id: result.waitpoint.id, }, }); assertNonNullable(waitpoint2); @@ -898,21 +920,22 @@ describe("RunEngine Waitpoints", () => { const idempotencyKey = "a-key"; //create a manual waitpoint with timeout - const waitpoint = await engine.createManualWaitpoint({ + const result = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, idempotencyKey, }); - expect(waitpoint.status).toBe("PENDING"); - expect(waitpoint.idempotencyKey).toBe(idempotencyKey); - expect(waitpoint.userProvidedIdempotencyKey).toBe(true); + expect(result.waitpoint.status).toBe("PENDING"); + expect(result.waitpoint.idempotencyKey).toBe(idempotencyKey); + expect(result.waitpoint.userProvidedIdempotencyKey).toBe(true); //block the run await engine.blockRunWithWaitpoint({ runId: run.id, - waitpoints: waitpoint.id, + waitpoints: result.waitpoint.id, environmentId: 
authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, }); const executionData = await engine.getRunExecutionData({ runId: run.id }); @@ -927,7 +950,7 @@ describe("RunEngine Waitpoints", () => { waitpoint: true, }, }); - expect(runWaitpointBefore?.waitpointId).toBe(waitpoint.id); + expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; engine.eventBus.on("workerNotification", (result) => { @@ -936,7 +959,7 @@ describe("RunEngine Waitpoints", () => { //complete the waitpoint await engine.completeWaitpoint({ - id: waitpoint.id, + id: result.waitpoint.id, }); await setTimeout(200); @@ -961,7 +984,7 @@ describe("RunEngine Waitpoints", () => { const waitpoint2 = await prisma.waitpoint.findUnique({ where: { - id: waitpoint.id, + id: result.waitpoint.id, }, }); assertNonNullable(waitpoint2); @@ -1053,30 +1076,31 @@ describe("RunEngine Waitpoints", () => { const idempotencyKey = "a-key"; //create a manual waitpoint with timeout - const waitpoint = await engine.createManualWaitpoint({ + const result = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, idempotencyKey, idempotencyKeyExpiresAt: new Date(Date.now() + 200), }); - expect(waitpoint.status).toBe("PENDING"); - expect(waitpoint.idempotencyKey).toBe(idempotencyKey); - expect(waitpoint.userProvidedIdempotencyKey).toBe(true); + expect(result.waitpoint.status).toBe("PENDING"); + expect(result.waitpoint.idempotencyKey).toBe(idempotencyKey); + expect(result.waitpoint.userProvidedIdempotencyKey).toBe(true); - const sameWaitpoint = await engine.createManualWaitpoint({ + const sameWaitpointResult = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, idempotencyKey, idempotencyKeyExpiresAt: new 
Date(Date.now() + 200), }); - expect(sameWaitpoint.id).toBe(waitpoint.id); + expect(sameWaitpointResult.waitpoint.id).toBe(result.waitpoint.id); //block the run await engine.blockRunWithWaitpoint({ runId: run.id, - waitpoints: waitpoint.id, + waitpoints: result.waitpoint.id, environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, }); const executionData = await engine.getRunExecutionData({ runId: run.id }); @@ -1091,7 +1115,7 @@ describe("RunEngine Waitpoints", () => { waitpoint: true, }, }); - expect(runWaitpointBefore?.waitpointId).toBe(waitpoint.id); + expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; engine.eventBus.on("workerNotification", (result) => { @@ -1100,7 +1124,7 @@ describe("RunEngine Waitpoints", () => { //complete the waitpoint await engine.completeWaitpoint({ - id: waitpoint.id, + id: result.waitpoint.id, }); await setTimeout(200); @@ -1125,7 +1149,7 @@ describe("RunEngine Waitpoints", () => { const waitpoint2 = await prisma.waitpoint.findUnique({ where: { - id: waitpoint.id, + id: result.waitpoint.id, }, }); assertNonNullable(waitpoint2); diff --git a/packages/core/src/v3/apiClient/index.ts b/packages/core/src/v3/apiClient/index.ts index f9a197c23f..d1ab68d341 100644 --- a/packages/core/src/v3/apiClient/index.ts +++ b/packages/core/src/v3/apiClient/index.ts @@ -32,6 +32,8 @@ import { UpdateMetadataRequestBody, UpdateMetadataResponseBody, UpdateScheduleOptions, + WaitForDurationRequestBody, + WaitForDurationResponseBody, WaitForWaitpointTokenRequestBody, WaitForWaitpointTokenResponseBody, } from "../schemas/index.js"; @@ -654,7 +656,7 @@ export class ApiClient { ); } - completeResumeToken( + completeWaitpointToken( friendlyId: string, options: CompleteWaitpointTokenRequestBody, requestOptions?: ZodFetchOptions @@ -679,7 +681,7 @@ export class ApiClient { ) { 
return zodfetch( WaitForWaitpointTokenResponseBody, - `${this.baseUrl}/api/v1/runs/${runFriendlyId}/waitpoints/tokens/${waitpointFriendlyId}/wait`, + `${this.baseUrl}/engine/v1/runs/${runFriendlyId}/waitpoints/tokens/${waitpointFriendlyId}/wait`, { method: "POST", headers: this.#getHeaders(false), @@ -689,6 +691,23 @@ export class ApiClient { ); } + async waitForDuration( + runId: string, + body: WaitForDurationRequestBody, + requestOptions?: ZodFetchOptions + ) { + return zodfetch( + WaitForDurationResponseBody, + `${this.baseUrl}/engine/v1/runs/${runId}/wait/duration`, + { + method: "POST", + headers: this.#getHeaders(false), + body: JSON.stringify(body), + }, + mergeRequestOptions(this.defaultRequestOptions, requestOptions) + ); + } + subscribeToRun( runId: string, options?: { From 32b89a2ec5e92ef945e96d90d83f854e7530ca75 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 25 Feb 2025 19:41:53 +0000 Subject: [PATCH 464/485] Removed old wait code from the run controllers/task run process --- .../src/entryPoints/dev-run-controller.ts | 39 +------------------ .../src/entryPoints/managed-run-controller.ts | 39 +------------------ .../cli-v3/src/executions/taskRunProcess.ts | 7 ---- 3 files changed, 2 insertions(+), 83 deletions(-) diff --git a/packages/cli-v3/src/entryPoints/dev-run-controller.ts b/packages/cli-v3/src/entryPoints/dev-run-controller.ts index e84a9a94f5..46cfdfc0a8 100644 --- a/packages/cli-v3/src/entryPoints/dev-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/dev-run-controller.ts @@ -11,7 +11,7 @@ import { import { type WorkloadRunAttemptStartResponseBody } from "@trigger.dev/core/v3/workers"; import { setTimeout as sleep } from "timers/promises"; import { CliApiClient } from "../apiClient.js"; -import { OnWaitMessage, TaskRunProcess } from "../executions/taskRunProcess.js"; +import { TaskRunProcess } from "../executions/taskRunProcess.js"; import { assertExhaustive } from "../utilities/assertExhaustive.js"; import { logger } from 
"../utilities/logger.js"; import { sanitizeEnvVars } from "../utilities/sanitizeEnvVars.js"; @@ -598,8 +598,6 @@ export class DevRunController { messageId: run.friendlyId, }); - this.taskRunProcess.onWait.attach(this.handleWait.bind(this)); - await this.taskRunProcess.initialize(); logger.debug("executing task run process", { @@ -729,41 +727,6 @@ export class DevRunController { assertExhaustive(attemptStatus); } - private async handleWait({ wait }: OnWaitMessage) { - if (!this.runFriendlyId || !this.snapshotFriendlyId) { - logger.debug("[DevRunController] Ignoring wait, no run ID or snapshot ID"); - return; - } - - switch (wait.type) { - case "DATETIME": { - logger.debug("Waiting for duration", { wait }); - - const waitpoint = await this.httpClient.dev.waitForDuration( - this.runFriendlyId, - this.snapshotFriendlyId, - { - date: wait.date, - } - ); - - if (!waitpoint.success) { - logger.debug("Failed to wait for datetime", { error: waitpoint.error }); - return; - } - - logger.debug("Waitpoint created", { waitpointData: waitpoint.data }); - - this.taskRunProcess?.waitpointCreated(wait.id, waitpoint.data.waitpoint.id); - - break; - } - default: { - logger.debug("Wait type not implemented", { wait }); - } - } - } - private async runFinished() { // Kill the run process try { diff --git a/packages/cli-v3/src/entryPoints/managed-run-controller.ts b/packages/cli-v3/src/entryPoints/managed-run-controller.ts index 691ba02758..91c9863dbe 100644 --- a/packages/cli-v3/src/entryPoints/managed-run-controller.ts +++ b/packages/cli-v3/src/entryPoints/managed-run-controller.ts @@ -1,5 +1,5 @@ import { logger } from "../utilities/logger.js"; -import { OnWaitMessage, TaskRunProcess } from "../executions/taskRunProcess.js"; +import { TaskRunProcess } from "../executions/taskRunProcess.js"; import { env as stdEnv } from "std-env"; import { z } from "zod"; import { randomUUID } from "crypto"; @@ -832,8 +832,6 @@ class ManagedRunController { messageId: run.friendlyId, }); - 
this.taskRunProcess.onWait.attach(this.handleWait.bind(this)); - await this.taskRunProcess.initialize(); logger.log("executing task run process", { @@ -953,41 +951,6 @@ class ManagedRunController { assertExhaustive(attemptStatus); } - private async handleWait({ wait }: OnWaitMessage) { - if (!this.runFriendlyId || !this.snapshotFriendlyId) { - logger.debug("[ManagedRunController] Ignoring wait, no run ID or snapshot ID"); - return; - } - - switch (wait.type) { - case "DATETIME": { - logger.log("Waiting for duration", { wait }); - - const waitpoint = await this.httpClient.waitForDuration( - this.runFriendlyId, - this.snapshotFriendlyId, - { - date: wait.date, - } - ); - - if (!waitpoint.success) { - console.error("Failed to wait for datetime", { error: waitpoint.error }); - return; - } - - logger.log("Waitpoint created", { waitpointData: waitpoint.data }); - - this.taskRunProcess?.waitpointCreated(wait.id, waitpoint.data.waitpoint.id); - - break; - } - default: { - console.error("Wait type not implemented", { wait }); - } - } - } - async cancelAttempt(runId: string) { logger.log("cancelling attempt", { runId }); diff --git a/packages/cli-v3/src/executions/taskRunProcess.ts b/packages/cli-v3/src/executions/taskRunProcess.ts index 118467ef4a..7d2f958d2f 100644 --- a/packages/cli-v3/src/executions/taskRunProcess.ts +++ b/packages/cli-v3/src/executions/taskRunProcess.ts @@ -74,7 +74,6 @@ export class TaskRunProcess { public onIsBeingKilled: Evt = new Evt(); public onReadyToDispose: Evt = new Evt(); - public onWaitForDuration: Evt = new Evt(); public onWaitForTask: Evt = new Evt(); public onWaitForBatch: Evt = new Evt(); public onWait: Evt = new Evt(); @@ -186,15 +185,9 @@ export class TaskRunProcess { WAIT_FOR_BATCH: async (message) => { this.onWaitForBatch.post(message); }, - WAIT_FOR_DURATION: async (message) => { - this.onWaitForDuration.post(message); - }, UNCAUGHT_EXCEPTION: async (message) => { logger.debug(`[${this.runId}] uncaught exception in task run process`, 
{ ...message }); }, - WAIT: async (message) => { - this.onWait.post(message); - }, }, }); From 5031fd99f88eb13cfaca15fb0dd33e0615f15edc Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 09:32:46 +0000 Subject: [PATCH 465/485] Form action for skipping a datetime waitpoint --- .../app/presenters/v3/SpanPresenter.server.ts | 1 + .../route.tsx | 287 +++++++++++------- 2 files changed, 184 insertions(+), 104 deletions(-) diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index 5363064137..990607bfa8 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -447,6 +447,7 @@ export class SpanPresenter extends BasePresenter { output: true, outputType: true, outputIsError: true, + completedAfter: true, }, }); diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index c62d19cf9a..c9f275179f 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -1,7 +1,8 @@ import { parse } from "@conform-to/zod"; import { InformationCircleIcon } from "@heroicons/react/20/solid"; -import { Form, useNavigation, useSubmit } from "@remix-run/react"; +import { Form, useLocation, useNavigation, useSubmit } from "@remix-run/react"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; +import { WaitpointId } from "@trigger.dev/core/v3/apps"; import { Waitpoint } from "@trigger.dev/database"; import { motion } from "framer-motion"; import { useCallback, useRef } from "react"; @@ -13,13 
+14,14 @@ import { Button } from "~/components/primitives/Buttons"; import { DateTime } from "~/components/primitives/DateTime"; import { Paragraph } from "~/components/primitives/Paragraph"; import { LiveCountdown } from "~/components/runs/v3/LiveTimer"; -import { prisma } from "~/db.server"; +import { $replica, prisma } from "~/db.server"; import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; import { redirectWithErrorMessage, redirectWithSuccessMessage } from "~/models/message.server"; import { logger } from "~/services/logger.server"; import { requireUserId } from "~/services/session.server"; -import { ProjectParamSchema, v3SchedulesPath } from "~/utils/pathBuilder"; +import { ProjectParamSchema, v3RunsPath, v3SchedulesPath } from "~/utils/pathBuilder"; +import { engine } from "~/v3/runEngine.server"; import { UpsertSchedule } from "~/v3/schedules"; import { UpsertTaskScheduleService } from "~/v3/services/upsertTaskSchedule.server"; @@ -27,18 +29,26 @@ const CompleteWaitpointFormData = z.discriminatedUnion("type", [ z.object({ type: z.literal("MANUAL"), payload: z.string(), + successRedirect: z.string(), + failureRedirect: z.string(), }), z.object({ type: z.literal("DATETIME"), + successRedirect: z.string(), + failureRedirect: z.string(), }), ]); +const Params = ProjectParamSchema.extend({ + waitpointFriendlyId: z.string(), +}); + export const action = async ({ request, params }: ActionFunctionArgs) => { const userId = await requireUserId(request); - const { organizationSlug, projectParam } = ProjectParamSchema.parse(params); + const { organizationSlug, projectParam, waitpointFriendlyId } = Params.parse(params); const formData = await request.formData(); - const submission = parse(formData, { schema: UpsertSchedule }); + const submission = parse(formData, { schema: CompleteWaitpointFormData }); if (!submission.value) { return json(submission); @@ -46,7 +56,7 @@ export const action = async ({ request, params 
}: ActionFunctionArgs) => { try { //first check that the user has access to the project - const project = await prisma.project.findUnique({ + const project = await $replica.project.findUnique({ where: { slug: projectParam, organization: { @@ -64,27 +74,49 @@ export const action = async ({ request, params }: ActionFunctionArgs) => { throw new Error("Project not found"); } - const createSchedule = new UpsertTaskScheduleService(); - const result = await createSchedule.call(project.id, submission.value); + switch (submission.value.type) { + case "DATETIME": { + const waitpointId = WaitpointId.toId(waitpointFriendlyId); - return redirectWithSuccessMessage( - v3SchedulesPath({ slug: organizationSlug }, { slug: projectParam }), - request, - submission.value?.friendlyId === result.id ? "Schedule updated" : "Schedule created" - ); + const waitpoint = await $replica.waitpoint.findFirst({ + select: { + projectId: true, + }, + where: { + id: waitpointId, + }, + }); + + if (waitpoint?.projectId !== project.id) { + throw new Error("Waitpoint not found"); + } + + const result = await engine.completeWaitpoint({ + id: waitpointId, + }); + + return redirectWithSuccessMessage( + submission.value.successRedirect, + request, + "Waitpoint skipped" + ); + } + case "MANUAL": { + } + } } catch (error: any) { - logger.error("Failed to create schedule", error); + logger.error("Failed to complete waitpoint", error); const errorMessage = `Something went wrong. 
Please try again.`; return redirectWithErrorMessage( - v3SchedulesPath({ slug: organizationSlug }, { slug: projectParam }), + v3RunsPath({ slug: organizationSlug }, { slug: projectParam }), request, errorMessage ); } }; -type FormWaitpoint = Pick; +type FormWaitpoint = Pick; export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint }) { const navigation = useNavigation(); @@ -115,101 +147,148 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint [currentJson] ); - const endTime = new Date(Date.now() + 60_000 * 113); - return (
-
submitForm(e)} - className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" - > -
- Manually complete this waitpoint -
-
- -
- { - currentJson.current = v; - }} - showClearButton={false} - showCopyButton={false} - height="100%" - min-height="100%" - max-height="100%" - /> -
-
-
-
- -
-
-
- - - To complete this waitpoint in your code use: - - } - code={` + {waitpoint.type === "DATETIME" ? ( + waitpoint.completedAfter ? ( + + ) : ( + <>Waitpoint doesn't have a complete date + ) + ) : ( + <> +
submitForm(e)} + className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" + > + +
+ Manually complete this waitpoint +
+
+
+ { + currentJson.current = v; + }} + showClearButton={false} + showCopyButton={false} + height="100%" + min-height="100%" + max-height="100%" + /> +
+
+
+
+ +
+
+
+ + + To complete this waitpoint in your code use: + + } + code={` await wait.completeToken(tokenId, output );`} - showLineNumbers={false} - /> -
submitForm(e)} - className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" - > -
- Manually skip this waitpoint -
-
- -
-
- - - - -
- + showLineNumbers={false} + /> + + )} +
+ ); +} + +function CompleteDateTimeWaitpointForm({ + waitpoint, +}: { + waitpoint: { friendlyId: string; completedAfter: Date }; +}) { + const location = useLocation(); + const navigation = useNavigation(); + const submit = useSubmit(); + const isLoading = navigation.state !== "idle"; + const organization = useOrganization(); + const project = useProject(); + + const timeToComplete = waitpoint.completedAfter.getTime() - Date.now(); + if (timeToComplete < 0) { + return ( +
+ Waitpoint completed +
+ ); + } + + return ( + +
+ Manually skip this waitpoint +
+
+ + + +
+
+ + + +
+
-
-
- -
+
+
+
+
- -
+
+ ); } From 153639f32b1d5b08567f48652d34be6aac101521 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 10:45:01 +0000 Subject: [PATCH 466/485] =?UTF-8?q?Move=20testDockerCheckpoint=20to=20a=20?= =?UTF-8?q?separate=20core=20package=20export=20(it=20can=E2=80=99t=20be?= =?UTF-8?q?=20bundled=20on=20the=20client)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- apps/coordinator/src/checkpointer.ts | 2 +- apps/docker-provider/src/index.ts | 3 ++- packages/core/package.json | 17 ++++++++++++++++- packages/core/src/v3/apps/index.ts | 1 - .../src/v3/{apps => checkpoints}/checkpoints.ts | 2 +- packages/core/src/v3/checkpoints/index.ts | 1 + 6 files changed, 21 insertions(+), 5 deletions(-) rename packages/core/src/v3/{apps => checkpoints}/checkpoints.ts (95%) create mode 100644 packages/core/src/v3/checkpoints/index.ts diff --git a/apps/coordinator/src/checkpointer.ts b/apps/coordinator/src/checkpointer.ts index f6468e5c2e..a7b2efd176 100644 --- a/apps/coordinator/src/checkpointer.ts +++ b/apps/coordinator/src/checkpointer.ts @@ -1,5 +1,5 @@ import { ExponentialBackoff } from "@trigger.dev/core/v3/apps"; -import { testDockerCheckpoint } from "@trigger.dev/core/v3/apps"; +import { testDockerCheckpoint } from "@trigger.dev/core/v3/checkpoints"; import { nanoid } from "nanoid"; import fs from "node:fs/promises"; import { ChaosMonkey } from "./chaosMonkey"; diff --git a/apps/docker-provider/src/index.ts b/apps/docker-provider/src/index.ts index 9a4ea0160c..e588f1747e 100644 --- a/apps/docker-provider/src/index.ts +++ b/apps/docker-provider/src/index.ts @@ -7,7 +7,8 @@ import { TaskOperationsRestoreOptions, } from "@trigger.dev/core/v3/apps"; import { SimpleLogger } from "@trigger.dev/core/v3/apps"; -import { isExecaChildProcess, testDockerCheckpoint } from "@trigger.dev/core/v3/apps"; +import { isExecaChildProcess } from "@trigger.dev/core/v3/apps"; +import { testDockerCheckpoint } from 
"@trigger.dev/core/v3/checkpoints"; import { setTimeout } from "node:timers/promises"; import { PostStartCauses, PreStopCauses } from "@trigger.dev/core/v3"; diff --git a/packages/core/package.json b/packages/core/package.json index bd00ffb336..adc8f19185 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -58,7 +58,8 @@ "./v3/prod": "./src/v3/prod/index.ts", "./v3/workers": "./src/v3/workers/index.ts", "./v3/schemas": "./src/v3/schemas/index.ts", - "./v3/runEngineWorker": "./src/v3/runEngineWorker/index.ts" + "./v3/runEngineWorker": "./src/v3/runEngineWorker/index.ts", + "./v3/checkpoints": "./src/v3/checkpoints/index.ts" }, "sourceDialects": [ "@triggerdotdev/source" @@ -176,6 +177,9 @@ ], "v3/runEngineWorker": [ "dist/commonjs/v3/runEngineWorker/index.d.ts" + ], + "v3/checkpoints": [ + "dist/commonjs/v3/checkpoints/index.d.ts" ] } }, @@ -658,6 +662,17 @@ "types": "./dist/commonjs/v3/runEngineWorker/index.d.ts", "default": "./dist/commonjs/v3/runEngineWorker/index.js" } + }, + "./v3/checkpoints": { + "import": { + "@triggerdotdev/source": "./src/v3/checkpoints/index.ts", + "types": "./dist/esm/v3/checkpoints/index.d.ts", + "default": "./dist/esm/v3/checkpoints/index.js" + }, + "require": { + "types": "./dist/commonjs/v3/checkpoints/index.d.ts", + "default": "./dist/commonjs/v3/checkpoints/index.js" + } } }, "type": "module", diff --git a/packages/core/src/v3/apps/index.ts b/packages/core/src/v3/apps/index.ts index b30778299b..93a1ac2876 100644 --- a/packages/core/src/v3/apps/index.ts +++ b/packages/core/src/v3/apps/index.ts @@ -1,7 +1,6 @@ export * from "./backoff.js"; export * from "./logger.js"; export * from "./process.js"; -export * from "./checkpoints.js"; export * from "./http.js"; export * from "./provider.js"; export * from "./isExecaChildProcess.js"; diff --git a/packages/core/src/v3/apps/checkpoints.ts b/packages/core/src/v3/checkpoints/checkpoints.ts similarity index 95% rename from packages/core/src/v3/apps/checkpoints.ts 
rename to packages/core/src/v3/checkpoints/checkpoints.ts index 293cfc4781..4778b07bcd 100644 --- a/packages/core/src/v3/apps/checkpoints.ts +++ b/packages/core/src/v3/checkpoints/checkpoints.ts @@ -1,5 +1,5 @@ import { randomUUID } from "node:crypto"; -import { isExecaChildProcess } from "./isExecaChildProcess.js"; +import { isExecaChildProcess } from "../apps/isExecaChildProcess.js"; export type CheckpointTestResult = | { diff --git a/packages/core/src/v3/checkpoints/index.ts b/packages/core/src/v3/checkpoints/index.ts new file mode 100644 index 0000000000..c5f3d550a8 --- /dev/null +++ b/packages/core/src/v3/checkpoints/index.ts @@ -0,0 +1 @@ +export * from "./checkpoints.js"; From 31293b96e25216d674d336695ee861dbba2da867 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 10:53:32 +0000 Subject: [PATCH 467/485] Fix for glitchy hourglass animation --- .../assets/icons/AnimatedHourglassIcon.tsx | 2 +- .../route.tsx | 214 ++++++++++-------- 2 files changed, 122 insertions(+), 94 deletions(-) diff --git a/apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx b/apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx index f28a7d6870..3c94426fa0 100644 --- a/apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx +++ b/apps/webapp/app/assets/icons/AnimatedHourglassIcon.tsx @@ -21,7 +21,7 @@ export function AnimatedHourglassIcon({ ], { repeat: Infinity, delay } ); - }); + }, []); return ; } diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index c9f275179f..0410985969 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ 
b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -74,23 +74,27 @@ export const action = async ({ request, params }: ActionFunctionArgs) => { throw new Error("Project not found"); } - switch (submission.value.type) { - case "DATETIME": { - const waitpointId = WaitpointId.toId(waitpointFriendlyId); + const waitpointId = WaitpointId.toId(waitpointFriendlyId); - const waitpoint = await $replica.waitpoint.findFirst({ - select: { - projectId: true, - }, - where: { - id: waitpointId, - }, - }); + const waitpoint = await $replica.waitpoint.findFirst({ + select: { + projectId: true, + }, + where: { + id: waitpointId, + }, + }); - if (waitpoint?.projectId !== project.id) { - throw new Error("Waitpoint not found"); - } + if (waitpoint?.projectId !== project.id) { + return redirectWithErrorMessage( + submission.value.failureRedirect, + request, + "No waitpoint found" + ); + } + switch (submission.value.type) { + case "DATETIME": { const result = await engine.completeWaitpoint({ id: waitpointId, }); @@ -102,6 +106,8 @@ export const action = async ({ request, params }: ActionFunctionArgs) => { ); } case "MANUAL": { + //todo packet + //todo completeWaitpoint } } } catch (error: any) { @@ -124,28 +130,6 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint const isLoading = navigation.state !== "idle"; const organization = useOrganization(); const project = useProject(); - const currentJson = useRef("{\n\n}"); - const formAction = `/resources/orgs/${organization.slug}/projects/${project.slug}/waitpoints/${waitpoint.friendlyId}/complete`; - - const submitForm = useCallback( - (e: React.FormEvent) => { - const formData = new FormData(e.currentTarget); - const data: Record = { - type: formData.get("type") as string, - failedRedirect: formData.get("failedRedirect") as string, - successRedirect: formData.get("failedRedirect") as string, - }; - - data.payload = 
currentJson.current; - - submit(data, { - action: formAction, - method: "post", - }); - e.preventDefault(); - }, - [currentJson] - ); return (
@@ -161,62 +145,7 @@ export function CompleteWaitpointForm({ waitpoint }: { waitpoint: FormWaitpoint <>Waitpoint doesn't have a complete date ) ) : ( - <> -
submitForm(e)} - className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" - > - -
- Manually complete this waitpoint -
-
-
- { - currentJson.current = v; - }} - showClearButton={false} - showCopyButton={false} - height="100%" - min-height="100%" - max-height="100%" - /> -
-
-
-
- -
-
-
- - - To complete this waitpoint in your code use: - - } - code={` -await wait.completeToken(tokenId, - output -);`} - showLineNumbers={false} - /> - + )}
); @@ -229,7 +158,6 @@ function CompleteDateTimeWaitpointForm({ }) { const location = useLocation(); const navigation = useNavigation(); - const submit = useSubmit(); const isLoading = navigation.state !== "idle"; const organization = useOrganization(); const project = useProject(); @@ -292,3 +220,103 @@ function CompleteDateTimeWaitpointForm({ ); } + +function CompleteManualWaitpointForm({ waitpoint }: { waitpoint: { friendlyId: string } }) { + const location = useLocation(); + const navigation = useNavigation(); + const submit = useSubmit(); + const isLoading = navigation.state !== "idle"; + const organization = useOrganization(); + const project = useProject(); + const currentJson = useRef("{\n\n}"); + const formAction = `/resources/orgs/${organization.slug}/projects/${project.slug}/waitpoints/${waitpoint.friendlyId}/complete`; + + const submitForm = useCallback( + (e: React.FormEvent) => { + const formData = new FormData(e.currentTarget); + const data: Record = { + type: formData.get("type") as string, + failureRedirect: formData.get("failureRedirect") as string, + successRedirect: formData.get("successRedirect") as string, + }; + + data.payload = currentJson.current; + + submit(data, { + action: formAction, + method: "post", + }); + e.preventDefault(); + }, + [currentJson] + ); + + return ( + <> +
submitForm(e)} + className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" + > + + + +
+ Manually complete this waitpoint +
+
+
+ { + currentJson.current = v; + }} + showClearButton={false} + showCopyButton={false} + height="100%" + min-height="100%" + max-height="100%" + /> +
+
+
+
+ +
+
+
+ + + To complete this waitpoint in your code use: + + } + code={` +await wait.completeToken(tokenId, +output +);`} + showLineNumbers={false} + /> + + ); +} From 53b8ecf209782321cd828428ad12ba7a6c4d1e2a Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 10:53:40 +0000 Subject: [PATCH 468/485] Completed waitpoints display better --- .../route.tsx | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 30160d207f..00fa294e46 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -1136,18 +1136,25 @@ function SpanEntity({ span }: { span: Span }) {
+ {span.waitpoint.status === "PENDING" ? ( + + ) : span.waitpoint.output ? ( + + ) : span.waitpoint.completedAfter ? ( + + Completed at + + + + + ) : ( + "Completed with no output" + )} - {span.waitpoint.status === "PENDING" ? ( - - ) : span.waitpoint.output ? ( - - ) : ( - "No output" - )} ); } From 14468c93e5a9ce01d9b17b190b2fd4174d489173 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 14:31:59 +0000 Subject: [PATCH 469/485] Increase Redis maxRetriesPerRequest to 20 (default) --- internal-packages/redis-worker/src/queue.ts | 2 +- internal-packages/testcontainers/src/index.ts | 2 +- internal-packages/testcontainers/src/utils.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal-packages/redis-worker/src/queue.ts b/internal-packages/redis-worker/src/queue.ts index 94309aaaa7..0eaae13150 100644 --- a/internal-packages/redis-worker/src/queue.ts +++ b/internal-packages/redis-worker/src/queue.ts @@ -57,7 +57,7 @@ export class SimpleQueue { const delay = Math.min(times * 50, 1000); return delay; }, - maxRetriesPerRequest: 3, + maxRetriesPerRequest: 20, }); this.#registerCommands(); this.schema = schema; diff --git a/internal-packages/testcontainers/src/index.ts b/internal-packages/testcontainers/src/index.ts index 5e3a675ca3..b21ba9b04e 100644 --- a/internal-packages/testcontainers/src/index.ts +++ b/internal-packages/testcontainers/src/index.ts @@ -97,7 +97,7 @@ const redisOptions = async ( host: redisContainer.getHost(), port: redisContainer.getPort(), password: redisContainer.getPassword(), - maxRetriesPerRequest: 3, // Lower the retry attempts + maxRetriesPerRequest: 20, // Lower the retry attempts retryStrategy(times) { const delay = Math.min(times * 50, 2000); return delay; diff --git a/internal-packages/testcontainers/src/utils.ts b/internal-packages/testcontainers/src/utils.ts index 7d341c3c99..d4e81b6c54 100644 --- a/internal-packages/testcontainers/src/utils.ts +++ b/internal-packages/testcontainers/src/utils.ts @@ 
-79,7 +79,7 @@ async function verifyRedisConnection(container: StartedRedisContainer) { host: container.getHost(), port: container.getPort(), password: container.getPassword(), - maxRetriesPerRequest: 3, + maxRetriesPerRequest: 20, connectTimeout: 10000, retryStrategy(times) { const delay = Math.min(times * 50, 2000); From 9c28bde7e2c21dc2f3a6cb0b0339c229093ea0af Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 14:32:37 +0000 Subject: [PATCH 470/485] Completing and skipping waitpoints is working --- .../app/presenters/v3/SpanPresenter.server.ts | 40 ++- ...ts.tokens.$waitpointFriendlyId.complete.ts | 15 +- .../route.tsx | 42 +++- .../route.tsx | 227 +++++++++--------- references/hello-world/src/trigger/waits.ts | 2 +- 5 files changed, 191 insertions(+), 135 deletions(-) diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index 990607bfa8..2d61e0db08 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -11,6 +11,7 @@ import { machinePresetFromName } from "~/v3/machinePresets.server"; import { FINAL_ATTEMPT_STATUSES, isFailedRunStatus, isFinalRunStatus } from "~/v3/taskStatus"; import { BasePresenter } from "./basePresenter.server"; import { getMaxDuration } from "@trigger.dev/core/v3/apps"; +import { logger } from "~/services/logger.server"; type Result = Awaited>; export type Span = NonNullable["span"]>; @@ -43,6 +44,7 @@ export class SpanPresenter extends BasePresenter { const parentRun = await this._prisma.taskRun.findFirst({ select: { traceId: true, + runtimeEnvironmentId: true, }, where: { friendlyId: runFriendlyId, @@ -64,7 +66,7 @@ export class SpanPresenter extends BasePresenter { } //get the run - const span = await this.getSpan(traceId, spanId); + const span = await this.getSpan(traceId, spanId, parentRun.runtimeEnvironmentId); if (!span) { throw new Error("Span not found"); @@ -399,7 +401,7 @@ 
export class SpanPresenter extends BasePresenter { }; } - async getSpan(traceId: string, spanId: string) { + async getSpan(traceId: string, spanId: string, environmentId: string) { const span = await eventRepository.getSpan(spanId, traceId); if (!span) { return; @@ -451,14 +453,42 @@ export class SpanPresenter extends BasePresenter { }, }); + if (!waitpoint) { + logger.error(`SpanPresenter: Waitpoint not found`, { + spanId, + waitpointFriendlyId: span.entity.id, + }); + return { ...data, entity: null }; + } + + const output = + waitpoint.outputType === "application/store" + ? `/resources/packets/${environmentId}/${waitpoint.output}` + : typeof waitpoint.output !== "undefined" && waitpoint.output !== null + ? await prettyPrintPacket(waitpoint.output, waitpoint.outputType ?? undefined) + : undefined; + return { ...data, - entityType: "waitpoint" as const, - waitpoint, + entity: { + type: "waitpoint" as const, + object: { + friendlyId: waitpoint.friendlyId, + type: waitpoint.type, + status: waitpoint.status, + idempotencyKey: waitpoint.idempotencyKey, + userProvidedIdempotencyKey: waitpoint.userProvidedIdempotencyKey, + idempotencyKeyExpiresAt: waitpoint.idempotencyKeyExpiresAt, + output: output, + outputType: waitpoint.outputType, + outputIsError: waitpoint.outputIsError, + completedAfter: waitpoint.completedAfter, + }, + }, }; default: - return { ...data, entityType: "span" as const }; + return { ...data, entity: null }; } } } diff --git a/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts b/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts index ab3fbe33b9..e7109f67b2 100644 --- a/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts +++ b/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts @@ -8,6 +8,7 @@ import { import { WaitpointId } from "@trigger.dev/core/v3/apps"; import { z } from "zod"; import { $replica } from "~/db.server"; +import { env } 
from "~/env.server"; import { logger } from "~/services/logger.server"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; import { engine } from "~/v3/runEngine.server"; @@ -18,7 +19,7 @@ const { action } = createActionApiRoute( waitpointFriendlyId: z.string(), }), body: CompleteWaitpointTokenRequestBody, - maxContentLength: 1024 * 10, // 10KB + maxContentLength: env.TASK_PAYLOAD_MAXIMUM_SIZE, method: "POST", }, async ({ authentication, body, params }) => { @@ -26,12 +27,6 @@ const { action } = createActionApiRoute( const waitpointId = WaitpointId.toId(params.waitpointFriendlyId); try { - const stringifiedData = await stringifyIO(body.data); - const finalData = await conditionallyExportPacket( - stringifiedData, - `${waitpointId}/waitpoint/token` - ); - //check permissions const waitpoint = await $replica.waitpoint.findFirst({ where: { @@ -44,6 +39,12 @@ const { action } = createActionApiRoute( throw json({ error: "Waitpoint not found" }, { status: 404 }); } + const stringifiedData = await stringifyIO(body.data); + const finalData = await conditionallyExportPacket( + stringifiedData, + `${waitpointId}/waitpoint/token` + ); + const result = await engine.completeWaitpoint({ id: waitpointId, output: finalData.data diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 0410985969..59dc23ce88 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -2,9 +2,9 @@ import { parse } from "@conform-to/zod"; import { InformationCircleIcon } from "@heroicons/react/20/solid"; import { Form, useLocation, 
useNavigation, useSubmit } from "@remix-run/react"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; +import { conditionallyExportPacket, stringifyIO } from "@trigger.dev/core/v3"; import { WaitpointId } from "@trigger.dev/core/v3/apps"; import { Waitpoint } from "@trigger.dev/database"; -import { motion } from "framer-motion"; import { useCallback, useRef } from "react"; import { z } from "zod"; import { AnimatedHourglassIcon } from "~/assets/icons/AnimatedHourglassIcon"; @@ -14,16 +14,14 @@ import { Button } from "~/components/primitives/Buttons"; import { DateTime } from "~/components/primitives/DateTime"; import { Paragraph } from "~/components/primitives/Paragraph"; import { LiveCountdown } from "~/components/runs/v3/LiveTimer"; -import { $replica, prisma } from "~/db.server"; +import { $replica } from "~/db.server"; import { useOrganization } from "~/hooks/useOrganizations"; import { useProject } from "~/hooks/useProject"; import { redirectWithErrorMessage, redirectWithSuccessMessage } from "~/models/message.server"; import { logger } from "~/services/logger.server"; import { requireUserId } from "~/services/session.server"; -import { ProjectParamSchema, v3RunsPath, v3SchedulesPath } from "~/utils/pathBuilder"; +import { ProjectParamSchema, v3RunsPath } from "~/utils/pathBuilder"; import { engine } from "~/v3/runEngine.server"; -import { UpsertSchedule } from "~/v3/schedules"; -import { UpsertTaskScheduleService } from "~/v3/services/upsertTaskSchedule.server"; const CompleteWaitpointFormData = z.discriminatedUnion("type", [ z.object({ @@ -106,8 +104,34 @@ export const action = async ({ request, params }: ActionFunctionArgs) => { ); } case "MANUAL": { - //todo packet - //todo completeWaitpoint + let data: any; + try { + data = JSON.parse(submission.value.payload); + } catch (e) { + return redirectWithErrorMessage( + submission.value.failureRedirect, + request, + "Invalid payload, must be valid JSON" + ); + } + const stringifiedData = 
await stringifyIO(data); + const finalData = await conditionallyExportPacket( + stringifiedData, + `${waitpointId}/waitpoint/token` + ); + + const result = await engine.completeWaitpoint({ + id: waitpointId, + output: finalData.data + ? { type: finalData.dataType, value: finalData.data, isError: false } + : undefined, + }); + + return redirectWithSuccessMessage( + submission.value.successRedirect, + request, + "Waitpoint completed" + ); } } } catch (error: any) { @@ -259,7 +283,7 @@ function CompleteManualWaitpointForm({ waitpoint }: { waitpoint: { friendlyId: s onSubmit={(e) => submitForm(e)} className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" > - + (tokenId, -output + output );`} showLineNumbers={false} /> diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 00fa294e46..a5c36d28a6 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -1080,12 +1080,106 @@ function SpanEntity({ span }: { span: Span }) { const organization = useOrganization(); const project = useProject(); - switch (span.entityType) { - case "waitpoint": { - if (!span.waitpoint) { - return No waitpoint found: {span.entity.id}; - } + if (!span.entity) { + //normal span + return ( + <> + {span.level === "TRACE" ? ( + <> +
+ +
+ + + ) : ( +
+ } + state="complete" + /> +
+ )} + + + Message + {span.message} + + {span.triggeredRuns.length > 0 && ( + +
+ Triggered runs + + + + Run # + Task + Version + Created at + + + + {span.triggeredRuns.map((run) => { + const path = v3RunSpanPath( + organization, + project, + { friendlyId: run.friendlyId }, + { spanId: run.spanId } + ); + return ( + + + {run.number} + + + {run.taskIdentifier} + + + {run.lockedToVersion?.version ?? "–"} + + + + + + ); + })} + +
+
+
+ )} +
+ {span.events.length > 0 && } + {span.properties !== undefined ? ( + + ) : null} + + ); + } + switch (span.entity.type) { + case "waitpoint": { return ( <>
@@ -1101,9 +1195,9 @@ function SpanEntity({ span }: { span: Span }) { ID - {span.waitpoint?.friendlyId} + {span.entity.object.friendlyId} @@ -1122,33 +1216,33 @@ function SpanEntity({ span }: { span: Span }) {
- {span.waitpoint.userProvidedIdempotencyKey - ? span.waitpoint.idempotencyKey + {span.entity.object.userProvidedIdempotencyKey + ? span.entity.object.idempotencyKey : "–"}
- {span.waitpoint.idempotencyKeyExpiresAt ? ( + {span.entity.object.idempotencyKeyExpiresAt ? ( <> - TTL: + TTL: ) : null}
- {span.waitpoint.status === "PENDING" ? ( - - ) : span.waitpoint.output ? ( + {span.entity.object.status === "PENDING" ? ( + + ) : span.entity.object.output ? ( - ) : span.waitpoint.completedAfter ? ( + ) : span.entity.object.completedAfter ? ( Completed at - + ) : ( @@ -1159,100 +1253,7 @@ function SpanEntity({ span }: { span: Span }) { ); } default: { - return ( - <> - {span.level === "TRACE" ? ( - <> -
- -
- - - ) : ( -
- } - state="complete" - /> -
- )} - - - Message - {span.message} - - {span.triggeredRuns.length > 0 && ( - -
- Triggered runs - - - - Run # - Task - Version - Created at - - - - {span.triggeredRuns.map((run) => { - const path = v3RunSpanPath( - organization, - project, - { friendlyId: run.friendlyId }, - { spanId: run.spanId } - ); - return ( - - - {run.number} - - - {run.taskIdentifier} - - - {run.lockedToVersion?.version ?? "–"} - - - - - - ); - })} - -
-
-
- )} -
- {span.events.length > 0 && } - {span.properties !== undefined ? ( - - ) : null} - - ); + return No span for {span.entity.type}; } } } diff --git a/references/hello-world/src/trigger/waits.ts b/references/hello-world/src/trigger/waits.ts index 4014dc2156..f6c898d7a4 100644 --- a/references/hello-world/src/trigger/waits.ts +++ b/references/hello-world/src/trigger/waits.ts @@ -71,7 +71,7 @@ export const waitForDuration = task({ throw new Error("This is an error"); }, { - maxAttempts: 3, + maxAttempts: 2, } ); }, From 632881fe8120484e7902648bae248ba86c39a67b Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 15:24:38 +0000 Subject: [PATCH 471/485] Remove the database prisma dev command, since we need to use create only now. Updated docs --- CONTRIBUTING.md | 14 +++++++++++--- internal-packages/database/package.json | 1 - 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 260d9861f4..b3ed907610 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -230,13 +230,21 @@ pnpm run db:studio cd packages/database ``` -3. Create and apply the migrations +3. Create a migration ``` - pnpm run db:migrate:dev + pnpm run db:migrate:dev:create ``` - This creates a migration file and executes the migrations against your database and applies changes to the database schema(s) + This creates a migration file. Check the migration file does only what you want. If you're adding any database indexes they must use `CONCURRENTLY`, otherwise they'll lock the table when executed. + +4. Run the migration. + + ``` + pnpm run db:migrate:deploy + pnpm run generate + ``` + This executes the migrations against your database and applies changes to the database schema(s), and then regenerates the Prisma client. 4. Commit generated migrations as well as changes to the schema.prisma file 5. If you're using VSCode you may need to restart the Typescript server in the webapp to get updated type inference. 
Open a TypeScript file, then open the Command Palette (View > Command Palette) and run `TypeScript: Restart TS server`. diff --git a/internal-packages/database/package.json b/internal-packages/database/package.json index fc63f0a0af..a170b10cec 100644 --- a/internal-packages/database/package.json +++ b/internal-packages/database/package.json @@ -12,7 +12,6 @@ }, "scripts": { "generate": "prisma generate", - "db:migrate:dev": "prisma migrate dev", "db:migrate:dev:create": "prisma migrate dev --create-only", "db:migrate:deploy": "prisma migrate deploy", "db:push": "prisma db push", From 91538c9b674857e9699df7dd82178462525e903c Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 19:01:28 +0000 Subject: [PATCH 472/485] Added skip timeout, reworked the UI --- .../app/presenters/v3/SpanPresenter.server.ts | 9 + ...points.tokens.$waitpointFriendlyId.wait.ts | 16 +- .../route.tsx | 178 +++++++---- .../route.tsx | 291 ++++++++++-------- .../database/prisma/schema.prisma | 3 +- .../run-engine/src/engine/index.ts | 20 +- .../src/engine/tests/waitpoints.test.ts | 4 +- packages/core/src/v3/apiClient/index.ts | 3 - packages/core/src/v3/runtime/index.ts | 1 - .../src/v3/runtime/managedRuntimeManager.ts | 1 - packages/core/src/v3/schemas/api.ts | 20 +- packages/core/src/v3/schemas/common.ts | 2 +- packages/trigger-sdk/src/v3/wait.ts | 26 +- references/hello-world/src/trigger/waits.ts | 4 +- 14 files changed, 324 insertions(+), 254 deletions(-) diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index 2d61e0db08..2be0d96f1d 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -1,4 +1,5 @@ import { + isWaitpointOutputTimeout, MachinePresetName, parsePacket, prettyPrintPacket, @@ -468,6 +469,13 @@ export class SpanPresenter extends BasePresenter { ? await prettyPrintPacket(waitpoint.output, waitpoint.outputType ?? 
undefined) : undefined; + let isTimeout = false; + if (waitpoint.outputIsError && output) { + if (isWaitpointOutputTimeout(output)) { + isTimeout = true; + } + } + return { ...data, entity: { @@ -483,6 +491,7 @@ export class SpanPresenter extends BasePresenter { outputType: waitpoint.outputType, outputIsError: waitpoint.outputIsError, completedAfter: waitpoint.completedAfter, + isTimeout, }, }, }; diff --git a/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts index 6adee9cb66..e34e25529f 100644 --- a/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts +++ b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts @@ -1,20 +1,10 @@ import { json } from "@remix-run/server-runtime"; -import { - CompleteWaitpointTokenRequestBody, - CompleteWaitpointTokenResponseBody, - conditionallyExportPacket, - CreateWaitpointTokenResponseBody, - stringifyIO, - WaitForWaitpointTokenRequestBody, - WaitForWaitpointTokenResponseBody, -} from "@trigger.dev/core/v3"; +import { WaitForWaitpointTokenResponseBody } from "@trigger.dev/core/v3"; import { RunId, WaitpointId } from "@trigger.dev/core/v3/apps"; import { z } from "zod"; import { $replica } from "~/db.server"; import { logger } from "~/services/logger.server"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; -import { parseDelay } from "~/utils/delays"; -import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; import { engine } from "~/v3/runEngine.server"; const { action } = createActionApiRoute( @@ -23,7 +13,6 @@ const { action } = createActionApiRoute( runFriendlyId: z.string(), waitpointFriendlyId: z.string(), }), - body: WaitForWaitpointTokenRequestBody, maxContentLength: 1024 * 10, // 10KB method: "POST", }, @@ -32,8 +21,6 @@ const { 
action } = createActionApiRoute( const waitpointId = WaitpointId.toId(params.waitpointFriendlyId); const runId = RunId.toId(params.runFriendlyId); - const timeout = await parseDelay(body.timeout); - try { //check permissions const waitpoint = await $replica.waitpoint.findFirst({ @@ -53,7 +40,6 @@ const { action } = createActionApiRoute( environmentId: authentication.environment.id, projectId: authentication.environment.project.id, organizationId: authentication.environment.organization.id, - failAfter: timeout, }); return json( diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 59dc23ce88..f0e0cc0465 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -1,18 +1,22 @@ import { parse } from "@conform-to/zod"; -import { InformationCircleIcon } from "@heroicons/react/20/solid"; import { Form, useLocation, useNavigation, useSubmit } from "@remix-run/react"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; -import { conditionallyExportPacket, stringifyIO } from "@trigger.dev/core/v3"; +import { + conditionallyExportPacket, + IOPacket, + stringifyIO, + timeoutError, +} from "@trigger.dev/core/v3"; import { WaitpointId } from "@trigger.dev/core/v3/apps"; import { Waitpoint } from "@trigger.dev/database"; import { useCallback, useRef } from "react"; import { z } from "zod"; import { AnimatedHourglassIcon } from "~/assets/icons/AnimatedHourglassIcon"; -import { CodeBlock } from "~/components/code/CodeBlock"; import { JSONEditor } from "~/components/code/JSONEditor"; import { Button } from 
"~/components/primitives/Buttons"; import { DateTime } from "~/components/primitives/DateTime"; import { Paragraph } from "~/components/primitives/Paragraph"; +import { InfoIconTooltip } from "~/components/primitives/Tooltip"; import { LiveCountdown } from "~/components/runs/v3/LiveTimer"; import { $replica } from "~/db.server"; import { useOrganization } from "~/hooks/useOrganizations"; @@ -26,7 +30,8 @@ import { engine } from "~/v3/runEngine.server"; const CompleteWaitpointFormData = z.discriminatedUnion("type", [ z.object({ type: z.literal("MANUAL"), - payload: z.string(), + payload: z.string().optional(), + isTimeout: z.string().optional(), successRedirect: z.string(), failureRedirect: z.string(), }), @@ -104,9 +109,51 @@ export const action = async ({ request, params }: ActionFunctionArgs) => { ); } case "MANUAL": { - let data: any; + if (submission.value.isTimeout) { + try { + const result = await engine.completeWaitpoint({ + id: waitpointId, + output: { + type: "application/json", + value: JSON.stringify(timeoutError(new Date())), + isError: true, + }, + }); + + return redirectWithSuccessMessage( + submission.value.successRedirect, + request, + "Waitpoint timed out" + ); + } catch (e) { + return redirectWithErrorMessage( + submission.value.failureRedirect, + request, + "Invalid payload, must be valid JSON" + ); + } + } + try { - data = JSON.parse(submission.value.payload); + const data = submission.value.payload ? JSON.parse(submission.value.payload) : {}; + const stringifiedData = await stringifyIO(data); + const finalData = await conditionallyExportPacket( + stringifiedData, + `${waitpointId}/waitpoint/token` + ); + + const result = await engine.completeWaitpoint({ + id: waitpointId, + output: finalData.data + ? 
{ type: finalData.dataType, value: finalData.data, isError: false } + : undefined, + }); + + return redirectWithSuccessMessage( + submission.value.successRedirect, + request, + "Waitpoint completed" + ); } catch (e) { return redirectWithErrorMessage( submission.value.failureRedirect, @@ -114,24 +161,6 @@ export const action = async ({ request, params }: ActionFunctionArgs) => { "Invalid payload, must be valid JSON" ); } - const stringifiedData = await stringifyIO(data); - const finalData = await conditionallyExportPacket( - stringifiedData, - `${waitpointId}/waitpoint/token` - ); - - const result = await engine.completeWaitpoint({ - id: waitpointId, - output: finalData.data - ? { type: finalData.dataType, value: finalData.data, isError: false } - : undefined, - }); - - return redirectWithSuccessMessage( - submission.value.successRedirect, - request, - "Waitpoint completed" - ); } } } catch (error: any) { @@ -199,7 +228,7 @@ function CompleteDateTimeWaitpointForm({
Manually skip this waitpoint @@ -229,17 +258,15 @@ function CompleteDateTimeWaitpointForm({
-
-
- -
+
+
); @@ -281,7 +308,7 @@ function CompleteManualWaitpointForm({ waitpoint }: { waitpoint: { friendlyId: s action={formAction} method="post" onSubmit={(e) => submitForm(e)} - className="grid h-full max-h-full grid-rows-[2.5rem_1fr_2.5rem] overflow-hidden rounded-md border border-grid-bright" + className="grid h-full max-h-full grid-rows-[2.5rem_1fr_3.25rem] overflow-hidden border-t border-grid-bright" > -
+
Manually complete this waitpoint +
-
+
-
-
- -
+
+
- - - To complete this waitpoint in your code use: - - } - code={` -await wait.completeToken(tokenId, - output -);`} - showLineNumbers={false} - /> ); } + +export function ForceTimeout({ waitpoint }: { waitpoint: { friendlyId: string } }) { + const location = useLocation(); + const navigation = useNavigation(); + const isLoading = navigation.state !== "idle"; + const organization = useOrganization(); + const project = useProject(); + const formAction = `/resources/orgs/${organization.slug}/projects/${project.slug}/waitpoints/${waitpoint.friendlyId}/complete`; + + return ( +
+ + + + + +
+ ); +} diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index a5c36d28a6..23feaab90d 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -67,7 +67,10 @@ import { v3TraceSpanPath, } from "~/utils/pathBuilder"; import { SpanLink } from "~/v3/eventRepository.server"; -import { CompleteWaitpointForm } from "../resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route"; +import { + CompleteWaitpointForm, + ForceTimeout, +} from "../resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route"; export const loader = async ({ request, params }: LoaderFunctionArgs) => { const userId = await requireUserId(request); @@ -226,73 +229,67 @@ function SpanBody({
-
-
- {tab === "detail" ? ( -
- - - Status - - - - - - Task - - - {span.taskSlug} - - } - content={`Filter runs by ${span.taskSlug}`} - /> - - - {span.idempotencyKey && ( - - Idempotency key - {span.idempotencyKey} - - )} +
+ {tab === "detail" ? ( +
+ + + Status + + + + + + Task + + + {span.taskSlug} + + } + content={`Filter runs by ${span.taskSlug}`} + /> + + + {span.idempotencyKey && ( - Version - - {span.workerVersion ? ( - span.workerVersion - ) : ( - - Never started - - - )} - + Idempotency key + {span.idempotencyKey} - -
- ) : ( -
- -
- )} -
+ )} + + Version + + {span.workerVersion ? ( + span.workerVersion + ) : ( + + Never started + + + )} + + +
+
+ ) : ( + + )}
); @@ -1083,7 +1080,7 @@ function SpanEntity({ span }: { span: Span }) { if (!span.entity) { //normal span return ( - <> +
{span.level === "TRACE" ? ( <>
@@ -1174,82 +1171,110 @@ function SpanEntity({ span }: { span: Span }) { showLineNumbers={false} /> ) : null} - +
); } switch (span.entity.type) { case "waitpoint": { return ( - <> -
- Waitpoint - - A waitpoint pauses your code from continuing until the conditions are met.{" "} - View docs. - -
- - - Status - - - - - - ID - - {span.entity.object.friendlyId} - - - - Idempotency key - -
-
- {span.entity.object.userProvidedIdempotencyKey - ? span.entity.object.idempotencyKey - : "–"} -
-
- {span.entity.object.idempotencyKeyExpiresAt ? ( - <> - TTL: - - ) : null} -
-
-
-
- {span.entity.object.status === "PENDING" ? ( - - ) : span.entity.object.output ? ( - - ) : span.entity.object.completedAfter ? ( +
+
+
+ Waitpoint + + A waitpoint pauses your code from continuing until the conditions are met.{" "} + View docs. + +
+ - Completed at + Status - + - ) : ( - "Completed with no output" - )} - - + + ID + + {span.entity.object.friendlyId} + + + + Idempotency key + +
+
+ {span.entity.object.userProvidedIdempotencyKey + ? span.entity.object.idempotencyKey + : "–"} +
+
+ {span.entity.object.idempotencyKeyExpiresAt ? ( + <> + TTL: + + ) : null} +
+
+
+
+ {span.entity.object.type === "MANUAL" && ( + <> + + Timeout at + +
+ {span.entity.object.completedAfter ? ( + + ) : ( + "–" + )} + {span.entity.object.status === "PENDING" && ( + + )} +
+
+
+ + )} + {span.entity.object.status === "PENDING" ? null : span.entity.object.isTimeout ? ( + <> + ) : span.entity.object.output ? ( + + ) : span.entity.object.completedAfter ? ( + + Completed at + + + + + ) : ( + "Completed with no output" + )} + +
+ {span.entity.object.status === "PENDING" && ( +
+ +
+ )} +
); } default: { diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index 3f62b4bce8..d621754f42 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -2068,7 +2068,8 @@ model Waitpoint { completedByTaskRunId String? @unique completedByTaskRun TaskRun? @relation(fields: [completedByTaskRunId], references: [id], onDelete: SetNull) - /// If it's a DATETIME type waitpoint, this is the date + /// If it's a DATETIME type waitpoint, this is the date. + /// If it's a MANUAL waitpoint, this can be set as the `timeout`. completedAfter DateTime? /// If it's a BATCH type waitpoint, this is the associated batch diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 2de5f59300..04c0b71039 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -22,7 +22,7 @@ import { TaskRunFailedExecutionResult, TaskRunInternalError, TaskRunSuccessfulExecutionResult, - WAITPOINT_TIMEOUT_ERROR_CODE, + timeoutError, } from "@trigger.dev/core/v3"; import { BatchId, @@ -1788,10 +1788,7 @@ export class RunEngine { job: "finishWaitpoint", payload: { waitpointId: waitpoint.id, - error: JSON.stringify({ - code: WAITPOINT_TIMEOUT_ERROR_CODE, - message: `Waitpoint timed out at ${timeout.toISOString()}`, - }), + error: JSON.stringify(timeoutError(timeout)), }, availableAt: timeout, }); @@ -1947,7 +1944,7 @@ export class RunEngine { projectId, organizationId, releaseConcurrency, - failAfter, + timeout, spanIdToComplete, batch, workerId, @@ -1962,7 +1959,7 @@ export class RunEngine { releaseConcurrency?: { releaseQueue: boolean; }; - failAfter?: Date; + timeout?: Date; spanIdToComplete?: string; batch?: { id: string; index?: number }; workerId?: string; @@ -2033,19 +2030,16 @@ export class RunEngine { await this.#sendNotificationToWorker({ runId, snapshot }); 
} - if (failAfter) { + if (timeout) { for (const waitpoint of $waitpoints) { await this.worker.enqueue({ id: `finishWaitpoint.${waitpoint}`, job: "finishWaitpoint", payload: { waitpointId: waitpoint, - error: JSON.stringify({ - code: WAITPOINT_TIMEOUT_ERROR_CODE, - message: `Waitpoint timed out at ${failAfter.toISOString()}`, - }), + error: JSON.stringify(timeoutError(timeout)), }, - availableAt: failAfter, + availableAt: timeout, }); } } diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 0692ba35c0..1d09e336d9 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -498,6 +498,8 @@ describe("RunEngine Waitpoints", () => { const result = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, + //fail after 200ms + timeout: new Date(Date.now() + 200), }); //block the run @@ -507,8 +509,6 @@ describe("RunEngine Waitpoints", () => { environmentId: authenticatedEnvironment.id, projectId: authenticatedEnvironment.projectId, organizationId: authenticatedEnvironment.organizationId, - //fail after 200ms - failAfter: new Date(Date.now() + 200), }); const executionData = await engine.getRunExecutionData({ runId: run.id }); diff --git a/packages/core/src/v3/apiClient/index.ts b/packages/core/src/v3/apiClient/index.ts index d1ab68d341..5e766a99a5 100644 --- a/packages/core/src/v3/apiClient/index.ts +++ b/packages/core/src/v3/apiClient/index.ts @@ -34,7 +34,6 @@ import { UpdateScheduleOptions, WaitForDurationRequestBody, WaitForDurationResponseBody, - WaitForWaitpointTokenRequestBody, WaitForWaitpointTokenResponseBody, } from "../schemas/index.js"; import { taskContext } from "../task-context-api.js"; @@ -676,7 +675,6 @@ export class ApiClient { waitForWaitpointToken( runFriendlyId: string, waitpointFriendlyId: 
string, - options?: WaitForWaitpointTokenRequestBody, requestOptions?: ZodFetchOptions ) { return zodfetch( @@ -685,7 +683,6 @@ export class ApiClient { { method: "POST", headers: this.#getHeaders(false), - body: JSON.stringify(options ?? {}), }, mergeRequestOptions(this.defaultRequestOptions, requestOptions) ); diff --git a/packages/core/src/v3/runtime/index.ts b/packages/core/src/v3/runtime/index.ts index c53767e2ad..39e874f986 100644 --- a/packages/core/src/v3/runtime/index.ts +++ b/packages/core/src/v3/runtime/index.ts @@ -4,7 +4,6 @@ import { BatchTaskRunExecutionResult, TaskRunContext, TaskRunExecutionResult, - WaitForWaitpointTokenRequestBody, WaitpointTokenResult, } from "../schemas/index.js"; import { getGlobal, registerGlobal, unregisterGlobal } from "../utils/globals.js"; diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index f2c0c5b816..528d5a8755 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -6,7 +6,6 @@ import { TaskRunExecutionResult, TaskRunFailedExecutionResult, TaskRunSuccessfulExecutionResult, - WaitForWaitpointTokenRequestBody, WaitpointTokenResult, } from "../schemas/index.js"; import { ExecutorToWorkerProcessConnection } from "../zodIpc.js"; diff --git a/packages/core/src/v3/schemas/api.ts b/packages/core/src/v3/schemas/api.ts index 5c05a881e9..7f06d869de 100644 --- a/packages/core/src/v3/schemas/api.ts +++ b/packages/core/src/v3/schemas/api.ts @@ -928,17 +928,6 @@ export const CompleteWaitpointTokenResponseBody = z.object({ }); export type CompleteWaitpointTokenResponseBody = z.infer; -export const WaitForWaitpointTokenRequestBody = z.object({ - /** - * The maximum amount of time to wait for the token to be completed. - * If this is exceeded, the waitpoint will timeout and will return `ok` of `false`. 
- * - * You can pass a `Date` object, or a string in this format: "30s", "1m", "2h", "3d", "4w". - */ - timeout: TimePeriod.optional(), -}); -export type WaitForWaitpointTokenRequestBody = z.infer; - export const WaitForWaitpointTokenResponseBody = z.object({ success: z.boolean(), }); @@ -973,7 +962,7 @@ export const WaitForDurationResponseBody = z.object({ }); export type WaitForDurationResponseBody = z.infer; -export const WAITPOINT_TIMEOUT_ERROR_CODE = "TRIGGER_WAITPOINT_TIMEOUT"; +const WAITPOINT_TIMEOUT_ERROR_CODE = "TRIGGER_WAITPOINT_TIMEOUT"; export function isWaitpointOutputTimeout(output: string): boolean { try { @@ -983,3 +972,10 @@ export function isWaitpointOutputTimeout(output: string): boolean { return false; } } + +export function timeoutError(timeout: Date) { + return { + code: WAITPOINT_TIMEOUT_ERROR_CODE, + message: `Waitpoint timed out at ${timeout.toISOString()}`, + }; +} diff --git a/packages/core/src/v3/schemas/common.ts b/packages/core/src/v3/schemas/common.ts index a7845aeade..0112717eb2 100644 --- a/packages/core/src/v3/schemas/common.ts +++ b/packages/core/src/v3/schemas/common.ts @@ -399,7 +399,7 @@ export type WaitpointTokenTypedResult = } | { ok: false; - error: string; + error: Error; }; export const SerializedError = z.object({ diff --git a/packages/trigger-sdk/src/v3/wait.ts b/packages/trigger-sdk/src/v3/wait.ts index 52760d2f33..2f134e47f1 100644 --- a/packages/trigger-sdk/src/v3/wait.ts +++ b/packages/trigger-sdk/src/v3/wait.ts @@ -5,20 +5,17 @@ import { apiClientManager, ApiPromise, ApiRequestOptions, - conditionallyExportPacket, CreateWaitpointTokenRequestBody, CreateWaitpointTokenResponseBody, mergeRequestOptions, - stringifyIO, CompleteWaitpointTokenResponseBody, - WaitForWaitpointTokenRequestBody, WaitpointTokenTypedResult, Prettify, taskContext, } from "@trigger.dev/core/v3"; import { tracer } from "./tracer.js"; -import { conditionallyImportPacket } from "../../../core/dist/commonjs/v3/index.js"; import { 
conditionallyImportAndParsePacket } from "@trigger.dev/core/v3/utils/ioSerialization"; +import { SpanStatusCode } from "@opentelemetry/api"; function createToken( options?: CreateWaitpointTokenRequestBody, @@ -118,6 +115,13 @@ type WaitPeriod = years: number; }; +export class WaitpointTimeoutError extends Error { + constructor(message: string) { + super(message); + this.name = "WaitpointTimeoutError"; + } +} + export const wait = { for: async (options: WaitForOptions) => { const ctx = taskContext.ctx; @@ -203,8 +207,7 @@ export const wait = { createToken, completeToken, forToken: async ( - token: string | { id: string }, - options?: WaitForWaitpointTokenRequestBody + token: string | { id: string } ): Promise>> => { const ctx = taskContext.ctx; @@ -219,7 +222,7 @@ export const wait = { return tracer.startActiveSpan( `wait.forToken()`, async (span) => { - const response = await apiClient.waitForWaitpointToken(ctx.run.id, tokenId, options); + const response = await apiClient.waitForWaitpointToken(ctx.run.id, tokenId); if (!response.success) { throw new Error(`Failed to wait for wait token ${tokenId}`); @@ -240,9 +243,16 @@ export const wait = { output: data, } as WaitpointTokenTypedResult; } else { + const error = new WaitpointTimeoutError(data.message); + + span.recordException(error); + span.setStatus({ + code: SpanStatusCode.ERROR, + }); + return { ok: result.ok, - error: data, + error, } as WaitpointTokenTypedResult; } }, diff --git a/references/hello-world/src/trigger/waits.ts b/references/hello-world/src/trigger/waits.ts index f6c898d7a4..171d42ae7c 100644 --- a/references/hello-world/src/trigger/waits.ts +++ b/references/hello-world/src/trigger/waits.ts @@ -11,18 +11,20 @@ export const waitToken = task({ idempotencyKey, idempotencyKeyTTL, completionDelay, + timeout, }: { completeBeforeWaiting?: boolean; idempotencyKey?: string; idempotencyKeyTTL?: string; completionDelay?: number; + timeout?: string; }) => { logger.log("Hello, world", { completeBeforeWaiting 
}); const token = await wait.createToken({ idempotencyKey, idempotencyKeyTTL, - timeout: completionDelay ? undefined : new Date(Date.now() + 10_000), + timeout, }); logger.log("Token", token); From c6f6eac07185aabd25ee99ce98af074df2bccac3 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 19:05:06 +0000 Subject: [PATCH 473/485] Tweaked spacing --- .../route.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index 23feaab90d..28689b71c3 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -1235,7 +1235,7 @@ function SpanEntity({ span }: { span: Span }) { Timeout at -
+
{span.entity.object.completedAfter ? ( ) : ( From 76e49f703136bf8a7113c044aaef5c038049a7e9 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 19:48:37 +0000 Subject: [PATCH 474/485] Added payload limit to waitpoint token completion from dashboard --- .../route.tsx | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index f0e0cc0465..56ebef0726 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -1,3 +1,4 @@ +import { env } from "~/env.server"; import { parse } from "@conform-to/zod"; import { Form, useLocation, useNavigation, useSubmit } from "@remix-run/react"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; @@ -135,6 +136,17 @@ export const action = async ({ request, params }: ActionFunctionArgs) => { } try { + if ( + submission.value.payload && + submission.value.payload.length > env.TASK_PAYLOAD_MAXIMUM_SIZE + ) { + return redirectWithErrorMessage( + submission.value.failureRedirect, + request, + "Payload is too large" + ); + } + const data = submission.value.payload ? 
JSON.parse(submission.value.payload) : {}; const stringifiedData = await stringifyIO(data); const finalData = await conditionallyExportPacket( From e8b7bee656672e720cfd752aa244125e7b6cc5b4 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Wed, 26 Feb 2025 19:57:59 +0000 Subject: [PATCH 475/485] Test idempotency works on wait.for and wait.until --- references/hello-world/src/trigger/waits.ts | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/references/hello-world/src/trigger/waits.ts b/references/hello-world/src/trigger/waits.ts index 171d42ae7c..0749cde17d 100644 --- a/references/hello-world/src/trigger/waits.ts +++ b/references/hello-world/src/trigger/waits.ts @@ -1,4 +1,4 @@ -import { logger, wait, task, retry } from "@trigger.dev/sdk/v3"; +import { logger, wait, task, retry, idempotencyKeys } from "@trigger.dev/sdk/v3"; type Token = { status: "approved" | "pending" | "rejected"; @@ -62,11 +62,21 @@ export const completeWaitToken = task({ export const waitForDuration = task({ id: "wait-duration", - run: async ({ duration = 4 }: { duration?: number }) => { - await wait.for({ seconds: duration }); + run: async ({ + duration = 4, + idempotencyKey, + idempotencyKeyTTL, + }: { + duration?: number; + idempotencyKey?: string; + idempotencyKeyTTL?: string; + }) => { + const idempotency = idempotencyKey ? 
await idempotencyKeys.create(idempotencyKey) : undefined; + + await wait.for({ seconds: duration, idempotencyKey: idempotency, idempotencyKeyTTL }); await wait.until({ date: new Date(Date.now() + duration * 1000) }); - await retry.fetch("https://example.com/404/", { method: "GET" }); + await retry.fetch("https://example.com/404", { method: "GET" }); await retry.onThrow( async () => { From ef1f7e4de6cc2c8264e45d893d0bf316e42bc03c Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 27 Feb 2025 15:23:23 +0000 Subject: [PATCH 476/485] Moved the worker-actions to /engine/ from /api/ --- ...ts => engine.v1.worker-actions.connect.ts} | 0 ...loyments.$deploymentFriendlyId.dequeue.ts} | 0 ...ts => engine.v1.worker-actions.dequeue.ts} | 0 ... => engine.v1.worker-actions.heartbeat.ts} | 0 ...actions.runs.$runFriendlyId.logs.debug.ts} | 0 ....$snapshotFriendlyId.attempts.complete.ts} | 0 ...ots.$snapshotFriendlyId.attempts.start.ts} | 0 ...snapshots.$snapshotFriendlyId.continue.ts} | 0 ...napshots.$snapshotFriendlyId.heartbeat.ts} | 0 ....snapshots.$snapshotFriendlyId.restore.ts} | 0 ....snapshots.$snapshotFriendlyId.suspend.ts} | 0 ...s.runs.$runFriendlyId.snapshots.latest.ts} | 0 .../src/v3/runEngineWorker/supervisor/http.ts | 22 +++++++++---------- 13 files changed, 11 insertions(+), 11 deletions(-) rename apps/webapp/app/routes/{api.v1.worker-actions.connect.ts => engine.v1.worker-actions.connect.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts => engine.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.dequeue.ts => engine.v1.worker-actions.dequeue.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.heartbeat.ts => engine.v1.worker-actions.heartbeat.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts => engine.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts} (100%) rename 
apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts => engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts => engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.continue.ts => engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.continue.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts => engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.restore.ts => engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.restore.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.suspend.ts => engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.suspend.ts} (100%) rename apps/webapp/app/routes/{api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts => engine.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts} (100%) diff --git a/apps/webapp/app/routes/api.v1.worker-actions.connect.ts b/apps/webapp/app/routes/engine.v1.worker-actions.connect.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.connect.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.connect.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts b/apps/webapp/app/routes/engine.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts similarity index 100% rename from 
apps/webapp/app/routes/api.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts b/apps/webapp/app/routes/engine.v1.worker-actions.dequeue.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.dequeue.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.dequeue.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts b/apps/webapp/app/routes/engine.v1.worker-actions.heartbeat.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.heartbeat.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.heartbeat.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts similarity index 100% rename from 
apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.continue.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.continue.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.continue.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.continue.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.restore.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.restore.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.restore.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.restore.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.suspend.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.suspend.ts similarity index 100% rename from 
apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.suspend.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.suspend.ts diff --git a/apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts rename to apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.snapshots.latest.ts diff --git a/packages/core/src/v3/runEngineWorker/supervisor/http.ts b/packages/core/src/v3/runEngineWorker/supervisor/http.ts index 4116a5d135..fa2249e47a 100644 --- a/packages/core/src/v3/runEngineWorker/supervisor/http.ts +++ b/packages/core/src/v3/runEngineWorker/supervisor/http.ts @@ -52,7 +52,7 @@ export class SupervisorHttpClient { async connect(body: WorkerApiConnectRequestBody) { return wrapZodFetch( WorkerApiConnectResponseBody, - `${this.apiUrl}/api/v1/worker-actions/connect`, + `${this.apiUrl}/engine/v1/worker-actions/connect`, { method: "POST", headers: { @@ -67,7 +67,7 @@ export class SupervisorHttpClient { async dequeue(body: WorkerApiDequeueRequestBody) { return wrapZodFetch( WorkerApiDequeueResponseBody, - `${this.apiUrl}/api/v1/worker-actions/dequeue`, + `${this.apiUrl}/engine/v1/worker-actions/dequeue`, { method: "POST", headers: { @@ -82,7 +82,7 @@ export class SupervisorHttpClient { async dequeueFromVersion(deploymentId: string, maxRunCount = 1, runnerId?: string) { return wrapZodFetch( WorkerApiDequeueResponseBody, - `${this.apiUrl}/api/v1/worker-actions/deployments/${deploymentId}/dequeue?maxRunCount=${maxRunCount}`, + `${this.apiUrl}/engine/v1/worker-actions/deployments/${deploymentId}/dequeue?maxRunCount=${maxRunCount}`, { headers: { ...this.defaultHeaders, @@ -95,7 +95,7 @@ export class SupervisorHttpClient { async 
heartbeatWorker(body: WorkerApiHeartbeatRequestBody) { return wrapZodFetch( WorkerApiHeartbeatResponseBody, - `${this.apiUrl}/api/v1/worker-actions/heartbeat`, + `${this.apiUrl}/engine/v1/worker-actions/heartbeat`, { method: "POST", headers: { @@ -115,7 +115,7 @@ export class SupervisorHttpClient { ) { return wrapZodFetch( WorkerApiRunHeartbeatResponseBody, - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/heartbeat`, + `${this.apiUrl}/engine/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/heartbeat`, { method: "POST", headers: { @@ -136,7 +136,7 @@ export class SupervisorHttpClient { ) { return wrapZodFetch( WorkerApiRunAttemptStartResponseBody, - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, + `${this.apiUrl}/engine/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/start`, { method: "POST", headers: { @@ -156,7 +156,7 @@ export class SupervisorHttpClient { ) { return wrapZodFetch( WorkerApiRunAttemptCompleteResponseBody, - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, + `${this.apiUrl}/engine/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, { method: "POST", headers: { @@ -171,7 +171,7 @@ export class SupervisorHttpClient { async getLatestSnapshot(runId: string, runnerId?: string) { return wrapZodFetch( WorkerApiRunLatestSnapshotResponseBody, - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/latest`, + `${this.apiUrl}/engine/v1/worker-actions/runs/${runId}/snapshots/latest`, { method: "GET", headers: { @@ -186,7 +186,7 @@ export class SupervisorHttpClient { try { const res = await wrapZodFetch( z.unknown(), - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/logs/debug`, + `${this.apiUrl}/engine/v1/worker-actions/runs/${runId}/logs/debug`, { method: "POST", headers: { @@ -209,7 +209,7 @@ export class SupervisorHttpClient { async continueRunExecution(runId: string, snapshotId: 
string, runnerId?: string) { return wrapZodFetch( WorkerApiContinueRunExecutionRequestBody, - `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/continue`, + `${this.apiUrl}/engine/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/continue`, { method: "GET", headers: { @@ -222,7 +222,7 @@ export class SupervisorHttpClient { getSuspendCompletionUrl(runId: string, snapshotId: string, runnerId?: string) { return { - url: `${this.apiUrl}/api/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/suspend`, + url: `${this.apiUrl}/engine/v1/worker-actions/runs/${runId}/snapshots/${snapshotId}/suspend`, headers: { ...this.defaultHeaders, ...this.runnerIdHeader(runnerId), From c63915f2589f448a7130929ff2537bbea397d9ae Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 27 Feb 2025 15:38:44 +0000 Subject: [PATCH 477/485] Moved dev engine endpoints to /engine/ from /api/ --- ....v1.dev.config.ts => engine.v1.dev.config.ts} | 0 ...1.dev.dequeue.ts => engine.v1.dev.dequeue.ts} | 0 ...dev.presence.ts => engine.v1.dev.presence.ts} | 0 ...ine.v1.dev.runs.$runFriendlyId.logs.debug.ts} | 0 ...ots.$snapshotFriendlyId.attempts.complete.ts} | 0 ...pshots.$snapshotFriendlyId.attempts.start.ts} | 0 ...d.snapshots.$snapshotFriendlyId.heartbeat.ts} | 0 ....dev.runs.$runFriendlyId.snapshots.latest.ts} | 0 packages/cli-v3/src/apiClient.ts | 16 ++++++++-------- 9 files changed, 8 insertions(+), 8 deletions(-) rename apps/webapp/app/routes/{api.v1.dev.config.ts => engine.v1.dev.config.ts} (100%) rename apps/webapp/app/routes/{api.v1.dev.dequeue.ts => engine.v1.dev.dequeue.ts} (100%) rename apps/webapp/app/routes/{api.v1.dev.presence.ts => engine.v1.dev.presence.ts} (100%) rename apps/webapp/app/routes/{api.v1.dev.runs.$runFriendlyId.logs.debug.ts => engine.v1.dev.runs.$runFriendlyId.logs.debug.ts} (100%) rename apps/webapp/app/routes/{api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts => 
engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts} (100%) rename apps/webapp/app/routes/{api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts => engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts} (100%) rename apps/webapp/app/routes/{api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts => engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts} (100%) rename apps/webapp/app/routes/{api.v1.dev.runs.$runFriendlyId.snapshots.latest.ts => engine.v1.dev.runs.$runFriendlyId.snapshots.latest.ts} (100%) diff --git a/apps/webapp/app/routes/api.v1.dev.config.ts b/apps/webapp/app/routes/engine.v1.dev.config.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.dev.config.ts rename to apps/webapp/app/routes/engine.v1.dev.config.ts diff --git a/apps/webapp/app/routes/api.v1.dev.dequeue.ts b/apps/webapp/app/routes/engine.v1.dev.dequeue.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.dev.dequeue.ts rename to apps/webapp/app/routes/engine.v1.dev.dequeue.ts diff --git a/apps/webapp/app/routes/api.v1.dev.presence.ts b/apps/webapp/app/routes/engine.v1.dev.presence.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.dev.presence.ts rename to apps/webapp/app/routes/engine.v1.dev.presence.ts diff --git a/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.logs.debug.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.logs.debug.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.logs.debug.ts rename to apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.logs.debug.ts diff --git a/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts similarity index 100% rename from 
apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts rename to apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts diff --git a/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts rename to apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts diff --git a/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts rename to apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts diff --git a/apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.latest.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.latest.ts similarity index 100% rename from apps/webapp/app/routes/api.v1.dev.runs.$runFriendlyId.snapshots.latest.ts rename to apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.latest.ts diff --git a/packages/cli-v3/src/apiClient.ts b/packages/cli-v3/src/apiClient.ts index 7bc58fb735..5e206f413a 100644 --- a/packages/cli-v3/src/apiClient.ts +++ b/packages/cli-v3/src/apiClient.ts @@ -465,7 +465,7 @@ export class CliApiClient { throw new Error("devConfig: No access token"); } - return wrapZodFetch(DevConfigResponseBody, `${this.apiURL}/api/v1/dev/config`, { + return wrapZodFetch(DevConfigResponseBody, `${this.apiURL}/engine/v1/dev/config`, { headers: { Authorization: 
`Bearer ${this.accessToken}`, Accept: "application/json", @@ -478,7 +478,7 @@ export class CliApiClient { throw new Error("connectToPresence: No access token"); } - const eventSource = new EventSource(`${this.apiURL}/api/v1/dev/presence`, { + const eventSource = new EventSource(`${this.apiURL}/engine/v1/dev/presence`, { fetch: (input, init) => fetch(input, { ...init, @@ -516,7 +516,7 @@ export class CliApiClient { throw new Error("devConfig: No access token"); } - return wrapZodFetch(DevDequeueResponseBody, `${this.apiURL}/api/v1/dev/dequeue`, { + return wrapZodFetch(DevDequeueResponseBody, `${this.apiURL}/engine/v1/dev/dequeue`, { method: "POST", headers: { Authorization: `Bearer ${this.accessToken}`, @@ -531,7 +531,7 @@ export class CliApiClient { throw new Error("devConfig: No access token"); } - return wrapZodFetch(z.unknown(), `${this.apiURL}/api/v1/dev/runs/${runId}/logs/debug`, { + return wrapZodFetch(z.unknown(), `${this.apiURL}/engine/v1/dev/runs/${runId}/logs/debug`, { method: "POST", headers: { Authorization: `Bearer ${this.accessToken}`, @@ -545,7 +545,7 @@ export class CliApiClient { private async devGetRunExecutionData(runId: string) { return wrapZodFetch( WorkloadRunLatestSnapshotResponseBody, - `${this.apiURL}/api/v1/dev/runs/${runId}/snapshots/latest`, + `${this.apiURL}/engine/v1/dev/runs/${runId}/snapshots/latest`, { method: "GET", headers: { @@ -563,7 +563,7 @@ export class CliApiClient { ) { return wrapZodFetch( WorkloadHeartbeatResponseBody, - `${this.apiURL}/api/v1/dev/runs/${runId}/snapshots/${snapshotId}/heartbeat`, + `${this.apiURL}/engine/v1/dev/runs/${runId}/snapshots/${snapshotId}/heartbeat`, { method: "POST", headers: { @@ -579,7 +579,7 @@ export class CliApiClient { private async devStartRunAttempt(runId: string, snapshotId: string) { return wrapZodFetch( WorkloadRunAttemptStartResponseBody, - `${this.apiURL}/api/v1/dev/runs/${runId}/snapshots/${snapshotId}/attempts/start`, + 
`${this.apiURL}/engine/v1/dev/runs/${runId}/snapshots/${snapshotId}/attempts/start`, { method: "POST", headers: { @@ -599,7 +599,7 @@ export class CliApiClient { ) { return wrapZodFetch( WorkloadRunAttemptCompleteResponseBody, - `${this.apiURL}/api/v1/dev/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, + `${this.apiURL}/engine/v1/dev/runs/${runId}/snapshots/${snapshotId}/attempts/complete`, { method: "POST", headers: { From 2bb3db41610f2b32c3416b503f9f05af002ad759 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Thu, 27 Feb 2025 16:42:13 +0000 Subject: [PATCH 478/485] Separate /engine/ rate limiter --- apps/webapp/app/entry.server.tsx | 1 + apps/webapp/app/env.server.ts | 16 +++++++++ .../app/services/apiRateLimit.server.ts | 3 -- .../app/services/engineRateLimit.server.ts | 34 +++++++++++++++++++ apps/webapp/server.ts | 2 ++ 5 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 apps/webapp/app/services/engineRateLimit.server.ts diff --git a/apps/webapp/app/entry.server.tsx b/apps/webapp/app/entry.server.tsx index a937c24ba7..35a94607e9 100644 --- a/apps/webapp/app/entry.server.tsx +++ b/apps/webapp/app/entry.server.tsx @@ -208,6 +208,7 @@ const sqsEventConsumer = singleton("sqsEventConsumer", getSharedSqsEventConsumer singleton("RunEngineEventBusHandlers", registerRunEngineEventBusHandlers); export { apiRateLimiter } from "./services/apiRateLimit.server"; +export { engineRateLimiter } from "./services/engineRateLimit.server"; export { socketIo } from "./v3/handleSocketIo.server"; export { wss } from "./v3/handleWebsockets.server"; export { registryProxy } from "./v3/registryProxy.server"; diff --git a/apps/webapp/app/env.server.ts b/apps/webapp/app/env.server.ts index 4922158b57..2db2a9fb42 100644 --- a/apps/webapp/app/env.server.ts +++ b/apps/webapp/app/env.server.ts @@ -506,6 +506,22 @@ const EnvironmentSchema = z.object({ .string() .default(process.env.REDIS_TLS_DISABLED ?? 
"false"), + //API Rate limiting + /** + * @example "60s" + * @example "1m" + * @example "1h" + * @example "1d" + * @example "1000ms" + * @example "1000s" + */ + RUN_ENGINE_RATE_LIMIT_REFILL_INTERVAL: z.string().default("10s"), // refill 250 tokens every 10 seconds + RUN_ENGINE_RATE_LIMIT_MAX: z.coerce.number().int().default(1200), // allow bursts of 750 requests + RUN_ENGINE_RATE_LIMIT_REFILL_RATE: z.coerce.number().int().default(400), // refix 250 tokens every 10 seconds + RUN_ENGINE_RATE_LIMIT_REQUEST_LOGS_ENABLED: z.string().default("0"), + RUN_ENGINE_RATE_LIMIT_REJECTION_LOGS_ENABLED: z.string().default("1"), + RUN_ENGINE_RATE_LIMIT_LIMITER_LOGS_ENABLED: z.string().default("0"), + /** How long should the presence ttl last */ DEV_PRESENCE_TTL_MS: z.coerce.number().int().default(30_000), DEV_PRESENCE_POLL_INTERVAL_MS: z.coerce.number().int().default(5_000), diff --git a/apps/webapp/app/services/apiRateLimit.server.ts b/apps/webapp/app/services/apiRateLimit.server.ts index aeab1fff3f..466aaa98b8 100644 --- a/apps/webapp/app/services/apiRateLimit.server.ts +++ b/apps/webapp/app/services/apiRateLimit.server.ts @@ -59,9 +59,6 @@ export const apiRateLimiter = authorizationRateLimitMiddleware({ "/api/v1/usage/ingest", "/api/v1/auth/jwt/claims", /^\/api\/v1\/runs\/[^\/]+\/attempts$/, // /api/v1/runs/$runFriendlyId/attempts - // run engine DEV endpoints - "/api/v1/dev/dequeue", - "/api/v1/dev/presence", ], log: { rejections: env.API_RATE_LIMIT_REJECTION_LOGS_ENABLED === "1", diff --git a/apps/webapp/app/services/engineRateLimit.server.ts b/apps/webapp/app/services/engineRateLimit.server.ts new file mode 100644 index 0000000000..f34ed0ef44 --- /dev/null +++ b/apps/webapp/app/services/engineRateLimit.server.ts @@ -0,0 +1,34 @@ +import { env } from "~/env.server"; +import { authenticateAuthorizationHeader } from "./apiAuth.server"; +import { authorizationRateLimitMiddleware } from "./authorizationRateLimitMiddleware.server"; +import { Duration } from "./rateLimiter.server"; 
+ +export const engineRateLimiter = authorizationRateLimitMiddleware({ + redis: { + port: env.RATE_LIMIT_REDIS_PORT, + host: env.RATE_LIMIT_REDIS_HOST, + username: env.RATE_LIMIT_REDIS_USERNAME, + password: env.RATE_LIMIT_REDIS_PASSWORD, + tlsDisabled: env.RATE_LIMIT_REDIS_TLS_DISABLED === "true", + clusterMode: env.RATE_LIMIT_REDIS_CLUSTER_MODE_ENABLED === "1", + }, + keyPrefix: "engine", + defaultLimiter: { + type: "tokenBucket", + refillRate: env.RUN_ENGINE_RATE_LIMIT_REFILL_RATE, + interval: env.RUN_ENGINE_RATE_LIMIT_REFILL_INTERVAL as Duration, + maxTokens: env.RUN_ENGINE_RATE_LIMIT_MAX, + }, + limiterCache: { + fresh: 60_000 * 10, // Data is fresh for 10 minutes + stale: 60_000 * 20, // Data is stale after 20 minutes + }, + pathMatchers: [/^\/engine/], + // No paths are whitelisted for engine endpoints + pathWhiteList: [], + log: { + rejections: env.RUN_ENGINE_RATE_LIMIT_REJECTION_LOGS_ENABLED === "1", + requests: env.RUN_ENGINE_RATE_LIMIT_REQUEST_LOGS_ENABLED === "1", + limiter: env.RUN_ENGINE_RATE_LIMIT_LIMITER_LOGS_ENABLED === "1", + }, +}); diff --git a/apps/webapp/server.ts b/apps/webapp/server.ts index 26e30343a2..4b4a6a843e 100644 --- a/apps/webapp/server.ts +++ b/apps/webapp/server.ts @@ -43,6 +43,7 @@ if (process.env.HTTP_SERVER_DISABLED !== "true") { const wss: WebSocketServer | undefined = build.entry.module.wss; const registryProxy: RegistryProxy | undefined = build.entry.module.registryProxy; const apiRateLimiter: RateLimitMiddleware = build.entry.module.apiRateLimiter; + const engineRateLimiter: RateLimitMiddleware = build.entry.module.engineRateLimiter; const runWithHttpContext: RunWithHttpContextFunction = build.entry.module.runWithHttpContext; if (registryProxy && process.env.ENABLE_REGISTRY_PROXY === "true") { @@ -95,6 +96,7 @@ if (process.env.HTTP_SERVER_DISABLED !== "true") { } app.use(apiRateLimiter); + app.use(engineRateLimiter); app.all( "*", From 56d2297004aa3c9d2711be99698dc48be611acd2 Mon Sep 17 00:00:00 2001 From: Matt Aitken <matt@trigger.dev> Date: Fri,
28 Feb 2025 11:04:08 +0000 Subject: [PATCH 479/485] =?UTF-8?q?Added=20parallel=20wait=20prevention,?= =?UTF-8?q?=20it=E2=80=99s=20working=20for=20duration=20waits=20but=20not?= =?UTF-8?q?=20well=20for=20triggerAndWait=20yet?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../run-engine/src/engine/index.ts | 2 +- .../src/v3/runtime/managedRuntimeManager.ts | 75 +++++++++++-------- packages/core/src/v3/workers/taskExecutor.ts | 8 +- .../hello-world/src/trigger/parallel-waits.ts | 12 +-- 4 files changed, 54 insertions(+), 43 deletions(-) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 04c0b71039..f9ad9d89fb 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -2857,7 +2857,7 @@ export class RunEngine { const retriableError = shouldRetryError(taskRunErrorEnhancer(completion.error)); const permanentlyFailRun = async (run?: { status: TaskRunStatus; spanId: string }) => { - // Emit and event so we can complete any spans of stalled executions + // Emit an event so we can complete any spans of stalled executions if (forceRequeue && run) { this.eventBus.emit("runAttemptFailed", { time: failedAt, diff --git a/packages/core/src/v3/runtime/managedRuntimeManager.ts b/packages/core/src/v3/runtime/managedRuntimeManager.ts index 528d5a8755..67ef064498 100644 --- a/packages/core/src/v3/runtime/managedRuntimeManager.ts +++ b/packages/core/src/v3/runtime/managedRuntimeManager.ts @@ -10,6 +10,7 @@ import { } from "../schemas/index.js"; import { ExecutorToWorkerProcessConnection } from "../zodIpc.js"; import { RuntimeManager } from "./manager.js"; +import { preventMultipleWaits } from "./preventMultipleWaits.js"; type Resolver = (value: CompletedWaitpoint) => void; @@ -19,6 +20,8 @@ export class ManagedRuntimeManager implements RuntimeManager { // Maps a waitpoint ID to a wait ID private readonly 
resolversByWaitpoint: Map = new Map(); + private _preventMultipleWaits = preventMultipleWaits(); + constructor( private ipc: ExecutorToWorkerProcessConnection, private showLogs: boolean @@ -36,14 +39,16 @@ export class ManagedRuntimeManager implements RuntimeManager { } async waitForTask(params: { id: string; ctx: TaskRunContext }): Promise { - const promise = new Promise((resolve) => { - this.resolversByWaitId.set(params.id, resolve); - }); + return this._preventMultipleWaits(async () => { + const promise = new Promise((resolve) => { + this.resolversByWaitId.set(params.id, resolve); + }); - const waitpoint = await promise; - const result = this.waitpointToTaskRunExecutionResult(waitpoint); + const waitpoint = await promise; + const result = this.waitpointToTaskRunExecutionResult(waitpoint); - return result; + return result; + }); } async waitForBatch(params: { @@ -51,25 +56,27 @@ export class ManagedRuntimeManager implements RuntimeManager { runCount: number; ctx: TaskRunContext; }): Promise { - if (!params.runCount) { - return Promise.resolve({ id: params.id, items: [] }); - } + return this._preventMultipleWaits(async () => { + if (!params.runCount) { + return Promise.resolve({ id: params.id, items: [] }); + } + + const promise = Promise.all( + Array.from({ length: params.runCount }, (_, index) => { + const resolverId = `${params.id}_${index}`; + return new Promise((resolve, reject) => { + this.resolversByWaitId.set(resolverId, resolve); + }); + }) + ); + + const waitpoints = await promise; - const promise = Promise.all( - Array.from({ length: params.runCount }, (_, index) => { - const resolverId = `${params.id}_${index}`; - return new Promise((resolve, reject) => { - this.resolversByWaitId.set(resolverId, resolve); - }); - }) - ); - - const waitpoints = await promise; - - return { - id: params.id, - items: waitpoints.map(this.waitpointToTaskRunExecutionResult), - }; + return { + id: params.id, + items: waitpoints.map(this.waitpointToTaskRunExecutionResult), + }; 
+ }); } async waitForWaitpoint({ @@ -79,17 +86,19 @@ export class ManagedRuntimeManager implements RuntimeManager { waitpointFriendlyId: string; finishDate?: Date; }): Promise { - const promise = new Promise((resolve) => { - this.resolversByWaitId.set(waitpointFriendlyId, resolve); - }); + return this._preventMultipleWaits(async () => { + const promise = new Promise((resolve) => { + this.resolversByWaitId.set(waitpointFriendlyId, resolve); + }); - const waitpoint = await promise; + const waitpoint = await promise; - return { - ok: !waitpoint.outputIsError, - output: waitpoint.output, - outputType: waitpoint.outputType, - }; + return { + ok: !waitpoint.outputIsError, + output: waitpoint.output, + outputType: waitpoint.outputType, + }; + }); } associateWaitWithWaitpoint(waitId: string, waitpointId: string) { diff --git a/packages/core/src/v3/workers/taskExecutor.ts b/packages/core/src/v3/workers/taskExecutor.ts index a28816b639..1482f2e908 100644 --- a/packages/core/src/v3/workers/taskExecutor.ts +++ b/packages/core/src/v3/workers/taskExecutor.ts @@ -540,6 +540,10 @@ export class TaskExecutor { return { status: "noop" }; } + if (isInternalError(error) && error.skipRetrying) { + return { status: "skipped", error }; + } + if ( error instanceof Error && (error.name === "AbortTaskRunError" || error.name === "TaskPayloadParsedError") @@ -547,10 +551,6 @@ export class TaskExecutor { return { status: "skipped" }; } - if (isInternalError(error) && error.skipRetrying) { - return { status: "skipped", error }; - } - if (execution.run.maxAttempts) { retry.maxAttempts = Math.max(execution.run.maxAttempts, 1); } diff --git a/references/hello-world/src/trigger/parallel-waits.ts b/references/hello-world/src/trigger/parallel-waits.ts index eddfcfcc6b..76a8eee0d9 100644 --- a/references/hello-world/src/trigger/parallel-waits.ts +++ b/references/hello-world/src/trigger/parallel-waits.ts @@ -6,12 +6,14 @@ import { childTask } from "./example.js"; */ export const parallelWaits = task({ 
id: "parallel-waits", - run: async (payload: any, { ctx }) => { + run: async ({ skipDuration = false }: { skipDuration?: boolean }) => { //parallel wait for 5/10 seconds - await Promise.all([ - wait.for({ seconds: 5 }), - wait.until({ date: new Date(Date.now() + 10_000) }), - ]); + if (!skipDuration) { + await Promise.all([ + wait.for({ seconds: 5 }), + wait.until({ date: new Date(Date.now() + 10_000) }), + ]); + } //parallel task call await Promise.all([ From 6fd1fd920ac8672231009af837b967d3bc584e1c Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 4 Mar 2025 14:06:28 +0000 Subject: [PATCH 480/485] WIP post-merge conflicts --- apps/coordinator/src/checkpointer.ts | 2 +- .../app/v3/services/cancelTaskRunV1.server.ts | 12 ++- .../app/v3/services/triggerTaskV1.server.ts | 91 +++++++++++++------ apps/webapp/app/v3/taskEventStore.server.ts | 17 ++-- 4 files changed, 82 insertions(+), 40 deletions(-) diff --git a/apps/coordinator/src/checkpointer.ts b/apps/coordinator/src/checkpointer.ts index 269bf6d421..69c51e2fb5 100644 --- a/apps/coordinator/src/checkpointer.ts +++ b/apps/coordinator/src/checkpointer.ts @@ -1,5 +1,5 @@ import { ExponentialBackoff } from "@trigger.dev/core/v3/apps"; -import { testDockerCheckpoint } from "@trigger.dev/core/v3/apps"; +import { testDockerCheckpoint } from "@trigger.dev/core/v3/checkpoints"; import { nanoid } from "nanoid"; import fs from "node:fs/promises"; import { ChaosMonkey } from "./chaosMonkey"; diff --git a/apps/webapp/app/v3/services/cancelTaskRunV1.server.ts b/apps/webapp/app/v3/services/cancelTaskRunV1.server.ts index f25a0da86c..78aec652f7 100644 --- a/apps/webapp/app/v3/services/cancelTaskRunV1.server.ts +++ b/apps/webapp/app/v3/services/cancelTaskRunV1.server.ts @@ -9,6 +9,7 @@ import { BaseService } from "./baseService.server"; import { CancelAttemptService } from "./cancelAttempt.server"; import { CancelTaskAttemptDependenciesService } from "./cancelTaskAttemptDependencies.server"; import { FinalizeTaskRunService } 
from "./finalizeTaskRun.server"; +import { getTaskEventStoreTableForRun } from "../taskEventStore.server"; type ExtendedTaskRun = Prisma.TaskRunGetPayload<{ include: { @@ -83,9 +84,14 @@ export class CancelTaskRunServiceV1 extends BaseService { }, }); - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: taskRun.friendlyId, - }); + const inProgressEvents = await eventRepository.queryIncompleteEvents( + getTaskEventStoreTableForRun(taskRun), + { + runId: taskRun.friendlyId, + }, + taskRun.createdAt, + taskRun.completedAt ?? undefined + ); logger.debug("Cancelling in-progress events", { inProgressEvents: inProgressEvents.map((event) => event.id), diff --git a/apps/webapp/app/v3/services/triggerTaskV1.server.ts b/apps/webapp/app/v3/services/triggerTaskV1.server.ts index 480e8c251d..e8a89c00ec 100644 --- a/apps/webapp/app/v3/services/triggerTaskV1.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV1.server.ts @@ -3,6 +3,8 @@ import { packetRequiresOffloading, QueueOptions, SemanticInternalAttributes, + taskRunErrorToString, + taskRunErrorEnhancer, TriggerTaskRequestBody, } from "@trigger.dev/core/v3"; import { @@ -39,6 +41,8 @@ import { TriggerTaskServiceOptions, TriggerTaskServiceResult, } from "./triggerTask.server"; +import { getTaskEventStore } from "../taskEventStore.server"; +import { enqueueRun } from "./enqueueRun.server"; /** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. 
*/ export class TriggerTaskServiceV1 extends BaseService { @@ -168,6 +172,8 @@ export class TriggerTaskServiceV1 extends BaseService { taskIdentifier: true, rootTaskRunId: true, depth: true, + queueTimestamp: true, + queue: true, }, }, }, @@ -224,6 +230,8 @@ export class TriggerTaskServiceV1 extends BaseService { taskIdentifier: true, rootTaskRunId: true, depth: true, + queueTimestamp: true, + queue: true, }, }, }, @@ -276,7 +284,7 @@ export class TriggerTaskServiceV1 extends BaseService { : undefined; try { - return await eventRepository.traceEvent( + const result = await eventRepository.traceEvent( taskId, { context: options.traceContext, @@ -349,6 +357,12 @@ export class TriggerTaskServiceV1 extends BaseService { ? dependentBatchRun.dependentTaskAttempt.taskRun.depth + 1 : 0; + const queueTimestamp = + dependentAttempt?.taskRun.queueTimestamp ?? + dependentBatchRun?.dependentTaskAttempt?.taskRun.queueTimestamp ?? + delayUntil ?? + new Date(); + const taskRun = await tx.taskRun.create({ data: { status: delayUntil ? "DELAYED" : "PENDING", @@ -376,7 +390,9 @@ export class TriggerTaskServiceV1 extends BaseService { isTest: body.options?.test ?? false, delayUntil, queuedAt: delayUntil ? 
undefined : new Date(), + queueTimestamp, maxAttempts: body.options?.maxAttempts, + taskEventStore: getTaskEventStore(), ttl, tags: tagIds.length === 0 @@ -528,44 +544,61 @@ export class TriggerTaskServiceV1 extends BaseService { this._prisma ); - //release the concurrency for the env and org, if part of a (batch)triggerAndWait - if (dependentAttempt) { - const isSameTask = dependentAttempt.taskRun.taskIdentifier === taskId; - await marqs?.releaseConcurrency(dependentAttempt.taskRun.id, isSameTask); - } - if (dependentBatchRun?.dependentTaskAttempt) { - const isSameTask = - dependentBatchRun.dependentTaskAttempt.taskRun.taskIdentifier === taskId; - await marqs?.releaseConcurrency( - dependentBatchRun.dependentTaskAttempt.taskRun.id, - isSameTask - ); - } - if (!run) { return; } - // We need to enqueue the task run into the appropriate queue. This is done after the tx completes to prevent a race condition where the task run hasn't been created yet by the time we dequeue. + // Now enqueue the run if it's not delayed if (run.status === "PENDING") { - await marqs?.enqueueMessage( - environment, - run.queue, - run.id, - { - type: "EXECUTE", - taskIdentifier: taskId, - projectId: environment.projectId, - environmentId: environment.id, - environmentType: environment.type, - }, - body.options?.concurrencyKey - ); + const enqueueResult = await enqueueRun({ + env: environment, + run, + dependentRun: + dependentAttempt?.taskRun ?? 
dependentBatchRun?.dependentTaskAttempt?.taskRun, + }); + + if (!enqueueResult.ok) { + // Now we need to fail the run with enqueueResult.error and make sure and + // set the traced event to failed as well + await this._prisma.taskRun.update({ + where: { id: run.id }, + data: { + status: "SYSTEM_FAILURE", + completedAt: new Date(), + error: enqueueResult.error, + }, + }); + + event.failWithError(enqueueResult.error); + + return { + run, + isCached: false, + error: enqueueResult.error, + }; + } } return { run, isCached: false }; } ); + + if (result?.error) { + throw new ServiceValidationError( + taskRunErrorToString(taskRunErrorEnhancer(result.error)) + ); + } + + const run = result?.run; + + if (!run) { + return; + } + + return { + run, + isCached: result?.isCached, + }; } catch (error) { // Detect a prisma transaction Unique constraint violation if (error instanceof Prisma.PrismaClientKnownRequestError) { diff --git a/apps/webapp/app/v3/taskEventStore.server.ts b/apps/webapp/app/v3/taskEventStore.server.ts index 2a677101c2..ec66fd8ce1 100644 --- a/apps/webapp/app/v3/taskEventStore.server.ts +++ b/apps/webapp/app/v3/taskEventStore.server.ts @@ -20,6 +20,7 @@ export type TraceEvent = Pick< | "level" | "events" | "environmentType" + | "isDebug" >; export type TaskEventStoreTable = "taskEvent" | "taskEventPartitioned"; @@ -122,7 +123,7 @@ export class TaskEventStore { ) { if (table === "taskEventPartitioned") { return await this.readReplica.$queryRaw` - SELECT + SELECT "spanId", "parentId", "runId", @@ -136,11 +137,12 @@ export class TaskEventStore { "isCancelled", level, events, - "environmentType" + "environmentType", + "isDebug" FROM "TaskEventPartitioned" - WHERE - "traceId" = ${traceId} - AND "createdAt" >= ${startCreatedAt.toISOString()}::timestamp + WHERE + "traceId" = ${traceId} + AND "createdAt" >= ${startCreatedAt.toISOString()}::timestamp AND "createdAt" < ${(endCreatedAt ? 
new Date(endCreatedAt.getTime() + env.TASK_EVENT_PARTITIONED_WINDOW_IN_SECONDS * 1000) : new Date() @@ -150,7 +152,7 @@ export class TaskEventStore { `; } else { return await this.readReplica.$queryRaw` - SELECT + SELECT id, "spanId", "parentId", @@ -165,7 +167,8 @@ export class TaskEventStore { "isCancelled", level, events, - "environmentType" + "environmentType", + "isDebug" FROM "TaskEvent" WHERE "traceId" = ${traceId} ORDER BY "startTime" ASC From fe2fb82c7c3463aab79a3d063cb06b04fa9e1e93 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 4 Mar 2025 14:08:38 +0000 Subject: [PATCH 481/485] Set taskEventStore column in the new engine --- apps/webapp/app/v3/services/triggerTaskV2.server.ts | 2 ++ internal-packages/run-engine/src/engine/index.ts | 2 ++ internal-packages/run-engine/src/engine/types.ts | 1 + 3 files changed, 5 insertions(+) diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index e9447a99c2..dacb7e1dee 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -31,6 +31,7 @@ import { TriggerTaskServiceResult, } from "./triggerTask.server"; import { WorkerGroupService } from "./worker/workerGroupService.server"; +import { getTaskEventStore } from "../taskEventStore.server"; /** @deprecated Use TriggerTaskService in `triggerTask.server.ts` instead. */ export class TriggerTaskServiceV2 extends WithRunEngine { @@ -355,6 +356,7 @@ export class TriggerTaskServiceV2 extends WithRunEngine { delayUntil, queuedAt: delayUntil ? 
undefined : new Date(), maxAttempts: body.options?.maxAttempts, + taskEventStore: getTaskEventStore(), ttl, tags, oneTimeUseToken: options.oneTimeUseToken, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index f9ad9d89fb..3a13ba4e81 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -246,6 +246,7 @@ export class RunEngine { delayUntil, queuedAt, maxAttempts, + taskEventStore, priorityMs, ttl, tags, @@ -329,6 +330,7 @@ export class RunEngine { delayUntil, queuedAt, maxAttempts, + taskEventStore, priorityMs, ttl, tags: diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index f8d416f0d0..a708fd1269 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -65,6 +65,7 @@ export type TriggerParams = { delayUntil?: Date; queuedAt?: Date; maxAttempts?: number; + taskEventStore: string; priorityMs?: number; ttl?: string; tags: { id: string; name: string }[]; From d4651444123ab0b4b6142a0fa42ae2ebfe97e8b7 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 4 Mar 2025 14:40:22 +0000 Subject: [PATCH 482/485] Remove duplicate keys --- apps/webapp/app/v3/eventRepository.server.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/apps/webapp/app/v3/eventRepository.server.ts b/apps/webapp/app/v3/eventRepository.server.ts index 871b00b6b1..391b3be1b9 100644 --- a/apps/webapp/app/v3/eventRepository.server.ts +++ b/apps/webapp/app/v3/eventRepository.server.ts @@ -1037,8 +1037,6 @@ export class EventRepository { parentId, tracestate, duration: options.incomplete ? 0 : duration, - isPartial: options.incomplete, - isError: options.isError, isPartial: failedWithError ? 
false : options.incomplete, isError: !!failedWithError, message: message, From 897b512051d57ecaa06a83402d5a70df628b6111 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 4 Mar 2025 15:23:27 +0000 Subject: [PATCH 483/485] Post-merge fixes --- .../webapp/app/v3/runEngineHandlers.server.ts | 221 ++++--- .../app/v3/services/batchTriggerV4.server.ts | 2 + .../changeCurrentDeployment.server.ts | 2 +- .../worker/workerGroupTokenService.server.ts | 1 - apps/webapp/test/GCRARateLimiter.test.ts | 152 +++-- .../webapp/test/fairDequeuingStrategy.test.ts | 596 +++++++++--------- apps/webapp/test/realtimeClient.test.ts | 13 +- .../run-engine/src/engine/errors.ts | 2 + .../run-engine/src/engine/eventBus.ts | 21 +- .../run-engine/src/engine/index.ts | 45 +- 10 files changed, 627 insertions(+), 428 deletions(-) diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index 2eaf53dae6..4bd833974f 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -1,4 +1,4 @@ -import { prisma } from "~/db.server"; +import { $replica, prisma } from "~/db.server"; import { createExceptionPropertiesFromError, eventRepository, @@ -16,23 +16,30 @@ import { RunId } from "@trigger.dev/core/v3/apps"; import { updateMetadataService } from "~/services/metadata/updateMetadata.server"; import { findEnvironmentFromRun } from "~/models/runtimeEnvironment.server"; import { env } from "~/env.server"; +import { getTaskEventStoreTableForRun } from "./taskEventStore.server"; export function registerRunEngineEventBusHandlers() { engine.eventBus.on("runSucceeded", async ({ time, run }) => { try { - const completedEvent = await eventRepository.completeEvent(run.spanId, { - endTime: time, - attributes: { - isError: false, - output: - run.outputType === "application/store" || run.outputType === "text/plain" - ? run.output - : run.output - ? 
(safeJsonParse(run.output) as Attributes) - : undefined, - outputType: run.outputType, - }, - }); + const completedEvent = await eventRepository.completeEvent( + getTaskEventStoreTableForRun(run), + run.spanId, + run.createdAt, + run.completedAt ?? undefined, + { + endTime: time, + attributes: { + isError: false, + output: + run.outputType === "application/store" || run.outputType === "text/plain" + ? run.output + : run.output + ? (safeJsonParse(run.output) as Attributes) + : undefined, + outputType: run.outputType, + }, + } + ); if (!completedEvent) { logger.error("[runSucceeded] Failed to complete event for unknown reason", { @@ -69,21 +76,29 @@ export function registerRunEngineEventBusHandlers() { const sanitizedError = sanitizeError(run.error); const exception = createExceptionPropertiesFromError(sanitizedError); - const completedEvent = await eventRepository.completeEvent(run.spanId, { - endTime: time, - attributes: { - isError: true, - }, - events: [ - { - name: "exception", - time, - properties: { - exception, - }, + const eventStore = getTaskEventStoreTableForRun(run); + + const completedEvent = await eventRepository.completeEvent( + eventStore, + run.spanId, + run.createdAt, + run.completedAt ?? undefined, + { + endTime: time, + attributes: { + isError: true, }, - ], - }); + events: [ + { + name: "exception", + time, + properties: { + exception, + }, + }, + ], + } + ); if (!completedEvent) { logger.error("[runFailed] Failed to complete event for unknown reason", { @@ -93,28 +108,39 @@ export function registerRunEngineEventBusHandlers() { return; } - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: completedEvent?.runId, - }); + const inProgressEvents = await eventRepository.queryIncompleteEvents( + eventStore, + { + runId: completedEvent?.runId, + }, + run.createdAt, + run.completedAt ?? 
undefined + ); await Promise.all( inProgressEvents.map((event) => { try { - const completedEvent = eventRepository.completeEvent(event.spanId, { - endTime: time, - attributes: { - isError: true, - }, - events: [ - { - name: "exception", - time, - properties: { - exception, - }, + const completedEvent = eventRepository.completeEvent( + eventStore, + run.spanId, + run.createdAt, + run.completedAt ?? undefined, + { + endTime: time, + attributes: { + isError: true, }, - ], - }); + events: [ + { + name: "exception", + time, + properties: { + exception, + }, + }, + ], + } + ); if (!completedEvent) { logger.error("[runFailed] Failed to complete in-progress event for unknown reason", { @@ -147,13 +173,19 @@ export function registerRunEngineEventBusHandlers() { try { const sanitizedError = sanitizeError(run.error); const exception = createExceptionPropertiesFromError(sanitizedError); + const eventStore = getTaskEventStoreTableForRun(run); - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: RunId.toFriendlyId(run.id), - spanId: { - not: run.spanId, + const inProgressEvents = await eventRepository.queryIncompleteEvents( + eventStore, + { + runId: RunId.toFriendlyId(run.id), + spanId: { + not: run.spanId, + }, }, - }); + run.createdAt, + run.completedAt ?? 
undefined + ); await Promise.all( inProgressEvents.map((event) => { @@ -173,48 +205,80 @@ export function registerRunEngineEventBusHandlers() { } }); - engine.eventBus.on("cachedRunCompleted", async ({ time, spanId, hasError }) => { + engine.eventBus.on("cachedRunCompleted", async ({ time, span, blockedRunId, hasError }) => { try { - const completedEvent = await eventRepository.completeEvent(spanId, { - endTime: time, - attributes: { - isError: hasError, + const blockedRun = await $replica.taskRun.findFirst({ + select: { + taskEventStore: true, + }, + where: { + id: blockedRunId, }, }); + if (!blockedRun) { + logger.error("[cachedRunCompleted] Blocked run not found", { + blockedRunId, + }); + return; + } + + const eventStore = getTaskEventStoreTableForRun(blockedRun); + + const completedEvent = await eventRepository.completeEvent( + eventStore, + span.id, + span.createdAt, + time, + { + endTime: time, + attributes: { + isError: hasError, + }, + } + ); + if (!completedEvent) { logger.error("[cachedRunCompleted] Failed to complete event for unknown reason", { - spanId, + span, }); return; } } catch (error) { logger.error("[cachedRunCompleted] Failed to complete event for unknown reason", { error: error instanceof Error ? error.message : error, - spanId, + span, }); } }); engine.eventBus.on("runExpired", async ({ time, run }) => { try { - const completedEvent = await eventRepository.completeEvent(run.spanId, { - endTime: time, - attributes: { - isError: true, - }, - events: [ - { - name: "exception", - time, - properties: { - exception: { - message: `Run expired because the TTL (${run.ttl}) was reached`, + const eventStore = getTaskEventStoreTableForRun(run); + + const completedEvent = await eventRepository.completeEvent( + eventStore, + run.spanId, + run.createdAt, + run.completedAt ?? 
undefined, + { + endTime: time, + attributes: { + isError: true, + }, + events: [ + { + name: "exception", + time, + properties: { + exception: { + message: `Run expired because the TTL (${run.ttl}) was reached`, + }, }, }, - }, - ], - }); + ], + } + ); if (!completedEvent) { logger.error("[runFailed] Failed to complete event for unknown reason", { @@ -234,9 +298,16 @@ export function registerRunEngineEventBusHandlers() { engine.eventBus.on("runCancelled", async ({ time, run }) => { try { - const inProgressEvents = await eventRepository.queryIncompleteEvents({ - runId: run.friendlyId, - }); + const eventStore = getTaskEventStoreTableForRun(run); + + const inProgressEvents = await eventRepository.queryIncompleteEvents( + eventStore, + { + runId: run.friendlyId, + }, + run.createdAt, + run.completedAt ?? undefined + ); await Promise.all( inProgressEvents.map((event) => { diff --git a/apps/webapp/app/v3/services/batchTriggerV4.server.ts b/apps/webapp/app/v3/services/batchTriggerV4.server.ts index 94a749d439..aaa945f915 100644 --- a/apps/webapp/app/v3/services/batchTriggerV4.server.ts +++ b/apps/webapp/app/v3/services/batchTriggerV4.server.ts @@ -171,6 +171,7 @@ export class BatchTriggerV4Service extends WithRunEngine { batchId: batch.id, environmentId: environment.id, projectId: environment.projectId, + organizationId: environment.organizationId, }); } @@ -262,6 +263,7 @@ export class BatchTriggerV4Service extends WithRunEngine { batchId: batch.id, environmentId: environment.id, projectId: environment.projectId, + organizationId: environment.organizationId, tx, }); } diff --git a/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts b/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts index 9a28fc503a..a5740bfe90 100644 --- a/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts +++ b/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts @@ -1,8 +1,8 @@ import { WorkerDeployment } from "@trigger.dev/database"; -import { 
CURRENT_DEPLOYMENT_LABEL } from "~/consts"; import { BaseService, ServiceValidationError } from "./baseService.server"; import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; import { compareDeploymentVersions } from "../utils/deploymentVersions"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; export type ChangeCurrentDeploymentDirection = "promote" | "rollback"; diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index ef0bf2e7be..1c58efcf6a 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -17,7 +17,6 @@ import { StartRunAttemptResult, ExecutionResult, MachinePreset, - WaitForDurationResult, MachineResources, CheckpointInput, } from "@trigger.dev/core/v3"; diff --git a/apps/webapp/test/GCRARateLimiter.test.ts b/apps/webapp/test/GCRARateLimiter.test.ts index 9c645310c0..95f0e6118b 100644 --- a/apps/webapp/test/GCRARateLimiter.test.ts +++ b/apps/webapp/test/GCRARateLimiter.test.ts @@ -2,12 +2,15 @@ import { redisTest } from "@internal/testcontainers"; import { describe, expect, vi } from "vitest"; import { GCRARateLimiter } from "../app/v3/GCRARateLimiter.server.js"; // adjust the import as needed +import Redis from "ioredis"; // Extend the timeout to 30 seconds (as in your redis tests) vi.setConfig({ testTimeout: 30_000 }); describe("GCRARateLimiter", () => { - redisTest("should allow a single request when under the rate limit", async ({ redis }) => { + redisTest("should allow a single request when under the rate limit", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const limiter = new GCRARateLimiter({ redis, emissionInterval: 1000, // 1 request per second on average @@ -21,7 +24,9 @@ describe("GCRARateLimiter", () => { redisTest( "should allow bursts up to the configured 
limit and then reject further requests", - async ({ redis }) => { + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const limiter = new GCRARateLimiter({ redis, emissionInterval: 1000, @@ -45,55 +50,67 @@ describe("GCRARateLimiter", () => { } ); - redisTest("should allow a request after the required waiting period", async ({ redis }) => { - const limiter = new GCRARateLimiter({ - redis, - emissionInterval: 1000, - burstTolerance: 3000, - keyPrefix: "test:ratelimit:", - }); + redisTest( + "should allow a request after the required waiting period", + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); - // Exhaust burst capacity with 4 rapid calls. - await limiter.check("user:wait"); - await limiter.check("user:wait"); - await limiter.check("user:wait"); - await limiter.check("user:wait"); + const limiter = new GCRARateLimiter({ + redis, + emissionInterval: 1000, + burstTolerance: 3000, + keyPrefix: "test:ratelimit:", + }); - // The 5th call should be rejected. - const rejection = await limiter.check("user:wait"); - expect(rejection.allowed).toBe(false); - expect(rejection.retryAfter).toBeGreaterThan(0); + // Exhaust burst capacity with 4 rapid calls. + await limiter.check("user:wait"); + await limiter.check("user:wait"); + await limiter.check("user:wait"); + await limiter.check("user:wait"); - // Wait for the period specified in retryAfter (plus a small buffer) - await new Promise((resolve) => setTimeout(resolve, rejection.retryAfter! + 50)); + // The 5th call should be rejected. + const rejection = await limiter.check("user:wait"); + expect(rejection.allowed).toBe(false); + expect(rejection.retryAfter).toBeGreaterThan(0); - // Now the next call should be allowed. - const allowedAfterWait = await limiter.check("user:wait"); - expect(allowedAfterWait.allowed).toBe(true); - }); + // Wait for the period specified in retryAfter (plus a small buffer) + await new Promise((resolve) => setTimeout(resolve, rejection.retryAfter! 
+ 50)); - redisTest("should rate limit independently for different identifiers", async ({ redis }) => { - const limiter = new GCRARateLimiter({ - redis, - emissionInterval: 1000, - burstTolerance: 3000, - keyPrefix: "test:ratelimit:", - }); + // Now the next call should be allowed. + const allowedAfterWait = await limiter.check("user:wait"); + expect(allowedAfterWait.allowed).toBe(true); + } + ); - // For "user:independent", exhaust burst capacity. - await limiter.check("user:independent"); - await limiter.check("user:independent"); - await limiter.check("user:independent"); - await limiter.check("user:independent"); - const rejected = await limiter.check("user:independent"); - expect(rejected.allowed).toBe(false); - - // A different identifier should start fresh. - const fresh = await limiter.check("user:different"); - expect(fresh.allowed).toBe(true); - }); + redisTest( + "should rate limit independently for different identifiers", + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + + const limiter = new GCRARateLimiter({ + redis, + emissionInterval: 1000, + burstTolerance: 3000, + keyPrefix: "test:ratelimit:", + }); + + // For "user:independent", exhaust burst capacity. + await limiter.check("user:independent"); + await limiter.check("user:independent"); + await limiter.check("user:independent"); + await limiter.check("user:independent"); + const rejected = await limiter.check("user:independent"); + expect(rejected.allowed).toBe(false); + + // A different identifier should start fresh. 
+ const fresh = await limiter.check("user:different"); + expect(fresh.allowed).toBe(true); + } + ); + + redisTest("should gradually reduce retryAfter with time", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); - redisTest("should gradually reduce retryAfter with time", async ({ redis }) => { const limiter = new GCRARateLimiter({ redis, emissionInterval: 1000, @@ -120,7 +137,9 @@ describe("GCRARateLimiter", () => { expect(secondRetry).toBeLessThan(firstRetry); }); - redisTest("should expire the key after the TTL", async ({ redis }) => { + redisTest("should expire the key after the TTL", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + // For this test, override keyExpiration to a short value. const keyExpiration = 1500; // 1.5 seconds TTL const limiter = new GCRARateLimiter({ @@ -147,7 +166,9 @@ describe("GCRARateLimiter", () => { expect(stored).toBeNull(); }); - redisTest("should not share state across different key prefixes", async ({ redis }) => { + redisTest("should not share state across different key prefixes", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const limiter1 = new GCRARateLimiter({ redis, emissionInterval: 1000, @@ -174,25 +195,32 @@ describe("GCRARateLimiter", () => { expect(result2.allowed).toBe(true); }); - redisTest("should increment TAT correctly on sequential allowed requests", async ({ redis }) => { - const limiter = new GCRARateLimiter({ - redis, - emissionInterval: 1000, - burstTolerance: 3000, - keyPrefix: "test:ratelimit:", - }); + redisTest( + "should increment TAT correctly on sequential allowed requests", + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); - // The first request should be allowed. 
- const r1 = await limiter.check("user:sequential"); - expect(r1.allowed).toBe(true); + const limiter = new GCRARateLimiter({ + redis, + emissionInterval: 1000, + burstTolerance: 3000, + keyPrefix: "test:ratelimit:", + }); - // Wait a bit longer than the emission interval. - await new Promise((resolve) => setTimeout(resolve, 1100)); - const r2 = await limiter.check("user:sequential"); - expect(r2.allowed).toBe(true); - }); + // The first request should be allowed. + const r1 = await limiter.check("user:sequential"); + expect(r1.allowed).toBe(true); + + // Wait a bit longer than the emission interval. + await new Promise((resolve) => setTimeout(resolve, 1100)); + const r2 = await limiter.check("user:sequential"); + expect(r2.allowed).toBe(true); + } + ); + + redisTest("should throw an error if redis command fails", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); - redisTest("should throw an error if redis command fails", async ({ redis }) => { const limiter = new GCRARateLimiter({ redis, emissionInterval: 1000, diff --git a/apps/webapp/test/fairDequeuingStrategy.test.ts b/apps/webapp/test/fairDequeuingStrategy.test.ts index 94f9f4a3e7..109e49168e 100644 --- a/apps/webapp/test/fairDequeuingStrategy.test.ts +++ b/apps/webapp/test/fairDequeuingStrategy.test.ts @@ -10,13 +10,16 @@ import { import { trace } from "@opentelemetry/api"; import { EnvQueues } from "~/v3/marqs/types.js"; import { MARQS_RESUME_PRIORITY_TIMESTAMP_OFFSET } from "~/v3/marqs/constants.server.js"; +import Redis from "ioredis"; const tracer = trace.getTracer("test"); vi.setConfig({ testTimeout: 30_000 }); // 30 seconds timeout describe("FairDequeuingStrategy", () => { - redisTest("should distribute a single queue from a single env", async ({ redis }) => { + redisTest("should distribute a single queue from a single env", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const strategy = new 
FairDequeuingStrategy({ tracer, @@ -46,7 +49,9 @@ describe("FairDequeuingStrategy", () => { }); }); - redisTest("should respect env concurrency limits", async ({ redis }) => { + redisTest("should respect env concurrency limits", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const strategy = new FairDequeuingStrategy({ tracer, @@ -79,7 +84,9 @@ describe("FairDequeuingStrategy", () => { redisTest( "should give extra concurrency when the env has reserve concurrency", - async ({ redis }) => { + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const strategy = new FairDequeuingStrategy({ tracer, @@ -118,7 +125,9 @@ describe("FairDequeuingStrategy", () => { } ); - redisTest("should respect parentQueueLimit", async ({ redis }) => { + redisTest("should respect parentQueueLimit", async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const strategy = new FairDequeuingStrategy({ tracer, @@ -173,255 +182,267 @@ describe("FairDequeuingStrategy", () => { }); }); - redisTest("should reuse snapshots across calls for the same consumer", async ({ redis }) => { - const keyProducer = createKeyProducer("test"); - const strategy = new FairDequeuingStrategy({ - tracer, - redis, - keys: keyProducer, - defaultEnvConcurrency: 5, - parentQueueLimit: 10, - seed: "test-seed-reuse-1", - reuseSnapshotCount: 1, - }); + redisTest( + "should reuse snapshots across calls for the same consumer", + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); - const now = Date.now(); + const keyProducer = createKeyProducer("test"); + const strategy = new FairDequeuingStrategy({ + tracer, + redis, + keys: keyProducer, + defaultEnvConcurrency: 5, + parentQueueLimit: 10, + seed: "test-seed-reuse-1", + reuseSnapshotCount: 1, + }); - await setupQueue({ - redis, - keyProducer, - 
parentQueue: "parent-queue", - score: now - 3000, - queueId: "queue-1", - orgId: "org-1", - envId: "env-1", - }); + const now = Date.now(); - await setupQueue({ - redis, - keyProducer, - parentQueue: "parent-queue", - score: now - 2000, - queueId: "queue-2", - orgId: "org-2", - envId: "env-2", - }); + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 3000, + queueId: "queue-1", + orgId: "org-1", + envId: "env-1", + }); - await setupQueue({ - redis, - keyProducer, - parentQueue: "parent-queue", - score: now - 1000, - queueId: "queue-3", - orgId: "org-3", - envId: "env-3", - }); + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 2000, + queueId: "queue-2", + orgId: "org-2", + envId: "env-2", + }); - const startDistribute1 = performance.now(); + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 1000, + queueId: "queue-3", + orgId: "org-3", + envId: "env-3", + }); - const envResult = await strategy.distributeFairQueuesFromParentQueue( - "parent-queue", - "consumer-1" - ); - const result = flattenResults(envResult); + const startDistribute1 = performance.now(); - const distribute1Duration = performance.now() - startDistribute1; + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + const result = flattenResults(envResult); - console.log("First distribution took", distribute1Duration, "ms"); + const distribute1Duration = performance.now() - startDistribute1; - expect(result).toHaveLength(3); - // Should only get the two oldest queues - const queue1 = keyProducer.queueKey("org-1", "env-1", "queue-1"); - const queue2 = keyProducer.queueKey("org-2", "env-2", "queue-2"); - const queue3 = keyProducer.queueKey("org-3", "env-3", "queue-3"); - expect(result).toEqual([queue2, queue1, queue3]); + console.log("First distribution took", distribute1Duration, "ms"); - const startDistribute2 = performance.now(); + 
expect(result).toHaveLength(3); + // Should only get the two oldest queues + const queue1 = keyProducer.queueKey("org-1", "env-1", "queue-1"); + const queue2 = keyProducer.queueKey("org-2", "env-2", "queue-2"); + const queue3 = keyProducer.queueKey("org-3", "env-3", "queue-3"); + expect(result).toEqual([queue2, queue1, queue3]); - const result2 = await strategy.distributeFairQueuesFromParentQueue( - "parent-queue", - "consumer-1" - ); + const startDistribute2 = performance.now(); - const distribute2Duration = performance.now() - startDistribute2; + const result2 = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); - console.log("Second distribution took", distribute2Duration, "ms"); + const distribute2Duration = performance.now() - startDistribute2; - // Make sure the second call is more than 9 times faster than the first - expect(distribute2Duration).toBeLessThan(distribute1Duration / 9); + console.log("Second distribution took", distribute2Duration, "ms"); - const startDistribute3 = performance.now(); + // Make sure the second call is more than 9 times faster than the first + expect(distribute2Duration).toBeLessThan(distribute1Duration / 9); - const result3 = await strategy.distributeFairQueuesFromParentQueue( - "parent-queue", - "consumer-1" - ); + const startDistribute3 = performance.now(); - const distribute3Duration = performance.now() - startDistribute3; + const result3 = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); - console.log("Third distribution took", distribute3Duration, "ms"); + const distribute3Duration = performance.now() - startDistribute3; - // Make sure the third call is more than 4 times the second - expect(distribute3Duration).toBeGreaterThan(distribute2Duration * 4); - }); + console.log("Third distribution took", distribute3Duration, "ms"); - redisTest("should fairly distribute queues across environments over time", async ({ redis }) => { - const keyProducer = 
createKeyProducer("test"); - const strategy = new FairDequeuingStrategy({ - tracer, - redis, - keys: keyProducer, - defaultEnvConcurrency: 5, - parentQueueLimit: 100, - seed: "test-seed-5", - }); + // Make sure the third call is more than 4 times the second + expect(distribute3Duration).toBeGreaterThan(distribute2Duration * 4); + } + ); - const now = Date.now(); + redisTest( + "should fairly distribute queues across environments over time", + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + + const keyProducer = createKeyProducer("test"); + const strategy = new FairDequeuingStrategy({ + tracer, + redis, + keys: keyProducer, + defaultEnvConcurrency: 5, + parentQueueLimit: 100, + seed: "test-seed-5", + }); - // Test configuration - const orgs = ["org-1", "org-2", "org-3"]; - const envsPerOrg = 3; // Each org has 3 environments - const queuesPerEnv = 5; // Each env has 5 queues - const iterations = 1000; + const now = Date.now(); - // Setup queues - for (const orgId of orgs) { - for (let envNum = 1; envNum <= envsPerOrg; envNum++) { - const envId = `env-${orgId}-${envNum}`; + // Test configuration + const orgs = ["org-1", "org-2", "org-3"]; + const envsPerOrg = 3; // Each org has 3 environments + const queuesPerEnv = 5; // Each env has 5 queues + const iterations = 1000; - for (let queueNum = 1; queueNum <= queuesPerEnv; queueNum++) { - await setupQueue({ + // Setup queues + for (const orgId of orgs) { + for (let envNum = 1; envNum <= envsPerOrg; envNum++) { + const envId = `env-${orgId}-${envNum}`; + + for (let queueNum = 1; queueNum <= queuesPerEnv; queueNum++) { + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + // Vary the ages slightly + score: now - Math.random() * 10000, + queueId: `queue-${orgId}-${envId}-${queueNum}`, + orgId, + envId, + }); + } + + // Setup reasonable concurrency limits + await setupConcurrency({ redis, keyProducer, - parentQueue: "parent-queue", - // Vary the ages slightly - score: now - 
Math.random() * 10000, - queueId: `queue-${orgId}-${envId}-${queueNum}`, - orgId, - envId, + env: { id: envId, currentConcurrency: 1, limit: 5 }, }); } - - // Setup reasonable concurrency limits - await setupConcurrency({ - redis, - keyProducer, - env: { id: envId, currentConcurrency: 1, limit: 5 }, - }); } - } - // Track distribution statistics - type PositionStats = { - firstPosition: number; // Count of times this env/org was first - positionSums: number; // Sum of positions (for averaging) - appearances: number; // Total number of appearances - }; - - const envStats: Record = {}; - const orgStats: Record = {}; - - // Initialize stats objects - for (const orgId of orgs) { - orgStats[orgId] = { firstPosition: 0, positionSums: 0, appearances: 0 }; - for (let envNum = 1; envNum <= envsPerOrg; envNum++) { - const envId = `env-${orgId}-${envNum}`; - envStats[envId] = { firstPosition: 0, positionSums: 0, appearances: 0 }; - } - } + // Track distribution statistics + type PositionStats = { + firstPosition: number; // Count of times this env/org was first + positionSums: number; // Sum of positions (for averaging) + appearances: number; // Total number of appearances + }; - // Run multiple iterations - for (let i = 0; i < iterations; i++) { - const envResult = await strategy.distributeFairQueuesFromParentQueue( - "parent-queue", - `consumer-${i % 3}` // Simulate 3 different consumers - ); - const result = flattenResults(envResult); + const envStats: Record = {}; + const orgStats: Record = {}; - // Track positions of queues - result.forEach((queueId, position) => { - const orgId = keyProducer.orgIdFromQueue(queueId); - const envId = keyProducer.envIdFromQueue(queueId); + // Initialize stats objects + for (const orgId of orgs) { + orgStats[orgId] = { firstPosition: 0, positionSums: 0, appearances: 0 }; + for (let envNum = 1; envNum <= envsPerOrg; envNum++) { + const envId = `env-${orgId}-${envNum}`; + envStats[envId] = { firstPosition: 0, positionSums: 0, appearances: 0 
}; + } + } - // Update org stats - orgStats[orgId].appearances++; - orgStats[orgId].positionSums += position; - if (position === 0) orgStats[orgId].firstPosition++; + // Run multiple iterations + for (let i = 0; i < iterations; i++) { + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + `consumer-${i % 3}` // Simulate 3 different consumers + ); + const result = flattenResults(envResult); - // Update env stats - envStats[envId].appearances++; - envStats[envId].positionSums += position; - if (position === 0) envStats[envId].firstPosition++; - }); - } + // Track positions of queues + result.forEach((queueId, position) => { + const orgId = keyProducer.orgIdFromQueue(queueId); + const envId = keyProducer.envIdFromQueue(queueId); + + // Update org stats + orgStats[orgId].appearances++; + orgStats[orgId].positionSums += position; + if (position === 0) orgStats[orgId].firstPosition++; + + // Update env stats + envStats[envId].appearances++; + envStats[envId].positionSums += position; + if (position === 0) envStats[envId].firstPosition++; + }); + } - // Calculate and log statistics - console.log("\nOrganization Statistics:"); - for (const [orgId, stats] of Object.entries(orgStats)) { - const avgPosition = stats.positionSums / stats.appearances; - const firstPositionPercentage = (stats.firstPosition / iterations) * 100; - console.log(`${orgId}: + // Calculate and log statistics + console.log("\nOrganization Statistics:"); + for (const [orgId, stats] of Object.entries(orgStats)) { + const avgPosition = stats.positionSums / stats.appearances; + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; + console.log(`${orgId}: First Position: ${firstPositionPercentage.toFixed(2)}% Average Position: ${avgPosition.toFixed(2)} Total Appearances: ${stats.appearances}`); - } + } - console.log("\nEnvironment Statistics:"); - for (const [envId, stats] of Object.entries(envStats)) { - const avgPosition = stats.positionSums / 
stats.appearances; - const firstPositionPercentage = (stats.firstPosition / iterations) * 100; - console.log(`${envId}: + console.log("\nEnvironment Statistics:"); + for (const [envId, stats] of Object.entries(envStats)) { + const avgPosition = stats.positionSums / stats.appearances; + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; + console.log(`${envId}: First Position: ${firstPositionPercentage.toFixed(2)}% Average Position: ${avgPosition.toFixed(2)} Total Appearances: ${stats.appearances}`); - } + } - // Verify fairness of first position distribution - const expectedFirstPositionPercentage = 100 / orgs.length; - const firstPositionStdDevOrgs = calculateStandardDeviation( - Object.values(orgStats).map((stats) => (stats.firstPosition / iterations) * 100) - ); - - const expectedEnvFirstPositionPercentage = 100 / (orgs.length * envsPerOrg); - const firstPositionStdDevEnvs = calculateStandardDeviation( - Object.values(envStats).map((stats) => (stats.firstPosition / iterations) * 100) - ); - - // Assert reasonable fairness for first position - expect(firstPositionStdDevOrgs).toBeLessThan(5); // Allow 5% standard deviation for orgs - expect(firstPositionStdDevEnvs).toBeLessThan(5); // Allow 5% standard deviation for envs - - // Verify that each org and env gets a fair chance at first position - for (const [orgId, stats] of Object.entries(orgStats)) { - const firstPositionPercentage = (stats.firstPosition / iterations) * 100; - expect(firstPositionPercentage).toBeGreaterThan(expectedFirstPositionPercentage * 0.7); // Within 30% of expected - expect(firstPositionPercentage).toBeLessThan(expectedFirstPositionPercentage * 1.3); - } + // Verify fairness of first position distribution + const expectedFirstPositionPercentage = 100 / orgs.length; + const firstPositionStdDevOrgs = calculateStandardDeviation( + Object.values(orgStats).map((stats) => (stats.firstPosition / iterations) * 100) + ); - for (const [envId, stats] of Object.entries(envStats)) 
{ - const firstPositionPercentage = (stats.firstPosition / iterations) * 100; - expect(firstPositionPercentage).toBeGreaterThan(expectedEnvFirstPositionPercentage * 0.7); // Within 30% of expected - expect(firstPositionPercentage).toBeLessThan(expectedEnvFirstPositionPercentage * 1.3); - } + const expectedEnvFirstPositionPercentage = 100 / (orgs.length * envsPerOrg); + const firstPositionStdDevEnvs = calculateStandardDeviation( + Object.values(envStats).map((stats) => (stats.firstPosition / iterations) * 100) + ); - // Verify average positions are reasonably distributed - const avgPositionsOrgs = Object.values(orgStats).map( - (stats) => stats.positionSums / stats.appearances - ); - const avgPositionsEnvs = Object.values(envStats).map( - (stats) => stats.positionSums / stats.appearances - ); + // Assert reasonable fairness for first position + expect(firstPositionStdDevOrgs).toBeLessThan(5); // Allow 5% standard deviation for orgs + expect(firstPositionStdDevEnvs).toBeLessThan(5); // Allow 5% standard deviation for envs - const avgPositionStdDevOrgs = calculateStandardDeviation(avgPositionsOrgs); - const avgPositionStdDevEnvs = calculateStandardDeviation(avgPositionsEnvs); + // Verify that each org and env gets a fair chance at first position + for (const [orgId, stats] of Object.entries(orgStats)) { + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; + expect(firstPositionPercentage).toBeGreaterThan(expectedFirstPositionPercentage * 0.7); // Within 30% of expected + expect(firstPositionPercentage).toBeLessThan(expectedFirstPositionPercentage * 1.3); + } - expect(avgPositionStdDevOrgs).toBeLessThan(1); // Average positions should be fairly consistent - expect(avgPositionStdDevEnvs).toBeLessThan(1); - }); + for (const [envId, stats] of Object.entries(envStats)) { + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; + expect(firstPositionPercentage).toBeGreaterThan(expectedEnvFirstPositionPercentage * 0.7); // Within 30% 
of expected + expect(firstPositionPercentage).toBeLessThan(expectedEnvFirstPositionPercentage * 1.3); + } + + // Verify average positions are reasonably distributed + const avgPositionsOrgs = Object.values(orgStats).map( + (stats) => stats.positionSums / stats.appearances + ); + const avgPositionsEnvs = Object.values(envStats).map( + (stats) => stats.positionSums / stats.appearances + ); + + const avgPositionStdDevOrgs = calculateStandardDeviation(avgPositionsOrgs); + const avgPositionStdDevEnvs = calculateStandardDeviation(avgPositionsEnvs); + + expect(avgPositionStdDevOrgs).toBeLessThan(1); // Average positions should be fairly consistent + expect(avgPositionStdDevEnvs).toBeLessThan(1); + } + ); redisTest( "should shuffle environments while maintaining age order within environments", - async ({ redis }) => { + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const strategy = new FairDequeuingStrategy({ tracer, @@ -523,7 +544,9 @@ describe("FairDequeuingStrategy", () => { redisTest( "should bias shuffling based on concurrency limits and available capacity", - async ({ redis }) => { + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const now = Date.now(); @@ -650,102 +673,109 @@ describe("FairDequeuingStrategy", () => { } ); - redisTest("should respect ageInfluence parameter for queue ordering", async ({ redis }) => { - const keyProducer = createKeyProducer("test"); - const now = Date.now(); + redisTest( + "should respect ageInfluence parameter for queue ordering", + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); - // Setup queues with different ages in the same environment - const queueAges = [ - { id: "queue-1", age: 5000 }, // oldest - { id: "queue-2", age: 3000 }, - { id: "queue-3", age: 1000 }, // newest - ]; + const keyProducer = createKeyProducer("test"); + const now = Date.now(); - // Helper 
function to run iterations with a specific age influence - async function runWithQueueAgeRandomization(queueAgeRandomization: number) { - const strategy = new FairDequeuingStrategy({ - tracer, - redis, - keys: keyProducer, - defaultEnvConcurrency: 5, - parentQueueLimit: 100, - seed: "fixed-seed", - biases: { - concurrencyLimitBias: 0, - availableCapacityBias: 0, - queueAgeRandomization, - }, - }); + // Setup queues with different ages in the same environment + const queueAges = [ + { id: "queue-1", age: 5000 }, // oldest + { id: "queue-2", age: 3000 }, + { id: "queue-3", age: 1000 }, // newest + ]; - const positionCounts: Record = { - "queue-1": [0, 0, 0], - "queue-2": [0, 0, 0], - "queue-3": [0, 0, 0], - }; + // Helper function to run iterations with a specific age influence + async function runWithQueueAgeRandomization(queueAgeRandomization: number) { + const strategy = new FairDequeuingStrategy({ + tracer, + redis, + keys: keyProducer, + defaultEnvConcurrency: 5, + parentQueueLimit: 100, + seed: "fixed-seed", + biases: { + concurrencyLimitBias: 0, + availableCapacityBias: 0, + queueAgeRandomization, + }, + }); - const iterations = 1000; - for (let i = 0; i < iterations; i++) { - const envResult = await strategy.distributeFairQueuesFromParentQueue( - "parent-queue", - "consumer-1" - ); - const result = flattenResults(envResult); + const positionCounts: Record = { + "queue-1": [0, 0, 0], + "queue-2": [0, 0, 0], + "queue-3": [0, 0, 0], + }; - result.forEach((queueId, position) => { - const baseQueueId = queueId.split(":").pop()!; - positionCounts[baseQueueId][position]++; - }); + const iterations = 1000; + for (let i = 0; i < iterations; i++) { + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + const result = flattenResults(envResult); + + result.forEach((queueId, position) => { + const baseQueueId = queueId.split(":").pop()!; + positionCounts[baseQueueId][position]++; + }); + } + + return positionCounts; 
} - return positionCounts; - } + // Setup test data + for (const { id, age } of queueAges) { + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - age, + queueId: id, + orgId: "org-1", + envId: "env-1", + }); + } - // Setup test data - for (const { id, age } of queueAges) { - await setupQueue({ + await setupConcurrency({ redis, keyProducer, - parentQueue: "parent-queue", - score: now - age, - queueId: id, - orgId: "org-1", - envId: "env-1", + env: { id: "env-1", currentConcurrency: 0, limit: 5 }, }); - } - - await setupConcurrency({ - redis, - keyProducer, - env: { id: "env-1", currentConcurrency: 0, limit: 5 }, - }); - // Test with different age influence values - const strictAge = await runWithQueueAgeRandomization(0); // Strict age-based ordering - const mixed = await runWithQueueAgeRandomization(0.5); // Mix of age and random - const fullyRandom = await runWithQueueAgeRandomization(1); // Completely random + // Test with different age influence values + const strictAge = await runWithQueueAgeRandomization(0); // Strict age-based ordering + const mixed = await runWithQueueAgeRandomization(0.5); // Mix of age and random + const fullyRandom = await runWithQueueAgeRandomization(1); // Completely random - console.log("Distribution with strict age ordering (0.0):", strictAge); - console.log("Distribution with mixed ordering (0.5):", mixed); - console.log("Distribution with random ordering (1.0):", fullyRandom); + console.log("Distribution with strict age ordering (0.0):", strictAge); + console.log("Distribution with mixed ordering (0.5):", mixed); + console.log("Distribution with random ordering (1.0):", fullyRandom); - // With strict age ordering (0.0), oldest should always be first - expect(strictAge["queue-1"][0]).toBe(1000); // Always in first position - expect(strictAge["queue-3"][0]).toBe(0); // Never in first position + // With strict age ordering (0.0), oldest should always be first + 
expect(strictAge["queue-1"][0]).toBe(1000); // Always in first position + expect(strictAge["queue-3"][0]).toBe(0); // Never in first position - // With fully random (1.0), positions should still allow for some age bias - const randomFirstPositionSpread = Math.abs( - fullyRandom["queue-1"][0] - fullyRandom["queue-3"][0] - ); - expect(randomFirstPositionSpread).toBeLessThan(200); // Allow for larger spread in distribution + // With fully random (1.0), positions should still allow for some age bias + const randomFirstPositionSpread = Math.abs( + fullyRandom["queue-1"][0] - fullyRandom["queue-3"][0] + ); + expect(randomFirstPositionSpread).toBeLessThan(200); // Allow for larger spread in distribution - // With mixed (0.5), should show preference for age but not absolute - expect(mixed["queue-1"][0]).toBeGreaterThan(mixed["queue-3"][0]); // Older preferred - expect(mixed["queue-3"][0]).toBeGreaterThan(0); // But newer still gets chances - }); + // With mixed (0.5), should show preference for age but not absolute + expect(mixed["queue-1"][0]).toBeGreaterThan(mixed["queue-3"][0]); // Older preferred + expect(mixed["queue-3"][0]).toBeGreaterThan(0); // But newer still gets chances + } + ); redisTest( "should respect maximumEnvCount and select envs based on queue ages", - async ({ redis }) => { + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const strategy = new FairDequeuingStrategy({ tracer, @@ -874,7 +904,9 @@ describe("FairDequeuingStrategy", () => { redisTest( "should not overly bias picking environments when queue have priority offset ages", - async ({ redis }) => { + async ({ redisOptions }) => { + const redis = new Redis(redisOptions); + const keyProducer = createKeyProducer("test"); const strategy = new FairDequeuingStrategy({ tracer, diff --git a/apps/webapp/test/realtimeClient.test.ts b/apps/webapp/test/realtimeClient.test.ts index f8aab54fd0..5cfa8c39d9 100644 --- 
a/apps/webapp/test/realtimeClient.test.ts +++ b/apps/webapp/test/realtimeClient.test.ts @@ -1,12 +1,15 @@ import { containerWithElectricAndRedisTest } from "@internal/testcontainers"; import { expect, describe } from "vitest"; import { RealtimeClient } from "../app/services/realtimeClient.server.js"; +import Redis from "ioredis"; describe.skipIf(process.env.GITHUB_ACTIONS)("RealtimeClient", () => { containerWithElectricAndRedisTest( "Should only track concurrency for live requests", { timeout: 30_000 }, - async ({ redis, electricOrigin, prisma }) => { + async ({ redisOptions, electricOrigin, prisma }) => { + const redis = new Redis(redisOptions); + const client = new RealtimeClient({ electricOrigin, keyPrefix: "test:realtime", @@ -146,7 +149,9 @@ describe.skipIf(process.env.GITHUB_ACTIONS)("RealtimeClient", () => { containerWithElectricAndRedisTest( "Should support subscribing to a run tag", { timeout: 30_000 }, - async ({ redis, electricOrigin, prisma }) => { + async ({ redisOptions, electricOrigin, prisma }) => { + const redis = new Redis(redisOptions); + const client = new RealtimeClient({ electricOrigin, keyPrefix: "test:realtime", @@ -229,7 +234,9 @@ describe.skipIf(process.env.GITHUB_ACTIONS)("RealtimeClient", () => { containerWithElectricAndRedisTest( "Should adapt for older client versions", { timeout: 30_000 }, - async ({ redis, electricOrigin, prisma }) => { + async ({ redisOptions, electricOrigin, prisma }) => { + const redis = new Redis(redisOptions); + const client = new RealtimeClient({ electricOrigin, keyPrefix: "test:realtime", diff --git a/internal-packages/run-engine/src/engine/errors.ts b/internal-packages/run-engine/src/engine/errors.ts index 5ba494b4f5..33d9be6961 100644 --- a/internal-packages/run-engine/src/engine/errors.ts +++ b/internal-packages/run-engine/src/engine/errors.ts @@ -12,6 +12,8 @@ export function runStatusFromError(error: TaskRunError): TaskRunStatus { //"SYSTEM_FAILURE" should be used if it's an error from our system //e.g. 
a bug switch (error.code) { + case "RECURSIVE_WAIT_DEADLOCK": + return "COMPLETED_WITH_ERRORS"; case "TASK_RUN_CANCELLED": return "CANCELED"; case "MAX_DURATION_EXCEEDED": diff --git a/internal-packages/run-engine/src/engine/eventBus.ts b/internal-packages/run-engine/src/engine/eventBus.ts index 59d8b0b956..871115ed6a 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -25,6 +25,9 @@ export type EventBusEvents = { spanId: string; error: TaskRunError; attemptNumber: number; + taskEventStore: string; + createdAt: Date; + completedAt: Date | null; }; }, ]; @@ -35,6 +38,9 @@ export type EventBusEvents = { id: string; spanId: string; ttl: string | null; + taskEventStore: string; + createdAt: Date; + completedAt: Date | null; }; }, ]; @@ -46,6 +52,9 @@ export type EventBusEvents = { spanId: string; output: string | undefined; outputType: string; + taskEventStore: string; + createdAt: Date; + completedAt: Date | null; }; }, ]; @@ -57,6 +66,9 @@ export type EventBusEvents = { status: TaskRunStatus; spanId: string; error: TaskRunError; + taskEventStore: string; + createdAt: Date; + completedAt: Date | null; }; }, ]; @@ -88,14 +100,21 @@ export type EventBusEvents = { friendlyId: string; spanId: string; error: TaskRunError; + taskEventStore: string; + createdAt: Date; + completedAt: Date | null; }; }, ]; cachedRunCompleted: [ { time: Date; - spanId: string; + span: { + id: string; + createdAt: Date; + }; hasError: boolean; + blockedRunId: string; }, ]; runMetadataUpdated: [ diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 3a13ba4e81..c22fcaf365 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1455,7 +1455,9 @@ export class RunEngine { attemptNumber: true, spanId: true, batchId: true, + createdAt: true, completedAt: true, + taskEventStore: true, 
runtimeEnvironment: { select: { organizationId: true, @@ -1525,6 +1527,9 @@ export class RunEngine { id: run.id, friendlyId: run.friendlyId, spanId: run.spanId, + taskEventStore: run.taskEventStore, + createdAt: run.createdAt, + completedAt: run.completedAt, error, }, }); @@ -2091,7 +2096,7 @@ export class RunEngine { // 1. Find the TaskRuns blocked by this waitpoint const affectedTaskRuns = await tx.taskRunWaitpoint.findMany({ where: { waitpointId: id }, - select: { taskRunId: true, spanIdToComplete: true }, + select: { taskRunId: true, spanIdToComplete: true, createdAt: true }, }); if (affectedTaskRuns.length === 0) { @@ -2157,7 +2162,11 @@ export class RunEngine { if (run.spanIdToComplete) { this.eventBus.emit("cachedRunCompleted", { time: new Date(), - spanId: run.spanIdToComplete, + span: { + id: run.spanIdToComplete, + createdAt: run.createdAt, + }, + blockedRunId: run.taskRunId, hasError: output?.isError ?? false, }); } @@ -2610,6 +2619,9 @@ export class RunEngine { organizationId: true, }, }, + createdAt: true, + completedAt: true, + taskEventStore: true, }, }); @@ -2748,6 +2760,9 @@ export class RunEngine { }, }, batchId: true, + createdAt: true, + completedAt: true, + taskEventStore: true, }, }); const newSnapshot = await getLatestExecutionSnapshot(prisma, runId); @@ -2783,6 +2798,9 @@ export class RunEngine { spanId: run.spanId, output: completion.output, outputType: completion.outputType, + createdAt: run.createdAt, + completedAt: run.completedAt, + taskEventStore: run.taskEventStore, }, }); @@ -2858,7 +2876,13 @@ export class RunEngine { const error = sanitizeError(completion.error); const retriableError = shouldRetryError(taskRunErrorEnhancer(completion.error)); - const permanentlyFailRun = async (run?: { status: TaskRunStatus; spanId: string }) => { + const permanentlyFailRun = async (run?: { + status: TaskRunStatus; + spanId: string; + createdAt: Date; + completedAt: Date | null; + taskEventStore: string; + }) => { // Emit an event so we can 
complete any spans of stalled executions if (forceRequeue && run) { this.eventBus.emit("runAttemptFailed", { @@ -2869,6 +2893,9 @@ export class RunEngine { spanId: run.spanId, error, attemptNumber: latestSnapshot.attemptNumber ?? 0, + createdAt: run.createdAt, + completedAt: run.completedAt, + taskEventStore: run.taskEventStore, }, }); } @@ -2914,6 +2941,9 @@ export class RunEngine { organizationId: true, }, }, + taskEventStore: true, + createdAt: true, + completedAt: true, }, }); @@ -2944,6 +2974,9 @@ export class RunEngine { spanId: minimalRun.spanId, error, attemptNumber: latestSnapshot.attemptNumber ?? 0, + taskEventStore: minimalRun.taskEventStore, + createdAt: minimalRun.createdAt, + completedAt: minimalRun.completedAt, }, }); } @@ -3094,6 +3127,9 @@ export class RunEngine { organizationId: true, }, }, + taskEventStore: true, + createdAt: true, + completedAt: true, }, }); @@ -3127,6 +3163,9 @@ export class RunEngine { status: run.status, spanId: run.spanId, error, + taskEventStore: run.taskEventStore, + createdAt: run.createdAt, + completedAt: run.completedAt, }, }); From 4a728982a4a1b9a574a5c0b8ce0f9e064c276e69 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 4 Mar 2025 18:34:56 +0000 Subject: [PATCH 484/485] Fix for span merge layout --- .../route.tsx | 120 +++++++++--------- 1 file changed, 58 insertions(+), 62 deletions(-) diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx index d35fcc0605..805cf89b96 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.v3.$projectParam.runs.$runParam.spans.$spanParam/route.tsx @@ -232,70 +232,66 @@ function SpanBody({
-
- {tab === "detail" ? ( -
- - - Status - - - - - - Task - - - {span.taskSlug} - - } - content={`Filter runs by ${span.taskSlug}`} - /> - - - {span.idempotencyKey && ( - - Idempotency key - {span.idempotencyKey} - - )} + {tab === "detail" ? ( +
+ + + Status + + + + + + Task + + + {span.taskSlug} + + } + content={`Filter runs by ${span.taskSlug}`} + /> + + + {span.idempotencyKey && ( - Version - - {span.workerVersion ? ( - span.workerVersion - ) : ( - - Never started - - - )} - + Idempotency key + {span.idempotencyKey} - -
- ) : ( - - )} -
+ )} + + Version + + {span.workerVersion ? ( + span.workerVersion + ) : ( + + Never started + + + )} + + + +
+ ) : ( + + )}
); From 6531e728f5b0d1a267b32aef751af1346b6b3784 Mon Sep 17 00:00:00 2001 From: Matt Aitken Date: Tue, 4 Mar 2025 18:52:04 +0000 Subject: [PATCH 485/485] Use executedAt instead of firstAttemptStartedAt --- .../migration.sql | 9 +++++++++ internal-packages/database/prisma/schema.prisma | 3 --- internal-packages/run-engine/src/engine/index.ts | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 internal-packages/database/prisma/migrations/20250304184614_remove_task_run_first_attempt_started_at_column/migration.sql diff --git a/internal-packages/database/prisma/migrations/20250304184614_remove_task_run_first_attempt_started_at_column/migration.sql b/internal-packages/database/prisma/migrations/20250304184614_remove_task_run_first_attempt_started_at_column/migration.sql new file mode 100644 index 0000000000..fdd51378e7 --- /dev/null +++ b/internal-packages/database/prisma/migrations/20250304184614_remove_task_run_first_attempt_started_at_column/migration.sql @@ -0,0 +1,9 @@ +/* + Warnings: + + - You are about to drop the column `firstAttemptStartedAt` on the `TaskRun` table. All the data in the column will be lost. + +*/ + +-- AlterTable +ALTER TABLE "TaskRun" DROP COLUMN "firstAttemptStartedAt"; diff --git a/internal-packages/database/prisma/schema.prisma b/internal-packages/database/prisma/schema.prisma index e06b59dcd2..e38846e382 100644 --- a/internal-packages/database/prisma/schema.prisma +++ b/internal-packages/database/prisma/schema.prisma @@ -1741,9 +1741,6 @@ model TaskRun { completedAt DateTime? machinePreset String? - /// Run Engine 2.0+ - firstAttemptStartedAt DateTime? 
- usageDurationMs Int @default(0) costInCents Float @default(0) baseCostInCents Float @default(0) diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index c22fcaf365..1e8ada42a7 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1204,7 +1204,7 @@ export class RunEngine { data: { status: "EXECUTING", attemptNumber: nextAttemptNumber, - firstAttemptStartedAt: taskRun.attemptNumber === null ? new Date() : undefined, + executedAt: taskRun.attemptNumber === null ? new Date() : undefined, }, include: { tags: true,