From 184ae5ae861d980622071be942c91823d3f89e47 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 6 Mar 2025 12:04:35 +0000 Subject: [PATCH 01/12] re2: fix @trigger.dev/core exports --- packages/core/package.json | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/packages/core/package.json b/packages/core/package.json index d3f2d38f2a..5fe268a77d 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -54,8 +54,6 @@ "./v3/zodSocket": "./src/v3/zodSocket.ts", "./v3/zodIpc": "./src/v3/zodIpc.ts", "./v3/utils/timers": "./src/v3/utils/timers.ts", - "./v3/dev": "./src/v3/dev/index.ts", - "./v3/prod": "./src/v3/prod/index.ts", "./v3/workers": "./src/v3/workers/index.ts", "./v3/schemas": "./src/v3/schemas/index.ts", "./v3/runEngineWorker": "./src/v3/runEngineWorker/index.ts", @@ -161,12 +159,6 @@ "v3/utils/timers": [ "dist/commonjs/v3/utils/timers.d.ts" ], - "v3/dev": [ - "dist/commonjs/v3/dev/index.d.ts" - ], - "v3/prod": [ - "dist/commonjs/v3/prod/index.d.ts" - ], "v3/workers": [ "dist/commonjs/v3/workers/index.d.ts" ], @@ -612,28 +604,6 @@ "default": "./dist/commonjs/v3/utils/timers.js" } }, - "./v3/dev": { - "import": { - "@triggerdotdev/source": "./src/v3/dev/index.ts", - "types": "./dist/esm/v3/dev/index.d.ts", - "default": "./dist/esm/v3/dev/index.js" - }, - "require": { - "types": "./dist/commonjs/v3/dev/index.d.ts", - "default": "./dist/commonjs/v3/dev/index.js" - } - }, - "./v3/prod": { - "import": { - "@triggerdotdev/source": "./src/v3/prod/index.ts", - "types": "./dist/esm/v3/prod/index.d.ts", - "default": "./dist/esm/v3/prod/index.js" - }, - "require": { - "types": "./dist/commonjs/v3/prod/index.d.ts", - "default": "./dist/commonjs/v3/prod/index.js" - } - }, "./v3/workers": { "import": { "@triggerdotdev/source": "./src/v3/workers/index.ts", From 0e1c8ff232bc9326f9fe3d060b9564a880c0869f Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 6 Mar 2025 14:12:31 +0000 Subject: [PATCH 02/12] re2: WIP env based 
queue selection algo --- .../v3/marqs/fairDequeuingStrategy.server.ts | 1 - internal-packages/redis/package.json | 2 +- internal-packages/redis/src/index.ts | 2 + internal-packages/redis/tsconfig.json | 4 +- internal-packages/run-engine/package.json | 13 +- .../run-queue/fairDequeuingStrategy.server.ts | 622 ++++++++++++++++++ .../run-engine/src/run-queue/index.test.ts | 70 +- .../run-engine/src/run-queue/index.ts | 454 +++---------- .../src/run-queue/keyProducer.test.ts | 4 +- .../run-engine/src/run-queue/keyProducer.ts | 100 ++- .../simpleWeightedPriorityStrategy.ts | 130 ---- .../run-engine/src/run-queue/types.ts | 80 +-- .../run-engine/src/shared/index.ts | 2 +- internal-packages/run-engine/tsconfig.json | 4 +- internal-packages/tracing/README.md | 3 + internal-packages/tracing/package.json | 20 + internal-packages/tracing/src/index.ts | 94 +++ internal-packages/tracing/tsconfig.json | 23 + pnpm-lock.yaml | 37 +- 19 files changed, 1038 insertions(+), 627 deletions(-) create mode 100644 internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.server.ts delete mode 100644 internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts create mode 100644 internal-packages/tracing/README.md create mode 100644 internal-packages/tracing/package.json create mode 100644 internal-packages/tracing/src/index.ts create mode 100644 internal-packages/tracing/tsconfig.json diff --git a/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts b/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts index 986f75c525..617e5e391d 100644 --- a/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts +++ b/apps/webapp/app/v3/marqs/fairDequeuingStrategy.server.ts @@ -1,4 +1,3 @@ -import { flattenAttributes } from "@trigger.dev/core/v3"; import { createCache, DefaultStatefulContext, Namespace, Cache as UnkeyCache } from "@unkey/cache"; import { MemoryStore } from "@unkey/cache/stores"; import { randomUUID } from "crypto"; diff --git 
a/internal-packages/redis/package.json b/internal-packages/redis/package.json index b161c50028..1bd9a146d9 100644 --- a/internal-packages/redis/package.json +++ b/internal-packages/redis/package.json @@ -15,4 +15,4 @@ "scripts": { "typecheck": "tsc --noEmit" } -} +} \ No newline at end of file diff --git a/internal-packages/redis/src/index.ts b/internal-packages/redis/src/index.ts index fb731b6cab..5eb631cb9d 100644 --- a/internal-packages/redis/src/index.ts +++ b/internal-packages/redis/src/index.ts @@ -1,6 +1,8 @@ import { Redis, RedisOptions } from "ioredis"; import { Logger } from "@trigger.dev/core/logger"; +export { Redis, type Callback, type RedisOptions, type Result } from "ioredis"; + const defaultOptions: Partial = { retryStrategy: (times: number) => { const delay = Math.min(times * 50, 1000); diff --git a/internal-packages/redis/tsconfig.json b/internal-packages/redis/tsconfig.json index 5da677943b..0104339620 100644 --- a/internal-packages/redis/tsconfig.json +++ b/internal-packages/redis/tsconfig.json @@ -2,8 +2,8 @@ "compilerOptions": { "target": "ES2019", "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], - "module": "CommonJS", - "moduleResolution": "Node", + "module": "Node16", + "moduleResolution": "Node16", "moduleDetection": "force", "verbatimModuleSyntax": false, "types": ["vitest/globals"], diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index d559891824..23f29d246e 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -4,25 +4,28 @@ "version": "0.0.1", "main": "./src/index.ts", "types": "./src/index.ts", + "type": "module", "dependencies": { "@internal/redis": "workspace:*", "@internal/redis-worker": "workspace:*", - "@opentelemetry/api": "^1.9.0", - "@opentelemetry/semantic-conventions": "^1.27.0", + "@internal/tracing": "workspace:*", "@trigger.dev/core": "workspace:*", "@trigger.dev/database": "workspace:*", "assert-never": 
"^1.2.1", "ioredis": "^5.3.2", "nanoid": "^3.3.4", "redlock": "5.0.0-beta.2", - "zod": "3.23.8" + "zod": "3.23.8", + "@unkey/cache": "^1.5.0", + "seedrandom": "^3.0.5" }, "devDependencies": { "@internal/testcontainers": "workspace:*", - "vitest": "^1.4.0" + "vitest": "^1.4.0", + "@types/seedrandom": "^3.0.8" }, "scripts": { "typecheck": "tsc --noEmit", "test": "vitest --sequence.concurrent=false" } -} +} \ No newline at end of file diff --git a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.server.ts b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.server.ts new file mode 100644 index 0000000000..c3e2279f2f --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.server.ts @@ -0,0 +1,622 @@ +import { createCache, DefaultStatefulContext, Namespace, Cache as UnkeyCache } from "@unkey/cache"; +import { MemoryStore } from "@unkey/cache/stores"; +import { randomUUID } from "crypto"; +import { + EnvDescriptor, + EnvQueues, + RunQueueFairDequeueStrategy, + RunQueueKeyProducer, +} from "./types.js"; +import seedrandom from "seedrandom"; +import { startSpan, type Tracer } from "@internal/tracing"; +import { Redis, type RedisOptions, createRedisClient } from "@internal/redis"; + +export type FairDequeuingStrategyBiases = { + /** + * How much to bias towards environments with higher concurrency limits + * 0 = no bias, 1 = full bias based on limit differences + */ + concurrencyLimitBias: number; + + /** + * How much to bias towards environments with more available capacity + * 0 = no bias, 1 = full bias based on available capacity + */ + availableCapacityBias: number; + + /** + * Controls randomization of queue ordering within environments + * 0 = strict age-based ordering (oldest first) + * 1 = completely random ordering + * Values between 0-1 blend between age-based and random ordering + */ + queueAgeRandomization: number; +}; + +export type FairDequeuingStrategyOptions = { + redis: RedisOptions; + keys: 
RunQueueKeyProducer; + defaultEnvConcurrency?: number; + parentQueueLimit?: number; + tracer?: Tracer; + seed?: string; + /** + * Configure biasing for environment shuffling + * If not provided, no biasing will be applied (completely random shuffling) + */ + biases?: FairDequeuingStrategyBiases; + reuseSnapshotCount?: number; + maximumEnvCount?: number; +}; + +type FairQueueConcurrency = { + current: number; + limit: number; + reserve: number; +}; + +type FairQueue = { id: string; age: number; org: string; env: string; project: string }; + +type FairQueueSnapshot = { + id: string; + envs: Record; + queues: Array; +}; + +type WeightedEnv = { + envId: string; + weight: number; +}; + +type WeightedQueue = { + queue: FairQueue; + weight: number; +}; + +const emptyFairQueueSnapshot: FairQueueSnapshot = { + id: "empty", + envs: {}, + queues: [], +}; + +const defaultBiases: FairDequeuingStrategyBiases = { + concurrencyLimitBias: 0, + availableCapacityBias: 0, + queueAgeRandomization: 0, // Default to completely age-based ordering +}; + +export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { + private _cache: UnkeyCache<{ + concurrencyLimit: number; + }>; + + private _rng: seedrandom.PRNG; + private _reusedSnapshotForConsumer: Map< + string, + { snapshot: FairQueueSnapshot; reuseCount: number } + > = new Map(); + private _redis: Redis; + + private _defaultEnvConcurrency: number; + private _parentQueueLimit: number; + + constructor(private options: FairDequeuingStrategyOptions) { + const ctx = new DefaultStatefulContext(); + const memory = new MemoryStore({ persistentMap: new Map() }); + + this._cache = createCache({ + concurrencyLimit: new Namespace(ctx, { + stores: [memory], + fresh: 60_000, // The time in milliseconds that a value is considered fresh. Cache hits within this time will return the cached value. + stale: 180_000, // The time in milliseconds that a value is considered stale. 
Cache hits within this time will return the cached value and trigger a background refresh. + }), + }); + + this._rng = seedrandom(options.seed); + this._redis = createRedisClient(options.redis); + + this._defaultEnvConcurrency = options.defaultEnvConcurrency ?? 10; + this._parentQueueLimit = options.parentQueueLimit ?? 100; + } + + async distributeFairQueuesFromParentQueue( + parentQueue: string, + consumerId: string + ): Promise> { + return await startSpan( + this.options.tracer, + "distributeFairQueuesFromParentQueue", + async (span) => { + span.setAttribute("consumer_id", consumerId); + span.setAttribute("parent_queue", parentQueue); + + const snapshot = await this.#createQueueSnapshot(parentQueue, consumerId); + + span.setAttributes({ + snapshot_env_count: Object.keys(snapshot.envs).length, + snapshot_queue_count: snapshot.queues.length, + }); + + const queues = snapshot.queues; + + if (queues.length === 0) { + return []; + } + + const envQueues = this.#shuffleQueuesByEnv(snapshot); + + span.setAttribute( + "shuffled_queue_count", + envQueues.reduce((sum, env) => sum + env.queues.length, 0) + ); + + if (envQueues[0]?.queues[0]) { + span.setAttribute("winning_env", envQueues[0].envId); + span.setAttribute( + "winning_org", + this.options.keys.orgIdFromQueue(envQueues[0].queues[0]) + ); + } + + return envQueues; + } + ); + } + + #shuffleQueuesByEnv(snapshot: FairQueueSnapshot): Array { + const envs = Object.keys(snapshot.envs); + const biases = this.options.biases ?? 
defaultBiases; + + if (biases.concurrencyLimitBias === 0 && biases.availableCapacityBias === 0) { + const shuffledEnvs = this.#shuffle(envs); + return this.#orderQueuesByEnvs(shuffledEnvs, snapshot); + } + + // Find the maximum concurrency limit for normalization + const maxLimit = Math.max(...envs.map((envId) => snapshot.envs[envId].concurrency.limit)); + + // Calculate weights for each environment + const weightedEnvs: WeightedEnv[] = envs.map((envId) => { + const env = snapshot.envs[envId]; + + // Start with base weight of 1 + let weight = 1; + + // Add normalized concurrency limit bias if configured + if (biases.concurrencyLimitBias > 0) { + const normalizedLimit = env.concurrency.limit / maxLimit; + // Square or cube the bias to make it more pronounced at higher values + weight *= 1 + Math.pow(normalizedLimit * biases.concurrencyLimitBias, 2); + } + + // Add available capacity bias if configured + if (biases.availableCapacityBias > 0) { + const usedCapacityPercentage = env.concurrency.current / env.concurrency.limit; + const availableCapacityBonus = 1 - usedCapacityPercentage; + // Square or cube the bias to make it more pronounced at higher values + weight *= 1 + Math.pow(availableCapacityBonus * biases.availableCapacityBias, 2); + } + + return { envId, weight }; + }); + + const shuffledEnvs = this.#weightedShuffle(weightedEnvs); + return this.#orderQueuesByEnvs(shuffledEnvs, snapshot); + } + + #weightedShuffle(weightedItems: WeightedEnv[]): string[] { + const totalWeight = weightedItems.reduce((sum, item) => sum + item.weight, 0); + const result: string[] = []; + const items = [...weightedItems]; + + while (items.length > 0) { + let random = this._rng() * totalWeight; + let index = 0; + + // Find item based on weighted random selection + while (random > 0 && index < items.length) { + random -= items[index].weight; + index++; + } + index = Math.max(0, index - 1); + + // Add selected item to result and remove from items + result.push(items[index].envId); + 
items.splice(index, 1); + } + + return result; + } + + // Helper method to maintain DRY principle + // Update return type + #orderQueuesByEnvs(envs: string[], snapshot: FairQueueSnapshot): Array { + const queuesByEnv = snapshot.queues.reduce( + (acc, queue) => { + if (!acc[queue.env]) { + acc[queue.env] = []; + } + acc[queue.env].push(queue); + return acc; + }, + {} as Record> + ); + + return envs.reduce((acc, envId) => { + if (queuesByEnv[envId]) { + // Get ordered queues for this env + const orderedQueues = this.#weightedRandomQueueOrder(queuesByEnv[envId]); + // Only add the env if it has queues + if (orderedQueues.length > 0) { + acc.push({ + envId, + queues: orderedQueues.map((queue) => queue.id), + }); + } + } + return acc; + }, [] as Array); + } + + #weightedRandomQueueOrder(queues: FairQueue[]): FairQueue[] { + if (queues.length <= 1) return queues; + + const biases = this.options.biases ?? defaultBiases; + + // When queueAgeRandomization is 0, use strict age-based ordering + if (biases.queueAgeRandomization === 0) { + return [...queues].sort((a, b) => b.age - a.age); + } + + // Find the maximum age for normalization + const maxAge = Math.max(...queues.map((q) => q.age)); + + // Calculate weights for each queue + const weightedQueues: WeightedQueue[] = queues.map((queue) => { + // Normalize age to be between 0 and 1 + const normalizedAge = queue.age / maxAge; + + // Calculate weight: combine base weight with configurable age influence + const baseWeight = 1; + const weight = baseWeight + normalizedAge * biases.queueAgeRandomization; + + return { queue, weight }; + }); + + // Perform weighted random selection for ordering + const result: FairQueue[] = []; + let remainingQueues = [...weightedQueues]; + let totalWeight = remainingQueues.reduce((sum, wq) => sum + wq.weight, 0); + + while (remainingQueues.length > 0) { + let random = this._rng() * totalWeight; + let index = 0; + + // Find queue based on weighted random selection + while (random > 0 && index < 
remainingQueues.length) { + random -= remainingQueues[index].weight; + index++; + } + index = Math.max(0, index - 1); + + // Add selected queue to result and remove from remaining + result.push(remainingQueues[index].queue); + totalWeight -= remainingQueues[index].weight; + remainingQueues.splice(index, 1); + } + + return result; + } + + #shuffle(array: Array): Array { + let currentIndex = array.length; + let temporaryValue; + let randomIndex; + + const newArray = [...array]; + + while (currentIndex !== 0) { + randomIndex = Math.floor(this._rng() * currentIndex); + currentIndex -= 1; + + temporaryValue = newArray[currentIndex]; + newArray[currentIndex] = newArray[randomIndex]; + newArray[randomIndex] = temporaryValue; + } + + return newArray; + } + + async #createQueueSnapshot(parentQueue: string, consumerId: string): Promise { + return await startSpan(this.options.tracer, "createQueueSnapshot", async (span) => { + span.setAttribute("consumer_id", consumerId); + span.setAttribute("parent_queue", parentQueue); + + if ( + typeof this.options.reuseSnapshotCount === "number" && + this.options.reuseSnapshotCount > 0 + ) { + const key = `${parentQueue}:${consumerId}`; + const reusedSnapshot = this._reusedSnapshotForConsumer.get(key); + + if (reusedSnapshot) { + if (reusedSnapshot.reuseCount < this.options.reuseSnapshotCount) { + span.setAttribute("reused_snapshot", true); + + this._reusedSnapshotForConsumer.set(key, { + snapshot: reusedSnapshot.snapshot, + reuseCount: reusedSnapshot.reuseCount + 1, + }); + + return reusedSnapshot.snapshot; + } else { + this._reusedSnapshotForConsumer.delete(key); + } + } + } + + span.setAttribute("reused_snapshot", false); + + const now = Date.now(); + + let queues = await this.#allChildQueuesByScore(parentQueue, consumerId, now); + + span.setAttribute("parent_queue_count", queues.length); + + if (queues.length === 0) { + return emptyFairQueueSnapshot; + } + + // Apply env selection if maximumEnvCount is specified + let selectedEnvIds: 
Set; + if (this.options.maximumEnvCount && this.options.maximumEnvCount > 0) { + selectedEnvIds = this.#selectTopEnvs(queues, this.options.maximumEnvCount); + // Filter queues to only include selected envs + queues = queues.filter((queue) => selectedEnvIds.has(queue.env)); + + span.setAttribute("selected_env_count", selectedEnvIds.size); + } + + span.setAttribute("selected_queue_count", queues.length); + + const envIds = new Set(); + const envIdToEnvDescriptor = new Map(); + + for (const queue of queues) { + envIds.add(queue.env); + envIdToEnvDescriptor.set(queue.env, this.#envDescriptorFromFairQueue(queue)); + } + + const envs = await Promise.all( + Array.from(envIds).map(async (envId) => { + const envDescriptor = envIdToEnvDescriptor.get(envId); + + if (!envDescriptor) { + throw new Error(`No env descriptor found for envId: ${envId}`); + } + + return { + id: envId, + concurrency: await this.#getEnvConcurrency(envDescriptor), + }; + }) + ); + + const envsAtFullConcurrency = envs.filter( + (env) => env.concurrency.current >= env.concurrency.limit + env.concurrency.reserve + ); + + const envIdsAtFullConcurrency = new Set(envsAtFullConcurrency.map((env) => env.id)); + + const envsSnapshot = envs.reduce( + (acc, env) => { + if (!envIdsAtFullConcurrency.has(env.id)) { + acc[env.id] = env; + } + return acc; + }, + {} as Record + ); + + span.setAttributes({ + env_count: envs.length, + envs_at_full_concurrency_count: envsAtFullConcurrency.length, + }); + + const queuesSnapshot = queues.filter((queue) => !envIdsAtFullConcurrency.has(queue.env)); + + const snapshot = { + id: randomUUID(), + envs: envsSnapshot, + queues: queuesSnapshot, + }; + + if ( + typeof this.options.reuseSnapshotCount === "number" && + this.options.reuseSnapshotCount > 0 + ) { + this._reusedSnapshotForConsumer.set(`${parentQueue}:${consumerId}`, { + snapshot, + reuseCount: 0, + }); + } + + return snapshot; + }); + } + + #selectTopEnvs(queues: FairQueue[], maximumEnvCount: number): Set { + // Group 
queues by env + const queuesByEnv = queues.reduce( + (acc, queue) => { + if (!acc[`${queue.org}:${queue.project}:${queue.env}`]) { + acc[queue.env] = []; + } + acc[queue.env].push(queue); + return acc; + }, + {} as Record + ); + + // Calculate average age for each env + const envAverageAges = Object.entries(queuesByEnv).map(([envId, envQueues]) => { + const averageAge = envQueues.reduce((sum, q) => sum + q.age, 0) / envQueues.length; + return { envId, averageAge }; + }); + + // Perform weighted shuffle based on average ages + const maxAge = Math.max(...envAverageAges.map((e) => e.averageAge)); + const weightedEnvs = envAverageAges.map((env) => ({ + envId: env.envId, + weight: env.averageAge / maxAge, // Normalize weights + })); + + // Select top N envs using weighted shuffle + const selectedEnvs = new Set(); + let remainingEnvs = [...weightedEnvs]; + let totalWeight = remainingEnvs.reduce((sum, env) => sum + env.weight, 0); + + while (selectedEnvs.size < maximumEnvCount && remainingEnvs.length > 0) { + let random = this._rng() * totalWeight; + let index = 0; + + while (random > 0 && index < remainingEnvs.length) { + random -= remainingEnvs[index].weight; + index++; + } + index = Math.max(0, index - 1); + + selectedEnvs.add(remainingEnvs[index].envId); + totalWeight -= remainingEnvs[index].weight; + remainingEnvs.splice(index, 1); + } + + return selectedEnvs; + } + + async #getEnvConcurrency(env: EnvDescriptor): Promise { + return await startSpan(this.options.tracer, "getEnvConcurrency", async (span) => { + span.setAttribute("env_id", env.envId); + span.setAttribute("org_id", env.orgId); + span.setAttribute("project_id", env.projectId); + + const [currentValue, limitValue, reserveValue] = await Promise.all([ + this.#getEnvCurrentConcurrency(env), + this.#getEnvConcurrencyLimit(env), + this.#getEnvReserveConcurrency(env), + ]); + + span.setAttribute("current_value", currentValue); + span.setAttribute("limit_value", limitValue); + span.setAttribute("reserve_value", 
reserveValue); + + return { current: currentValue, limit: limitValue, reserve: reserveValue }; + }); + } + + async #allChildQueuesByScore( + parentQueue: string, + consumerId: string, + now: number + ): Promise> { + return await startSpan(this.options.tracer, "allChildQueuesByScore", async (span) => { + span.setAttribute("consumer_id", consumerId); + span.setAttribute("parent_queue", parentQueue); + + const valuesWithScores = await this._redis.zrangebyscore( + parentQueue, + "-inf", + now, + "WITHSCORES", + "LIMIT", + 0, + this._parentQueueLimit + ); + + const result: Array = []; + + for (let i = 0; i < valuesWithScores.length; i += 2) { + result.push({ + id: valuesWithScores[i], + age: now - Number(valuesWithScores[i + 1]), + env: this.options.keys.envIdFromQueue(valuesWithScores[i]), + org: this.options.keys.orgIdFromQueue(valuesWithScores[i]), + project: this.options.keys.projectIdFromQueue(valuesWithScores[i]), + }); + } + + span.setAttribute("queue_count", result.length); + + return result; + }); + } + + async #getEnvConcurrencyLimit(env: EnvDescriptor) { + return await startSpan(this.options.tracer, "getEnvConcurrencyLimit", async (span) => { + span.setAttribute("env_id", env.envId); + + const key = this.options.keys.envConcurrencyLimitKey(env); + + const result = await this._cache.concurrencyLimit.swr(key, async () => { + const value = await this._redis.get(key); + + if (!value) { + return this._defaultEnvConcurrency; + } + + return Number(value); + }); + + return result.val ?? 
this._defaultEnvConcurrency; + }); + } + + async #getEnvCurrentConcurrency(env: EnvDescriptor) { + return await startSpan(this.options.tracer, "getEnvCurrentConcurrency", async (span) => { + span.setAttribute("env_id", env.envId); + span.setAttribute("org_id", env.orgId); + span.setAttribute("project_id", env.projectId); + + const key = this.options.keys.envCurrentConcurrencyKey(env); + + const result = await this._redis.scard(key); + + span.setAttribute("current_value", result); + + return result; + }); + } + + async #getEnvReserveConcurrency(env: EnvDescriptor) { + return await startSpan(this.options.tracer, "getEnvReserveConcurrency", async (span) => { + span.setAttribute("env_id", env.envId); + span.setAttribute("org_id", env.orgId); + span.setAttribute("project_id", env.projectId); + + const key = this.options.keys.envReserveConcurrencyKey(env); + + const result = await this._redis.scard(key); + + span.setAttribute("current_value", result); + + return result; + }); + } + + #envDescriptorFromFairQueue(queue: FairQueue): EnvDescriptor { + return { + envId: queue.env, + projectId: queue.project, + orgId: queue.org, + }; + } +} + +export class NoopFairDequeuingStrategy implements RunQueueFairDequeueStrategy { + async distributeFairQueuesFromParentQueue( + parentQueue: string, + consumerId: string + ): Promise> { + return []; + } +} diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 307a95ebdb..755daa0b8d 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -1,19 +1,17 @@ import { redisTest } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { Logger } from "@trigger.dev/core/logger"; -import Redis from "ioredis"; import { describe } from "node:test"; import { setTimeout } from "node:timers/promises"; import { RunQueue } 
from "./index.js"; -import { SimpleWeightedChoiceStrategy } from "./simpleWeightedPriorityStrategy.js"; import { InputPayload } from "./types.js"; import { createRedisClient } from "@internal/redis"; +import { FairDequeuingStrategy } from "./fairDequeuingStrategy.server.js"; +import { RunQueueShortKeyProducer } from "./keyProducer.js"; const testOptions = { name: "rq", tracer: trace.getTracer("rq"), - queuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 36 }), - envQueuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 12 }), workers: 1, defaultEnvConcurrency: 25, enableRebalancing: false, @@ -25,6 +23,7 @@ const testOptions = { maxTimeoutInMs: 1_000, randomize: true, }, + keys: new RunQueueShortKeyProducer("rq:"), }; const authenticatedEnvProd = { @@ -71,6 +70,14 @@ describe("RunQueue", () => { redisTest("Get/set Queue concurrency limit", { timeout: 15_000 }, async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -113,6 +120,14 @@ describe("RunQueue", () => { redisTest("Update env concurrency limits", { timeout: 5_000 }, async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -145,6 +160,14 @@ describe("RunQueue", () => { async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: 
redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -422,43 +445,6 @@ describe("RunQueue", () => { } ); - redisTest("Get shared queue details", { timeout: 5_000 }, async ({ redisContainer }) => { - const queue = new RunQueue({ - ...testOptions, - redis: { - keyPrefix: "runqueue:test:", - host: redisContainer.getHost(), - port: redisContainer.getPort(), - }, - }); - - try { - const result = await queue.getSharedQueueDetails("main", 10); - expect(result.selectionId).toBe("getSharedQueueDetails"); - expect(result.queueCount).toBe(0); - expect(result.queueChoice.choices).toStrictEqual({ abort: true }); - - await queue.enqueueMessage({ - env: authenticatedEnvProd, - message: messageProd, - masterQueues: "main", - }); - - const result2 = await queue.getSharedQueueDetails("main", 10); - expect(result2.selectionId).toBe("getSharedQueueDetails"); - expect(result2.queueCount).toBe(1); - expect(result2.queues[0].score).toBe(messageProd.timestamp); - if (!Array.isArray(result2.queueChoice.choices)) { - throw new Error("Expected queueChoice.choices to be an array"); - } - expect(result2.queueChoice.choices[0]).toBe( - "{org:o1234}:proj:p1234:env:e1234:queue:task/my-task" - ); - } finally { - await queue.quit(); - } - }); - redisTest("Acking", { timeout: 5_000 }, async ({ redisContainer, redisOptions }) => { const queue = new RunQueue({ ...testOptions, diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 942300772f..a0458daa5c 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -1,27 +1,34 @@ -import { context, propagation, Span, SpanKind, SpanOptions, Tracer } from "@opentelemetry/api"; import { + context, + propagation, + Span, + SpanKind, + SpanOptions, + Tracer, SEMATTRS_MESSAGE_ID, SEMATTRS_MESSAGING_OPERATION, SEMATTRS_MESSAGING_SYSTEM, -} from 
"@opentelemetry/semantic-conventions"; +} from "@internal/tracing"; import { Logger } from "@trigger.dev/core/logger"; -import { calculateNextRetryDelay, flattenAttributes } from "@trigger.dev/core/v3"; +import { calculateNextRetryDelay } from "@trigger.dev/core/v3"; import { type RetryOptions } from "@trigger.dev/core/v3/schemas"; -import { Redis, type Callback, type RedisOptions, type Result } from "ioredis"; import { attributesFromAuthenticatedEnv, MinimalAuthenticatedEnvironment, } from "../shared/index.js"; -import { RunQueueShortKeyProducer } from "./keyProducer.js"; import { InputPayload, OutputPayload, - QueueCapacities, - QueueRange, RunQueueKeyProducer, - RunQueuePriorityStrategy, + RunQueueFairDequeueStrategy, } from "./types.js"; -import { createRedisClient } from "@internal/redis"; +import { + createRedisClient, + type Redis, + type Callback, + type RedisOptions, + type Result, +} from "@internal/redis"; const SemanticAttributes = { QUEUE: "runqueue.queue", @@ -38,8 +45,8 @@ export type RunQueueOptions = { redis: RedisOptions; defaultEnvConcurrency: number; windowSize?: number; - queuePriorityStrategy: RunQueuePriorityStrategy; - envQueuePriorityStrategy: RunQueuePriorityStrategy; + keys: RunQueueKeyProducer; + queuePriorityStrategy: RunQueueFairDequeueStrategy; verbose?: boolean; logger: Logger; retryOptions?: RetryOptions; @@ -68,7 +75,7 @@ export class RunQueue { private logger: Logger; private redis: Redis; public keys: RunQueueKeyProducer; - private queuePriorityStrategy: RunQueuePriorityStrategy; + private queuePriorityStrategy: RunQueueFairDequeueStrategy; constructor(private readonly options: RunQueueOptions) { this.retryOptions = options.retryOptions ?? 
defaultRetrySettings; @@ -82,7 +89,7 @@ export class RunQueue { }); this.logger = options.logger; - this.keys = new RunQueueShortKeyProducer("rq:"); + this.keys = options.keys; this.queuePriorityStrategy = options.queuePriorityStrategy; this.subscriber = createRedisClient(options.redis, { @@ -241,36 +248,6 @@ export class RunQueue { ); } - public async getSharedQueueDetails(masterQueue: string, maxCount: number) { - const { range } = await this.queuePriorityStrategy.nextCandidateSelection( - masterQueue, - "getSharedQueueDetails" - ); - const queues = await this.#getChildQueuesWithScores(masterQueue, range); - - const queuesWithScores = await this.#calculateQueueScores(queues, (queue) => - this.#calculateMessageQueueCapacities(queue) - ); - - // We need to priority shuffle here to ensure all workers aren't just working on the highest priority queue - const result = this.queuePriorityStrategy.chooseQueues( - queuesWithScores, - masterQueue, - "getSharedQueueDetails", - range, - maxCount - ); - - return { - selectionId: "getSharedQueueDetails", - queues, - queuesWithScores, - nextRange: range, - queueCount: queues.length, - queueChoice: result, - }; - } - /** * Dequeue messages from the master queue */ @@ -282,67 +259,99 @@ export class RunQueue { return this.#trace( "dequeueMessageInSharedQueue", async (span) => { - // Read the parent queue for matching queues - const selectedQueues = await this.#getRandomQueueFromParentQueue( + const envQueues = await this.queuePriorityStrategy.distributeFairQueuesFromParentQueue( masterQueue, - this.options.queuePriorityStrategy, - (queue) => this.#calculateMessageQueueCapacities(queue, { checkForDisabled: true }), - consumerId, - maxCount + consumerId ); - if (!selectedQueues || selectedQueues.length === 0) { + span.setAttribute("environment_count", envQueues.length); + + if (envQueues.length === 0) { return []; } + let attemptedEnvs = 0; + let attemptedQueues = 0; + const messages: DequeuedMessage[] = []; - const 
remainingMessages = selectedQueues.map((q) => q.size); - let currentQueueIndex = 0; + // Keep track of queues we've tried that didn't return a message + const emptyQueues = new Set(); + + // Continue until we've hit max count or tried all queues while (messages.length < maxCount) { - let foundMessage = false; - - // Try each queue once in this round - for (let i = 0; i < selectedQueues.length; i++) { - currentQueueIndex = (currentQueueIndex + i) % selectedQueues.length; - - // Skip if this queue is empty - if (remainingMessages[currentQueueIndex] <= 0) continue; - - const selectedQueue = selectedQueues[currentQueueIndex]; - const queue = selectedQueue.queue; - - const message = await this.#callDequeueMessage({ - messageQueue: queue, - concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), - currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), - envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), - envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), - projectCurrentConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(queue), - messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), - envQueueKey: this.keys.envQueueKeyFromQueue(queue), - taskCurrentConcurrentKeyPrefix: - this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), - }); + // Calculate how many more messages we need + const remainingCount = maxCount - messages.length; + if (remainingCount <= 0) break; + + // Find all available queues across environments that we haven't marked as empty + const availableEnvQueues = envQueues + .map((env) => ({ + env: env, + queues: env.queues.filter((queue) => !emptyQueues.has(queue)), + })) + .filter((env) => env.queues.length > 0); + + if (availableEnvQueues.length === 0) break; + + attemptedEnvs += availableEnvQueues.length; + + // Create a dequeue operation for each environment, taking one queue from each + const dequeueOperations = availableEnvQueues.map(({ env, 
queues }) => { + const queue = queues[0]; + attemptedQueues++; + + return { + queue, + operation: this.#callDequeueMessage({ + messageQueue: queue, + concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), + currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), + envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), + envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), + projectCurrentConcurrencyKey: + this.keys.projectCurrentConcurrencyKeyFromQueue(queue), + messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), + envQueueKey: this.keys.envQueueKeyFromQueue(queue), + taskCurrentConcurrentKeyPrefix: + this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), + }), + }; + }); + // Execute all dequeue operations in parallel + const results = await Promise.all( + dequeueOperations.map(async ({ queue, operation }) => { + const message = await operation; + return { queue, message }; + }) + ); + + // Process results + let foundAnyMessage = false; + for (const { queue, message } of results) { if (message) { messages.push(message); - remainingMessages[currentQueueIndex]--; - foundMessage = true; - break; + foundAnyMessage = true; } else { - // If we failed to get a message, mark this queue as empty - remainingMessages[currentQueueIndex] = 0; + // Mark this queue as empty + emptyQueues.add(queue); } } - // If we couldn't get a message from any queue, break - if (!foundMessage) break; + // If we couldn't get a message from any queue in any env, break + if (!foundAnyMessage) break; + + // If we've marked all queues as empty, break + const totalQueues = envQueues.reduce((sum, env) => sum + env.queues.length, 0); + if (emptyQueues.size >= totalQueues) break; } span.setAttributes({ [SemanticAttributes.RESULT_COUNT]: messages.length, [SemanticAttributes.MASTER_QUEUES]: masterQueue, + attempted_environments: attemptedEnvs, + attempted_queues: attemptedQueues, }); return messages; @@ 
-803,167 +812,6 @@ export class RunQueue { ); } - async #getRandomQueueFromParentQueue( - parentQueue: string, - queuePriorityStrategy: RunQueuePriorityStrategy, - calculateCapacities: (queue: string) => Promise, - consumerId: string, - maxCount: number - ): Promise< - | { - queue: string; - capacities: QueueCapacities; - age: number; - size: number; - }[] - | undefined - > { - return this.#trace( - "getRandomQueueFromParentQueue", - async (span) => { - span.setAttribute("consumerId", consumerId); - - const { range } = await queuePriorityStrategy.nextCandidateSelection( - parentQueue, - consumerId - ); - - const queues = await this.#getChildQueuesWithScores(parentQueue, range, span); - span.setAttribute("queueCount", queues.length); - - const queuesWithScores = await this.#calculateQueueScores(queues, calculateCapacities); - span.setAttribute("queuesWithScoresCount", queuesWithScores.length); - - // We need to priority shuffle here to ensure all workers aren't just working on the highest priority queue - const { choices, nextRange } = queuePriorityStrategy.chooseQueues( - queuesWithScores, - parentQueue, - consumerId, - range, - maxCount - ); - - span.setAttributes({ - ...flattenAttributes(queues, "runqueue.queues"), - }); - span.setAttributes({ - ...flattenAttributes(queuesWithScores, "runqueue.queuesWithScores"), - }); - span.setAttribute("range.offset", range.offset); - span.setAttribute("range.count", range.count); - span.setAttribute("nextRange.offset", nextRange.offset); - span.setAttribute("nextRange.count", nextRange.count); - - if (this.options.verbose || nextRange.offset > 0) { - if (Array.isArray(choices)) { - this.logger.debug(`[${this.name}] getRandomQueueFromParentQueue`, { - queues, - queuesWithScores, - range, - nextRange, - queueCount: queues.length, - queuesWithScoresCount: queuesWithScores.length, - queueChoices: choices, - consumerId, - }); - } else { - this.logger.debug(`[${this.name}] getRandomQueueFromParentQueue`, { - queues, - 
queuesWithScores, - range, - nextRange, - queueCount: queues.length, - queuesWithScoresCount: queuesWithScores.length, - noQueueChoice: true, - consumerId, - }); - } - } - - if (Array.isArray(choices)) { - span.setAttribute("queueChoices", choices); - return queuesWithScores.filter((queue) => choices.includes(queue.queue)); - } else { - span.setAttribute("noQueueChoice", true); - return; - } - }, - { - kind: SpanKind.CONSUMER, - attributes: { - [SEMATTRS_MESSAGING_OPERATION]: "receive", - [SEMATTRS_MESSAGING_SYSTEM]: "runqueue", - [SemanticAttributes.MASTER_QUEUES]: parentQueue, - }, - } - ); - } - - // Calculate the weights of the queues based on the age and the capacity - async #calculateQueueScores( - queues: Array<{ value: string; score: number }>, - calculateCapacities: (queue: string) => Promise - ) { - const now = Date.now(); - - const queueScores = await Promise.all( - queues.map(async (queue) => { - return { - queue: queue.value, - capacities: await calculateCapacities(queue.value), - age: now - queue.score, - size: await this.redis.zcard(queue.value), - }; - }) - ); - - return queueScores; - } - - async #calculateMessageQueueCapacities(queue: string, options?: { checkForDisabled?: boolean }) { - return await this.#callCalculateMessageCapacities({ - currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), - currentEnvConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), - concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), - envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), - disabledConcurrencyLimitKey: options?.checkForDisabled - ? 
this.keys.disabledConcurrencyLimitKeyFromQueue(queue) - : undefined, - }); - } - - async #getChildQueuesWithScores( - key: string, - range: QueueRange, - span?: Span - ): Promise> { - const valuesWithScores = await this.redis.zrangebyscore( - key, - "-inf", - Date.now(), - "WITHSCORES", - "LIMIT", - range.offset, - range.count - ); - - span?.setAttribute("zrangebyscore.valuesWithScores.rawLength", valuesWithScores.length); - span?.setAttributes({ - ...flattenAttributes(valuesWithScores, "zrangebyscore.valuesWithScores.rawValues"), - }); - - const result: Array<{ value: string; score: number }> = []; - - for (let i = 0; i < valuesWithScores.length; i += 2) { - result.push({ - value: valuesWithScores[i], - score: Number(valuesWithScores[i + 1]), - }); - } - - return result; - } - async #callEnqueueMessage(message: OutputPayload, masterQueues: string[]) { const concurrencyKey = this.keys.currentConcurrencyKeyFromQueue(message.queue); const envConcurrencyKey = this.keys.envCurrentConcurrencyKeyFromQueue(message.queue); @@ -1125,50 +973,6 @@ export class RunQueue { ); } - async #callCalculateMessageCapacities({ - currentConcurrencyKey, - currentEnvConcurrencyKey, - concurrencyLimitKey, - envConcurrencyLimitKey, - disabledConcurrencyLimitKey, - }: { - currentConcurrencyKey: string; - currentEnvConcurrencyKey: string; - concurrencyLimitKey: string; - envConcurrencyLimitKey: string; - disabledConcurrencyLimitKey: string | undefined; - }): Promise { - const capacities = disabledConcurrencyLimitKey - ? 
await this.redis.calculateMessageQueueCapacitiesWithDisabling( - currentConcurrencyKey, - currentEnvConcurrencyKey, - concurrencyLimitKey, - envConcurrencyLimitKey, - disabledConcurrencyLimitKey, - String(this.options.defaultEnvConcurrency) - ) - : await this.redis.calculateMessageQueueCapacities( - currentConcurrencyKey, - currentEnvConcurrencyKey, - concurrencyLimitKey, - envConcurrencyLimitKey, - String(this.options.defaultEnvConcurrency) - ); - - const queueCurrent = Number(capacities[0]); - const envLimit = Number(capacities[3]); - const isOrgEnabled = Boolean(capacities[4]); - const queueLimit = capacities[1] - ? Number(capacities[1]) - : Math.min(envLimit, isOrgEnabled ? Infinity : 0); - const envCurrent = Number(capacities[2]); - - return { - queue: { current: queueCurrent, limit: queueLimit }, - env: { current: envCurrent, limit: envLimit }, - }; - } - #callUpdateGlobalConcurrencyLimits({ envConcurrencyLimitKey, envConcurrencyLimit, @@ -1487,61 +1291,6 @@ redis.call('SADD', taskCurrentConcurrencyKey, messageId) `, }); - this.redis.defineCommand("calculateMessageQueueCapacitiesWithDisabling", { - numberOfKeys: 5, - lua: ` --- Keys -local currentConcurrencyKey = KEYS[1] -local currentEnvConcurrencyKey = KEYS[2] -local concurrencyLimitKey = KEYS[3] -local envConcurrencyLimitKey = KEYS[4] -local disabledConcurrencyLimitKey = KEYS[5] - --- Args -local defaultEnvConcurrencyLimit = tonumber(ARGV[1]) - --- Check if disabledConcurrencyLimitKey exists -local orgIsEnabled -if redis.call('EXISTS', disabledConcurrencyLimitKey) == 1 then - orgIsEnabled = false -else - orgIsEnabled = true -end - -local currentEnvConcurrency = tonumber(redis.call('SCARD', currentEnvConcurrencyKey) or '0') -local envConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit) - -local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) or '0') -local concurrencyLimit = redis.call('GET', concurrencyLimitKey) - --- Return current 
capacity and concurrency limits for the queue, env, org -return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, orgIsEnabled } - `, - }); - - this.redis.defineCommand("calculateMessageQueueCapacities", { - numberOfKeys: 4, - lua: ` --- Keys: -local currentConcurrencyKey = KEYS[1] -local currentEnvConcurrencyKey = KEYS[2] -local concurrencyLimitKey = KEYS[3] -local envConcurrencyLimitKey = KEYS[4] - --- Args -local defaultEnvConcurrencyLimit = tonumber(ARGV[1]) - -local currentEnvConcurrency = tonumber(redis.call('SCARD', currentEnvConcurrencyKey) or '0') -local envConcurrencyLimit = tonumber(redis.call('GET', envConcurrencyLimitKey) or defaultEnvConcurrencyLimit) - -local currentConcurrency = tonumber(redis.call('SCARD', currentConcurrencyKey) or '0') -local concurrencyLimit = redis.call('GET', concurrencyLimitKey) - --- Return current capacity and concurrency limits for the queue, env, org -return { currentConcurrency, concurrencyLimit, currentEnvConcurrency, envConcurrencyLimit, true } - `, - }); - this.redis.defineCommand("updateGlobalConcurrencyLimits", { numberOfKeys: 1, lua: ` @@ -1666,25 +1415,6 @@ declare module "ioredis" { callback?: Callback ): Result; - calculateMessageQueueCapacities( - currentConcurrencyKey: string, - currentEnvConcurrencyKey: string, - concurrencyLimitKey: string, - envConcurrencyLimitKey: string, - defaultEnvConcurrencyLimit: string, - callback?: Callback - ): Result<[number, number, number, number, boolean], Context>; - - calculateMessageQueueCapacitiesWithDisabling( - currentConcurrencyKey: string, - currentEnvConcurrencyKey: string, - concurrencyLimitKey: string, - envConcurrencyLimitKey: string, - disabledConcurrencyLimitKey: string, - defaultEnvConcurrencyLimit: string, - callback?: Callback - ): Result<[number, number, number, number, boolean], Context>; - updateGlobalConcurrencyLimits( envConcurrencyLimitKey: string, envConcurrencyLimit: string, diff --git 
a/internal-packages/run-engine/src/run-queue/keyProducer.test.ts b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts index 886d695f59..aa0ce54f43 100644 --- a/internal-packages/run-engine/src/run-queue/keyProducer.test.ts +++ b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts @@ -326,7 +326,7 @@ describe("KeyProducer", () => { }, "task/task-name" ); - const components = keyProducer.extractComponentsFromQueue(queueKey); + const components = keyProducer.descriptorFromQueue(queueKey); expect(components).toEqual({ orgId: "o1234", projectId: "p1234", @@ -349,7 +349,7 @@ describe("KeyProducer", () => { "task/task-name", "c1234" ); - const components = keyProducer.extractComponentsFromQueue(queueKey); + const components = keyProducer.descriptorFromQueue(queueKey); expect(components).toEqual({ orgId: "o1234", projectId: "p1234", diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.ts b/internal-packages/run-engine/src/run-queue/keyProducer.ts index 1ba42f7f0f..69d1e4d66d 100644 --- a/internal-packages/run-engine/src/run-queue/keyProducer.ts +++ b/internal-packages/run-engine/src/run-queue/keyProducer.ts @@ -1,5 +1,5 @@ import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; -import { RunQueueKeyProducer } from "./types.js"; +import { EnvDescriptor, RunQueueKeyProducer } from "./types.js"; const constants = { CURRENT_CONCURRENCY_PART: "currentConcurrency", @@ -12,6 +12,7 @@ const constants = { CONCURRENCY_KEY_PART: "ck", TASK_PART: "task", MESSAGE_PART: "message", + RESERVE_CONCURRENCY_PART: "reserveConcurrency", } as const; export class RunQueueShortKeyProducer implements RunQueueKeyProducer { @@ -37,13 +38,24 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { return [this.queueKey(env, queue), constants.CONCURRENCY_LIMIT_PART].join(":"); } - envConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment) { - return [ - this.orgKeySection(env.organization.id), - this.projKeySection(env.project.id), 
- this.envKeySection(env.id), - constants.CONCURRENCY_LIMIT_PART, - ].join(":"); + envConcurrencyLimitKey(env: EnvDescriptor): string; + envConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment): string; + envConcurrencyLimitKey(envOrDescriptor: EnvDescriptor | MinimalAuthenticatedEnvironment): string { + if ("id" in envOrDescriptor) { + return [ + this.orgKeySection(envOrDescriptor.organization.id), + this.projKeySection(envOrDescriptor.project.id), + this.envKeySection(envOrDescriptor.id), + constants.CONCURRENCY_LIMIT_PART, + ].join(":"); + } else { + return [ + this.orgKeySection(envOrDescriptor.orgId), + this.projKeySection(envOrDescriptor.projectId), + this.envKeySection(envOrDescriptor.envId), + constants.CONCURRENCY_LIMIT_PART, + ].join(":"); + } } queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string) { @@ -62,7 +74,7 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { } envQueueKeyFromQueue(queue: string) { - const { orgId, envId } = this.extractComponentsFromQueue(queue); + const { orgId, envId } = this.descriptorFromQueue(queue); return [this.orgKeySection(orgId), this.envKeySection(envId)].join(":"); } @@ -86,30 +98,62 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { } disabledConcurrencyLimitKeyFromQueue(queue: string) { - const { orgId } = this.extractComponentsFromQueue(queue); + const { orgId } = this.descriptorFromQueue(queue); return `{${constants.ORG_PART}:${orgId}}:${constants.DISABLED_CONCURRENCY_LIMIT_PART}`; } envConcurrencyLimitKeyFromQueue(queue: string) { - const { orgId, envId } = this.extractComponentsFromQueue(queue); + const { orgId, envId } = this.descriptorFromQueue(queue); return `{${constants.ORG_PART}:${orgId}}:${constants.ENV_PART}:${envId}:${constants.CONCURRENCY_LIMIT_PART}`; } envCurrentConcurrencyKeyFromQueue(queue: string) { - const { orgId, envId } = this.extractComponentsFromQueue(queue); + const { orgId, envId } = 
this.descriptorFromQueue(queue); return `{${constants.ORG_PART}:${orgId}}:${constants.ENV_PART}:${envId}:${constants.CURRENT_CONCURRENCY_PART}`; } - envCurrentConcurrencyKey(env: MinimalAuthenticatedEnvironment): string { - return [ - this.orgKeySection(env.organization.id), - this.envKeySection(env.id), - constants.CURRENT_CONCURRENCY_PART, - ].join(":"); + envCurrentConcurrencyKey(env: EnvDescriptor): string; + envCurrentConcurrencyKey(env: MinimalAuthenticatedEnvironment): string; + envCurrentConcurrencyKey( + envOrDescriptor: EnvDescriptor | MinimalAuthenticatedEnvironment + ): string { + if ("id" in envOrDescriptor) { + return [ + this.orgKeySection(envOrDescriptor.organization.id), + this.envKeySection(envOrDescriptor.id), + constants.CURRENT_CONCURRENCY_PART, + ].join(":"); + } else { + return [ + this.orgKeySection(envOrDescriptor.orgId), + this.envKeySection(envOrDescriptor.envId), + constants.CURRENT_CONCURRENCY_PART, + ].join(":"); + } + } + + envReserveConcurrencyKey(env: EnvDescriptor): string; + envReserveConcurrencyKey(env: MinimalAuthenticatedEnvironment): string; + envReserveConcurrencyKey( + envOrDescriptor: EnvDescriptor | MinimalAuthenticatedEnvironment + ): string { + if ("id" in envOrDescriptor) { + return [ + this.orgKeySection(envOrDescriptor.organization.id), + this.envKeySection(envOrDescriptor.id), + constants.RESERVE_CONCURRENCY_PART, + ].join(":"); + } else { + return [ + this.orgKeySection(envOrDescriptor.orgId), + this.envKeySection(envOrDescriptor.envId), + constants.RESERVE_CONCURRENCY_PART, + ].join(":"); + } } taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue: string) { - const { orgId, projectId } = this.extractComponentsFromQueue(queue); + const { orgId, projectId } = this.descriptorFromQueue(queue); return `${[this.orgKeySection(orgId), this.projKeySection(projectId), constants.TASK_PART] .filter(Boolean) @@ -141,14 +185,14 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { } 
projectCurrentConcurrencyKeyFromQueue(queue: string): string { - const { orgId, projectId } = this.extractComponentsFromQueue(queue); + const { orgId, projectId } = this.descriptorFromQueue(queue); return `${this.orgKeySection(orgId)}:${this.projKeySection(projectId)}:${ constants.CURRENT_CONCURRENCY_PART }`; } messageKeyPrefixFromQueue(queue: string) { - const { orgId } = this.extractComponentsFromQueue(queue); + const { orgId } = this.descriptorFromQueue(queue); return `${this.orgKeySection(orgId)}:${constants.MESSAGE_PART}:`; } @@ -158,7 +202,19 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { .join(":"); } - extractComponentsFromQueue(queue: string) { + orgIdFromQueue(queue: string): string { + return this.descriptorFromQueue(queue).orgId; + } + + envIdFromQueue(queue: string): string { + return this.descriptorFromQueue(queue).envId; + } + + projectIdFromQueue(queue: string): string { + return this.descriptorFromQueue(queue).projectId; + } + + descriptorFromQueue(queue: string) { const parts = this.normalizeQueue(queue).split(":"); return { orgId: parts[1].replace("{", "").replace("}", ""), diff --git a/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts b/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts deleted file mode 100644 index 04eb68c7d7..0000000000 --- a/internal-packages/run-engine/src/run-queue/simpleWeightedPriorityStrategy.ts +++ /dev/null @@ -1,130 +0,0 @@ -import { - RunQueuePriorityStrategy, - PriorityStrategyChoice, - QueueRange, - QueueWithScores, -} from "./types.js"; - -export type SimpleWeightedChoiceStrategyOptions = { - queueSelectionCount: number; - randomSeed?: string; - excludeEnvCapacity?: boolean; -}; - -export class SimpleWeightedChoiceStrategy implements RunQueuePriorityStrategy { - private _nextRangesByParentQueue: Map = new Map(); - - constructor(private options: SimpleWeightedChoiceStrategyOptions) {} - - private nextRangeForParentQueue(parentQueue: 
string, consumerId: string): QueueRange { - return ( - this._nextRangesByParentQueue.get(`${consumerId}:${parentQueue}`) ?? { - offset: 0, - count: this.options.queueSelectionCount, - } - ); - } - - chooseQueues( - queues: QueueWithScores[], - parentQueue: string, - consumerId: string, - previousRange: QueueRange, - maxCount: number - ): { choices: PriorityStrategyChoice; nextRange: QueueRange } { - const filteredQueues = filterQueuesAtCapacity(queues); - - if (queues.length === this.options.queueSelectionCount) { - const nextRange: QueueRange = { - offset: previousRange.offset + this.options.queueSelectionCount, - count: this.options.queueSelectionCount, - }; - - // If all queues are at capacity, and we were passed the max number of queues, then we will slide the window "to the right" - this._nextRangesByParentQueue.set(`${consumerId}:${parentQueue}`, nextRange); - } else { - this._nextRangesByParentQueue.delete(`${consumerId}:${parentQueue}`); - } - - if (filteredQueues.length === 0) { - return { - choices: { abort: true }, - nextRange: this.nextRangeForParentQueue(parentQueue, consumerId), - }; - } - - const queueWeights = this.#calculateQueueWeights(filteredQueues); - - const choices = []; - for (let i = 0; i < maxCount; i++) { - const chosenIndex = weightedRandomIndex(queueWeights); - - const choice = queueWeights.at(chosenIndex)?.queue; - if (choice) { - queueWeights.splice(chosenIndex, 1); - choices.push(choice); - } - } - - return { - choices, - nextRange: this.nextRangeForParentQueue(parentQueue, consumerId), - }; - } - - async nextCandidateSelection( - parentQueue: string, - consumerId: string - ): Promise<{ range: QueueRange }> { - return { - range: this.nextRangeForParentQueue(parentQueue, consumerId), - }; - } - - #calculateQueueWeights(queues: QueueWithScores[]) { - const avgQueueSize = queues.reduce((acc, { size }) => acc + size, 0) / queues.length; - const avgMessageAge = queues.reduce((acc, { age }) => acc + age, 0) / queues.length; - - return 
queues.map(({ capacities, age, queue, size }) => { - let totalWeight = 1; - - if (size > avgQueueSize) { - totalWeight += Math.min(size / avgQueueSize, 4); - } - - if (age > avgMessageAge) { - totalWeight += Math.min(age / avgMessageAge, 4); - } - - return { - queue, - totalWeight, - }; - }); - } -} - -function filterQueuesAtCapacity(queues: QueueWithScores[]) { - return queues.filter( - (queue) => - queue.capacities.queue.current < queue.capacities.queue.limit && - queue.capacities.env.current < queue.capacities.env.limit - ); -} - -function weightedRandomIndex(queues: Array<{ queue: string; totalWeight: number }>): number { - const totalWeight = queues.reduce((acc, queue) => acc + queue.totalWeight, 0); - let randomNum = Math.random() * totalWeight; - - for (let i = 0; i < queues.length; i++) { - const queue = queues[i]; - if (randomNum < queue.totalWeight) { - return i; - } - - randomNum -= queue.totalWeight; - } - - // If we get here, we should just return a random queue - return Math.floor(Math.random() * queues.length); -} diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index 2d936264c1..27efb25d64 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -22,24 +22,19 @@ export const OutputPayload = InputPayload.extend({ }); export type OutputPayload = z.infer; -export type QueueCapacity = { - current: number; - limit: number; -}; - -export type QueueCapacities = { - queue: QueueCapacity; - env: QueueCapacity; -}; - -export type QueueWithScores = { +export type QueueDescriptor = { + orgId: string; + projectId: string; + envId: string; queue: string; - capacities: QueueCapacities; - age: number; - size: number; + concurrencyKey: string | undefined; }; -export type QueueRange = { offset: number; count: number }; +export type EnvDescriptor = { + orgId: string; + projectId: string; + envId: string; +}; export interface 
RunQueueKeyProducer { masterQueueScanPattern(masterQueue: string): string; @@ -58,8 +53,15 @@ export interface RunQueueKeyProducer { ): string; disabledConcurrencyLimitKeyFromQueue(queue: string): string; //env oncurrency + envCurrentConcurrencyKey(env: EnvDescriptor): string; envCurrentConcurrencyKey(env: MinimalAuthenticatedEnvironment): string; + + envConcurrencyLimitKey(env: EnvDescriptor): string; envConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment): string; + + envReserveConcurrencyKey(env: EnvDescriptor): string; + envReserveConcurrencyKey(env: MinimalAuthenticatedEnvironment): string; + envConcurrencyLimitKeyFromQueue(queue: string): string; envCurrentConcurrencyKeyFromQueue(queue: string): string; //task concurrency @@ -77,44 +79,20 @@ export interface RunQueueKeyProducer { messageKey(orgId: string, messageId: string): string; //utils stripKeyPrefix(key: string): string; - extractComponentsFromQueue(queue: string): { - orgId: string; - projectId: string; - envId: string; - queue: string; - concurrencyKey: string | undefined; - }; + orgIdFromQueue(queue: string): string; + envIdFromQueue(queue: string): string; + projectIdFromQueue(queue: string): string; + descriptorFromQueue(queue: string): QueueDescriptor; } -export type PriorityStrategyChoice = string[] | { abort: true }; +export type EnvQueues = { + envId: string; + queues: string[]; +}; -export interface RunQueuePriorityStrategy { - /** - * chooseQueue is called to select the next queue to process a message from - * - * @param queues - * @param parentQueue - * @param consumerId - * - * @returns The queue to process the message from, or an object with `abort: true` if no queue is available - */ - chooseQueues( - queues: Array, +export interface RunQueueFairDequeueStrategy { + distributeFairQueuesFromParentQueue( parentQueue: string, - consumerId: string, - previousRange: QueueRange, - maxCount: number - ): { choices: PriorityStrategyChoice; nextRange: QueueRange }; - - /** - * This function is 
called to get the next candidate selection for the queue - * The `range` is used to select the set of queues that will be considered for the next selection (passed to chooseQueue) - * The `selectionId` is used to identify the selection and should be passed to chooseQueue - * - * @param parentQueue The parent queue that holds the candidate queues - * @param consumerId The consumerId that is making the request - * - * @returns The scores and the selectionId for the next candidate selection - */ - nextCandidateSelection(parentQueue: string, consumerId: string): Promise<{ range: QueueRange }>; + consumerId: string + ): Promise>; } diff --git a/internal-packages/run-engine/src/shared/index.ts b/internal-packages/run-engine/src/shared/index.ts index 3790918eab..c327c63f37 100644 --- a/internal-packages/run-engine/src/shared/index.ts +++ b/internal-packages/run-engine/src/shared/index.ts @@ -1,4 +1,4 @@ -import { Attributes } from "@opentelemetry/api"; +import { Attributes } from "@internal/tracing"; import { Prisma } from "@trigger.dev/database"; export type AuthenticatedEnvironment = Prisma.RuntimeEnvironmentGetPayload<{ diff --git a/internal-packages/run-engine/tsconfig.json b/internal-packages/run-engine/tsconfig.json index 44c35a5d5e..ee2b56fe80 100644 --- a/internal-packages/run-engine/tsconfig.json +++ b/internal-packages/run-engine/tsconfig.json @@ -2,8 +2,8 @@ "compilerOptions": { "target": "ES2019", "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], - "module": "CommonJS", - "moduleResolution": "Node", + "module": "Node16", + "moduleResolution": "Node16", "moduleDetection": "force", "verbatimModuleSyntax": false, "types": ["vitest/globals"], diff --git a/internal-packages/tracing/README.md b/internal-packages/tracing/README.md new file mode 100644 index 0000000000..e0f344f788 --- /dev/null +++ b/internal-packages/tracing/README.md @@ -0,0 +1,3 @@ +# Tracing + +This is a simple package that provides OpenTelemetry tracing and logging
helpers (getTracer, startSpan, and severity-scoped log emitters) for the internal packages. diff --git a/internal-packages/tracing/package.json b/internal-packages/tracing/package.json new file mode 100644 index 0000000000..60aa0ba8b4 --- /dev/null +++ b/internal-packages/tracing/package.json @@ -0,0 +1,20 @@ +{ + "name": "@internal/tracing", + "private": true, + "version": "0.0.1", + "main": "./src/index.ts", + "types": "./src/index.ts", + "type": "module", + "dependencies": { + "@opentelemetry/api": "1.9.0", + "@opentelemetry/api-logs": "0.52.1", + "@opentelemetry/semantic-conventions": "^1.27.0", + "@trigger.dev/core": "workspace:*" + }, + "devDependencies": { + "vitest": "^1.4.0" + }, + "scripts": { + "typecheck": "tsc --noEmit" + } +} \ No newline at end of file diff --git a/internal-packages/tracing/src/index.ts b/internal-packages/tracing/src/index.ts new file mode 100644 index 0000000000..18c5eec2e6 --- /dev/null +++ b/internal-packages/tracing/src/index.ts @@ -0,0 +1,94 @@ +import { Span, SpanOptions, SpanStatusCode, Tracer } from "@opentelemetry/api"; +import { Logger, SeverityNumber } from "@opentelemetry/api-logs"; +import { flattenAttributes } from "@trigger.dev/core/v3/utils/flattenAttributes"; + +export * from "@opentelemetry/semantic-conventions"; + +export type { Tracer, Attributes } from "@opentelemetry/api"; + +import { trace, context, propagation, SpanKind } from "@opentelemetry/api"; +export { trace, context, propagation, type Span, SpanKind, type SpanOptions }; + +export function getTracer(name: string): Tracer { + return trace.getTracer(name); +} + +export async function startSpan( + tracer: Tracer | undefined, + name: string, + fn: (span: Span) => Promise, + options?: SpanOptions +): Promise { + tracer ??= getTracer("default"); + + return tracer.startActiveSpan(name, options ??
{}, async (span) => { + try { + return await fn(span); + } catch (error) { + if (error instanceof Error) { + span.recordException(error); + } else if (typeof error === "string") { + span.recordException(new Error(error)); + } else { + span.recordException(new Error(String(error))); + } + + span.setStatus({ + code: SpanStatusCode.ERROR, + message: error instanceof Error ? error.message : String(error), + }); + + throw error; + } finally { + span.end(); + } + }); +} + +export async function emitDebugLog( + logger: Logger, + message: string, + params: Record = {} +) { + logger.emit({ + severityNumber: SeverityNumber.DEBUG, + body: message, + attributes: { ...flattenAttributes(params, "params") }, + }); +} + +export async function emitInfoLog( + logger: Logger, + message: string, + params: Record = {} +) { + logger.emit({ + severityNumber: SeverityNumber.INFO, + body: message, + attributes: { ...flattenAttributes(params, "params") }, + }); +} + +export async function emitErrorLog( + logger: Logger, + message: string, + params: Record = {} +) { + logger.emit({ + severityNumber: SeverityNumber.ERROR, + body: message, + attributes: { ...flattenAttributes(params, "params") }, + }); +} + +export async function emitWarnLog( + logger: Logger, + message: string, + params: Record = {} +) { + logger.emit({ + severityNumber: SeverityNumber.WARN, + body: message, + attributes: { ...flattenAttributes(params, "params") }, + }); +} diff --git a/internal-packages/tracing/tsconfig.json b/internal-packages/tracing/tsconfig.json new file mode 100644 index 0000000000..0104339620 --- /dev/null +++ b/internal-packages/tracing/tsconfig.json @@ -0,0 +1,23 @@ +{ + "compilerOptions": { + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], + "module": "Node16", + "moduleResolution": "Node16", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "types": ["vitest/globals"], + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + 
"isolatedModules": true, + "preserveWatchOutput": true, + "skipLibCheck": true, + "noEmit": true, + "strict": true, + "paths": { + "@trigger.dev/core": ["../../packages/core/src/index"], + "@trigger.dev/core/*": ["../../packages/core/src/*"] + } + }, + "exclude": ["node_modules"] +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7d501c67a4..0e22ce80e5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -999,18 +999,18 @@ importers: '@internal/redis-worker': specifier: workspace:* version: link:../redis-worker - '@opentelemetry/api': - specifier: ^1.9.0 - version: 1.9.0 - '@opentelemetry/semantic-conventions': - specifier: ^1.27.0 - version: 1.28.0 + '@internal/tracing': + specifier: workspace:* + version: link:../tracing '@trigger.dev/core': specifier: workspace:* version: link:../../packages/core '@trigger.dev/database': specifier: workspace:* version: link:../database + '@unkey/cache': + specifier: ^1.5.0 + version: 1.5.0 assert-never: specifier: ^1.2.1 version: 1.2.1 @@ -1023,6 +1023,9 @@ importers: redlock: specifier: 5.0.0-beta.2 version: 5.0.0-beta.2(patch_hash=rwyegdki7iserrd7fgjwxkhnlu) + seedrandom: + specifier: ^3.0.5 + version: 3.0.5 zod: specifier: 3.23.8 version: 3.23.8 @@ -1030,6 +1033,9 @@ importers: '@internal/testcontainers': specifier: workspace:* version: link:../testcontainers + '@types/seedrandom': + specifier: ^3.0.8 + version: 3.0.8 vitest: specifier: ^1.4.0 version: 1.6.0(@types/node@20.14.14) @@ -1065,6 +1071,25 @@ importers: specifier: ^1.4.0 version: 1.6.0(@types/node@20.14.14) + internal-packages/tracing: + dependencies: + '@opentelemetry/api': + specifier: 1.9.0 + version: 1.9.0 + '@opentelemetry/api-logs': + specifier: 0.52.1 + version: 0.52.1 + '@opentelemetry/semantic-conventions': + specifier: ^1.27.0 + version: 1.28.0 + '@trigger.dev/core': + specifier: workspace:* + version: link:../../packages/core + devDependencies: + vitest: + specifier: ^1.4.0 + version: 1.6.0(@types/node@20.14.14) + internal-packages/zod-worker: 
dependencies: '@opentelemetry/api': From 6db88f2c034f1b86ea8f6d1ec2505a57b5d1aff4 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 6 Mar 2025 14:43:34 +0000 Subject: [PATCH 03/12] more wip --- internal-packages/redis-worker/package.json | 5 +- internal-packages/redis-worker/src/index.ts | 4 +- internal-packages/redis-worker/src/queue.ts | 11 +++- .../redis-worker/src/telemetry.ts | 31 ---------- .../redis-worker/src/worker.test.ts | 1 - internal-packages/redis-worker/src/worker.ts | 6 +- internal-packages/redis-worker/tsconfig.json | 4 +- internal-packages/redis/src/index.ts | 2 +- internal-packages/run-engine/package.json | 1 - .../run-engine/src/engine/eventBus.ts | 2 +- .../run-engine/src/engine/index.ts | 41 +++++++------ .../run-engine/src/engine/locking.ts | 2 +- .../src/engine/tests/batchTrigger.test.ts | 2 +- .../engine/tests/batchTriggerAndWait.test.ts | 2 +- .../src/engine/tests/cancelling.test.ts | 2 +- .../src/engine/tests/checkpoints.test.ts | 2 +- .../src/engine/tests/delays.test.ts | 2 +- .../src/engine/tests/dequeuing.test.ts | 2 +- .../src/engine/tests/heartbeats.test.ts | 2 +- .../src/engine/tests/notDeployed.test.ts | 2 +- .../src/engine/tests/priority.test.ts | 2 +- .../src/engine/tests/trigger.test.ts | 2 +- .../src/engine/tests/triggerAndWait.test.ts | 2 +- .../run-engine/src/engine/tests/ttl.test.ts | 2 +- .../run-engine/src/engine/types.ts | 6 +- ...egy.server.ts => fairDequeuingStrategy.ts} | 0 .../run-engine/src/run-queue/index.test.ts | 58 ++++++++++++++++++- .../run-engine/src/run-queue/index.ts | 2 +- internal-packages/tracing/src/index.ts | 2 +- internal-packages/zod-worker/package.json | 2 +- internal-packages/zod-worker/src/index.ts | 2 +- pnpm-lock.yaml | 18 ++---- 32 files changed, 124 insertions(+), 100 deletions(-) delete mode 100644 internal-packages/redis-worker/src/telemetry.ts rename internal-packages/run-engine/src/run-queue/{fairDequeuingStrategy.server.ts => fairDequeuingStrategy.ts} (100%) diff --git 
a/internal-packages/redis-worker/package.json b/internal-packages/redis-worker/package.json index 2f3cc53e2e..4d25a44d4c 100644 --- a/internal-packages/redis-worker/package.json +++ b/internal-packages/redis-worker/package.json @@ -6,10 +6,9 @@ "types": "./src/index.ts", "type": "module", "dependencies": { - "@opentelemetry/api": "^1.9.0", + "@internal/tracing": "workspace:*", "@internal/redis": "workspace:*", "@trigger.dev/core": "workspace:*", - "ioredis": "^5.3.2", "lodash.omit": "^4.5.0", "nanoid": "^5.0.7", "p-limit": "^6.2.0", @@ -24,4 +23,4 @@ "typecheck": "tsc --noEmit", "test": "vitest --no-file-parallelism" } -} +} \ No newline at end of file diff --git a/internal-packages/redis-worker/src/index.ts b/internal-packages/redis-worker/src/index.ts index a5893efc83..d4c28d9125 100644 --- a/internal-packages/redis-worker/src/index.ts +++ b/internal-packages/redis-worker/src/index.ts @@ -1,2 +1,2 @@ -export * from "./queue"; -export * from "./worker"; +export * from "./queue.js"; +export * from "./worker.js"; diff --git a/internal-packages/redis-worker/src/queue.ts b/internal-packages/redis-worker/src/queue.ts index 37611848c1..697b82ff5c 100644 --- a/internal-packages/redis-worker/src/queue.ts +++ b/internal-packages/redis-worker/src/queue.ts @@ -1,6 +1,11 @@ -import { createRedisClient } from "@internal/redis"; +import { + createRedisClient, + type Redis, + type Callback, + type RedisOptions, + type Result, +} from "@internal/redis"; import { Logger } from "@trigger.dev/core/logger"; -import Redis, { type Callback, type RedisOptions, type Result } from "ioredis"; import { nanoid } from "nanoid"; import { z } from "zod"; @@ -436,7 +441,7 @@ export class SimpleQueue { } } -declare module "ioredis" { +declare module "@internal/redis" { interface RedisCommander { enqueueItem( //keys diff --git a/internal-packages/redis-worker/src/telemetry.ts b/internal-packages/redis-worker/src/telemetry.ts deleted file mode 100644 index d52c437204..0000000000 --- 
a/internal-packages/redis-worker/src/telemetry.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { SpanOptions, SpanStatusCode, Span, Tracer } from "@opentelemetry/api"; - -export async function startSpan( - tracer: Tracer, - name: string, - fn: (span: Span) => Promise, - options?: SpanOptions -): Promise { - return tracer.startActiveSpan(name, options ?? {}, async (span) => { - try { - return await fn(span); - } catch (error) { - if (error instanceof Error) { - span.recordException(error); - } else if (typeof error === "string") { - span.recordException(new Error(error)); - } else { - span.recordException(new Error(String(error))); - } - - span.setStatus({ - code: SpanStatusCode.ERROR, - message: error instanceof Error ? error.message : String(error), - }); - - throw error; - } finally { - span.end(); - } - }); -} diff --git a/internal-packages/redis-worker/src/worker.test.ts b/internal-packages/redis-worker/src/worker.test.ts index f8ad28577a..2a138b49a0 100644 --- a/internal-packages/redis-worker/src/worker.test.ts +++ b/internal-packages/redis-worker/src/worker.test.ts @@ -4,7 +4,6 @@ import { describe } from "node:test"; import { expect } from "vitest"; import { z } from "zod"; import { Worker } from "./worker.js"; -import Redis from "ioredis"; import { createRedisClient } from "@internal/redis"; describe("Worker", () => { diff --git a/internal-packages/redis-worker/src/worker.ts b/internal-packages/redis-worker/src/worker.ts index c7da0e046c..d4fca68c6d 100644 --- a/internal-packages/redis-worker/src/worker.ts +++ b/internal-packages/redis-worker/src/worker.ts @@ -1,13 +1,11 @@ -import { SpanKind, trace, Tracer } from "@opentelemetry/api"; +import { SpanKind, startSpan, trace, Tracer } from "@internal/tracing"; import { Logger } from "@trigger.dev/core/logger"; import { calculateNextRetryDelay } from "@trigger.dev/core/v3"; import { type RetryOptions } from "@trigger.dev/core/v3/schemas"; -import { type RedisOptions } from "ioredis"; +import { Redis, type 
RedisOptions } from "@internal/redis"; import { z } from "zod"; import { AnyQueueItem, SimpleQueue } from "./queue.js"; -import Redis from "ioredis"; import { nanoid } from "nanoid"; -import { startSpan } from "./telemetry.js"; import pLimit from "p-limit"; import { createRedisClient } from "@internal/redis"; diff --git a/internal-packages/redis-worker/tsconfig.json b/internal-packages/redis-worker/tsconfig.json index 46ba33d89f..be98ae04b8 100644 --- a/internal-packages/redis-worker/tsconfig.json +++ b/internal-packages/redis-worker/tsconfig.json @@ -2,8 +2,8 @@ "compilerOptions": { "target": "ES2019", "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], - "module": "CommonJS", - "moduleResolution": "Node", + "module": "Node16", + "moduleResolution": "Node16", "moduleDetection": "force", "verbatimModuleSyntax": false, "types": ["vitest/globals"], diff --git a/internal-packages/redis/src/index.ts b/internal-packages/redis/src/index.ts index 5eb631cb9d..13264773b9 100644 --- a/internal-packages/redis/src/index.ts +++ b/internal-packages/redis/src/index.ts @@ -1,7 +1,7 @@ import { Redis, RedisOptions } from "ioredis"; import { Logger } from "@trigger.dev/core/logger"; -export { Redis, type Callback, type RedisOptions, type Result } from "ioredis"; +export { Redis, type Callback, type RedisOptions, type Result, type RedisCommander } from "ioredis"; const defaultOptions: Partial = { retryStrategy: (times: number) => { diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index 23f29d246e..3c440c3857 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -12,7 +12,6 @@ "@trigger.dev/core": "workspace:*", "@trigger.dev/database": "workspace:*", "assert-never": "^1.2.1", - "ioredis": "^5.3.2", "nanoid": "^3.3.4", "redlock": "5.0.0-beta.2", "zod": "3.23.8", diff --git a/internal-packages/run-engine/src/engine/eventBus.ts 
b/internal-packages/run-engine/src/engine/eventBus.ts index 871115ed6a..0ad687f27c 100644 --- a/internal-packages/run-engine/src/engine/eventBus.ts +++ b/internal-packages/run-engine/src/engine/eventBus.ts @@ -1,5 +1,5 @@ import { TaskRunExecutionStatus, TaskRunStatus } from "@trigger.dev/database"; -import { AuthenticatedEnvironment } from "../shared"; +import { AuthenticatedEnvironment } from "../shared/index.js"; import { FlushedRunMetadata, TaskRunError } from "@trigger.dev/core/v3"; export type EventBusEvents = { diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index fb14a2f9a7..9cdbf387c4 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -1,6 +1,6 @@ -import { createRedisClient } from "@internal/redis"; +import { createRedisClient, Redis } from "@internal/redis"; import { Worker } from "@internal/redis-worker"; -import { Attributes, Span, SpanKind, trace, Tracer } from "@opentelemetry/api"; +import { Attributes, Span, SpanKind, trace, Tracer } from "@internal/tracing"; import { assertExhaustive } from "@trigger.dev/core"; import { Logger } from "@trigger.dev/core/logger"; import { @@ -48,29 +48,29 @@ import { TaskRunStatus, Waitpoint, } from "@trigger.dev/database"; -import assertNever from "assert-never"; -import { Redis } from "ioredis"; +import { assertNever } from "assert-never"; import { nanoid } from "nanoid"; import { EventEmitter } from "node:events"; import { z } from "zod"; -import { RunQueue } from "../run-queue"; -import { SimpleWeightedChoiceStrategy } from "../run-queue/simpleWeightedPriorityStrategy"; -import { MinimalAuthenticatedEnvironment } from "../shared"; -import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; -import { getRunWithBackgroundWorkerTasks } from "./db/worker"; -import { runStatusFromError } from "./errors"; -import { EventBusEvents } from "./eventBus"; -import { executionResultFromSnapshot, 
getLatestExecutionSnapshot } from "./executionSnapshots"; -import { RunLocker } from "./locking"; -import { getMachinePreset } from "./machinePresets"; +import { RunQueue } from "../run-queue/index.js"; +import { FairDequeuingStrategy } from "../run-queue/fairDequeuingStrategy.js"; +import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; +import { MAX_TASK_RUN_ATTEMPTS } from "./consts.js"; +import { getRunWithBackgroundWorkerTasks } from "./db/worker.js"; +import { runStatusFromError } from "./errors.js"; +import { EventBusEvents } from "./eventBus.js"; +import { executionResultFromSnapshot, getLatestExecutionSnapshot } from "./executionSnapshots.js"; +import { RunLocker } from "./locking.js"; +import { getMachinePreset } from "./machinePresets.js"; import { isCheckpointable, isDequeueableExecutionStatus, isExecuting, isFinalRunStatus, isPendingExecuting, -} from "./statuses"; -import { HeartbeatTimeouts, RunEngineOptions, TriggerParams } from "./types"; +} from "./statuses.js"; +import { HeartbeatTimeouts, RunEngineOptions, TriggerParams } from "./types.js"; +import { RunQueueShortKeyProducer } from "../run-queue/keyProducer.js"; import { retryOutcomeFromCompletion } from "./retrying"; const workerCatalog = { @@ -153,11 +153,16 @@ export class RunEngine { ); this.runLock = new RunLocker({ redis: this.runLockRedis }); + const keys = new RunQueueShortKeyProducer("rq:"); + this.runQueue = new RunQueue({ name: "rq", tracer: trace.getTracer("rq"), - queuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 36 }), - envQueuePriorityStrategy: new SimpleWeightedChoiceStrategy({ queueSelectionCount: 12 }), + keys, + queuePriorityStrategy: new FairDequeuingStrategy({ + keys, + redis: { ...options.queue.redis, keyPrefix: `${options.queue.redis.keyPrefix}runqueue:` }, + }), defaultEnvConcurrency: options.queue?.defaultEnvConcurrency ?? 
10, logger: new Logger("RunQueue", "debug"), redis: { ...options.queue.redis, keyPrefix: `${options.queue.redis.keyPrefix}runqueue:` }, diff --git a/internal-packages/run-engine/src/engine/locking.ts b/internal-packages/run-engine/src/engine/locking.ts index cd3aecc7c6..90f1464824 100644 --- a/internal-packages/run-engine/src/engine/locking.ts +++ b/internal-packages/run-engine/src/engine/locking.ts @@ -1,6 +1,6 @@ -import Redis from "ioredis"; import Redlock, { RedlockAbortSignal } from "redlock"; import { AsyncLocalStorage } from "async_hooks"; +import { Redis } from "@internal/redis"; interface LockContext { resources: string; diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts index c3b78f0086..9c9d181619 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -3,7 +3,7 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; diff --git a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts index d2869886d0..28338e49cf 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts @@ -4,7 +4,7 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect, describe } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from 
"node:timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts index e867b6a694..297883c777 100644 --- a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts +++ b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, assertNonNullable, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts index a62747ca0c..7b73cd13c6 100644 --- a/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts @@ -5,7 +5,7 @@ import { setupBackgroundWorker, assertNonNullable, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/delays.test.ts b/internal-packages/run-engine/src/engine/tests/delays.test.ts index be937127e9..e62fef8f2d 100644 --- a/internal-packages/run-engine/src/engine/tests/delays.test.ts +++ b/internal-packages/run-engine/src/engine/tests/delays.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, assertNonNullable, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts 
b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts index 3f2cfdd9dd..9ed3d1a2f9 100644 --- a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts +++ b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts @@ -3,7 +3,7 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index b606845490..c4112cf35c 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, assertNonNullable, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect, describe } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts index 03da6c548e..80b95d3e13 100644 --- a/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts +++ b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, assertNonNullable, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/priority.test.ts 
b/internal-packages/run-engine/src/engine/tests/priority.test.ts index c0e0f4a459..ecc36ff8da 100644 --- a/internal-packages/run-engine/src/engine/tests/priority.test.ts +++ b/internal-packages/run-engine/src/engine/tests/priority.test.ts @@ -3,7 +3,7 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; diff --git a/internal-packages/run-engine/src/engine/tests/trigger.test.ts b/internal-packages/run-engine/src/engine/tests/trigger.test.ts index 1eef5dd838..da4dbbc69b 100644 --- a/internal-packages/run-engine/src/engine/tests/trigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/trigger.test.ts @@ -4,7 +4,7 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { EventBusEventArgs } from "../eventBus.js"; import { RunEngine } from "../index.js"; diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 92c2cb12bc..02d75ee15a 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -4,7 +4,7 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "node:timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/ttl.test.ts 
b/internal-packages/run-engine/src/engine/tests/ttl.test.ts index 11e3225038..e48597ae1a 100644 --- a/internal-packages/run-engine/src/engine/tests/ttl.test.ts +++ b/internal-packages/run-engine/src/engine/tests/ttl.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, assertNonNullable, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index a6bf6cf8b2..040f3a9213 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -1,9 +1,9 @@ import { type WorkerConcurrencyOptions } from "@internal/redis-worker"; -import { Tracer } from "@opentelemetry/api"; +import { Tracer } from "@internal/tracing"; import { MachinePreset, MachinePresetName, QueueOptions, RetryOptions } from "@trigger.dev/core/v3"; import { PrismaClient } from "@trigger.dev/database"; -import { type RedisOptions } from "ioredis"; -import { MinimalAuthenticatedEnvironment } from "../shared"; +import { type RedisOptions } from "@internal/redis"; +import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; export type RunEngineOptions = { prisma: PrismaClient; diff --git a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.server.ts b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts similarity index 100% rename from internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.server.ts rename to internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index 755daa0b8d..eebd940979 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ 
b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -6,7 +6,7 @@ import { setTimeout } from "node:timers/promises"; import { RunQueue } from "./index.js"; import { InputPayload } from "./types.js"; import { createRedisClient } from "@internal/redis"; -import { FairDequeuingStrategy } from "./fairDequeuingStrategy.server.js"; +import { FairDequeuingStrategy } from "./fairDequeuingStrategy.js"; import { RunQueueShortKeyProducer } from "./keyProducer.js"; const testOptions = { @@ -278,6 +278,14 @@ describe("RunQueue", () => { async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -381,6 +389,14 @@ describe("RunQueue", () => { async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -448,6 +464,14 @@ describe("RunQueue", () => { redisTest("Acking", { timeout: 5_000 }, async ({ redisContainer, redisOptions }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -524,6 +548,14 @@ describe("RunQueue", () => { redisTest("Ack (before dequeue)", { timeout: 5_000 }, async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: 
"runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -578,6 +610,14 @@ describe("RunQueue", () => { redisTest("Nacking", { timeout: 15_000 }, async ({ redisContainer, redisOptions }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -669,6 +709,14 @@ describe("RunQueue", () => { async ({ redisContainer, redisOptions }) => { const queue = new RunQueue({ ...testOptions, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -783,6 +831,14 @@ describe("RunQueue", () => { retryOptions: { maxAttempts: 1, }, + queuePriorityStrategy: new FairDequeuingStrategy({ + redis: { + keyPrefix: "runqueue:test:", + host: redisContainer.getHost(), + port: redisContainer.getPort(), + }, + keys: testOptions.keys, + }), redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index a0458daa5c..049212901e 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -1306,7 +1306,7 @@ redis.call('SET', envConcurrencyLimitKey, envConcurrencyLimit) } } -declare module "ioredis" { +declare module "@internal/redis" { interface RedisCommander { enqueueMessage( //keys diff --git a/internal-packages/tracing/src/index.ts b/internal-packages/tracing/src/index.ts index 
18c5eec2e6..9fe3532776 100644 --- a/internal-packages/tracing/src/index.ts +++ b/internal-packages/tracing/src/index.ts @@ -7,7 +7,7 @@ export * from "@opentelemetry/semantic-conventions"; export type { Tracer, Attributes } from "@opentelemetry/api"; import { trace, context, propagation, SpanKind } from "@opentelemetry/api"; -export { trace, context, propagation, type Span, SpanKind, type SpanOptions }; +export { trace, context, propagation, type Span, SpanKind, type SpanOptions, SpanStatusCode }; export function getTracer(name: string): Tracer { return trace.getTracer(name); diff --git a/internal-packages/zod-worker/package.json b/internal-packages/zod-worker/package.json index 712a110a9c..444ea2fc10 100644 --- a/internal-packages/zod-worker/package.json +++ b/internal-packages/zod-worker/package.json @@ -5,7 +5,7 @@ "main": "./src/index.ts", "types": "./src/index.ts", "dependencies": { - "@opentelemetry/api": "^1.9.0", + "@internal/tracing": "workspace:*", "@trigger.dev/core": "workspace:*", "@trigger.dev/database": "workspace:*", "graphile-worker": "0.16.6", diff --git a/internal-packages/zod-worker/src/index.ts b/internal-packages/zod-worker/src/index.ts index c597500e54..2428a103fa 100644 --- a/internal-packages/zod-worker/src/index.ts +++ b/internal-packages/zod-worker/src/index.ts @@ -1,4 +1,4 @@ -import { SpanKind, SpanStatusCode, trace } from "@opentelemetry/api"; +import { SpanKind, SpanStatusCode, trace } from "@internal/tracing"; import { flattenAttributes } from "@trigger.dev/core/v3"; import type { CronItem, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 0e22ce80e5..ac4cabe86f 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -959,15 +959,12 @@ importers: '@internal/redis': specifier: workspace:* version: link:../redis - '@opentelemetry/api': - specifier: ^1.9.0 - version: 1.9.0 + '@internal/tracing': + specifier: workspace:* + version: link:../tracing '@trigger.dev/core': specifier: workspace:* version: link:../../packages/core - ioredis: - 
specifier: ^5.3.2 - version: 5.3.2 lodash.omit: specifier: ^4.5.0 version: 4.5.0 @@ -1014,9 +1011,6 @@ importers: assert-never: specifier: ^1.2.1 version: 1.2.1 - ioredis: - specifier: ^5.3.2 - version: 5.3.2 nanoid: specifier: ^3.3.4 version: 3.3.7 @@ -1092,9 +1086,9 @@ importers: internal-packages/zod-worker: dependencies: - '@opentelemetry/api': - specifier: ^1.9.0 - version: 1.9.0 + '@internal/tracing': + specifier: workspace:* + version: link:../tracing '@trigger.dev/core': specifier: workspace:* version: link:../../packages/core From 614f74fd86e72f369f31233e8dc6ea57125aeb14 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Thu, 6 Mar 2025 16:21:39 +0000 Subject: [PATCH 04/12] WIP --- .vscode/launch.json | 8 + .../run-engine/src/engine/index.ts | 4 +- .../src/engine/tests/waitpoints.test.ts | 2 +- internal-packages/run-engine/src/index.ts | 4 +- .../src/run-queue/fairDequeuingStrategy.ts | 10 +- .../run-engine/src/run-queue/index.test.ts | 7 +- .../run-engine/src/run-queue/index.ts | 150 ++++++------------ .../src/run-queue/keyProducer.test.ts | 64 +++----- .../run-engine/src/run-queue/keyProducer.ts | 44 ++--- .../run-engine/src/run-queue/types.ts | 3 - 10 files changed, 106 insertions(+), 190 deletions(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 40f97bb421..8242758d34 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -141,6 +141,14 @@ "command": "pnpm run test --filter @internal/run-engine", "cwd": "${workspaceFolder}", "sourceMaps": true + }, + { + "type": "node-terminal", + "request": "launch", + "name": "Debug RunQueue tests", + "command": "pnpm run test ./src/run-queue/index.test.ts", + "cwd": "${workspaceFolder}/internal-packages/run-engine", + "sourceMaps": true } ] } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 9cdbf387c4..e06dd472f8 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts 
@@ -70,7 +70,7 @@ import { isPendingExecuting, } from "./statuses.js"; import { HeartbeatTimeouts, RunEngineOptions, TriggerParams } from "./types.js"; -import { RunQueueShortKeyProducer } from "../run-queue/keyProducer.js"; +import { RunQueueFullKeyProducer } from "../run-queue/keyProducer.js"; import { retryOutcomeFromCompletion } from "./retrying"; const workerCatalog = { @@ -153,7 +153,7 @@ export class RunEngine { ); this.runLock = new RunLocker({ redis: this.runLockRedis }); - const keys = new RunQueueShortKeyProducer("rq:"); + const keys = new RunQueueFullKeyProducer(); this.runQueue = new RunQueue({ name: "rq", diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 1d09e336d9..771d070781 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -4,7 +4,7 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/index.ts b/internal-packages/run-engine/src/index.ts index c7cf00b11e..89bd08196d 100644 --- a/internal-packages/run-engine/src/index.ts +++ b/internal-packages/run-engine/src/index.ts @@ -1,2 +1,2 @@ -export { RunEngine, RunDuplicateIdempotencyKeyError } from "./engine/index"; -export type { EventBusEventArgs } from "./engine/eventBus"; +export { RunEngine, RunDuplicateIdempotencyKeyError } from "./engine/index.js"; +export type { EventBusEventArgs } from "./engine/eventBus.js"; diff --git a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts index c3e2279f2f..23ae2b7dea 100644 --- 
a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts +++ b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts @@ -36,7 +36,7 @@ export type FairDequeuingStrategyBiases = { export type FairDequeuingStrategyOptions = { redis: RedisOptions; keys: RunQueueKeyProducer; - defaultEnvConcurrency?: number; + defaultEnvConcurrencyLimit?: number; parentQueueLimit?: number; tracer?: Tracer; seed?: string; @@ -97,7 +97,7 @@ export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { > = new Map(); private _redis: Redis; - private _defaultEnvConcurrency: number; + private _defaultEnvConcurrencyLimit: number; private _parentQueueLimit: number; constructor(private options: FairDequeuingStrategyOptions) { @@ -115,7 +115,7 @@ export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { this._rng = seedrandom(options.seed); this._redis = createRedisClient(options.redis); - this._defaultEnvConcurrency = options.defaultEnvConcurrency ?? 10; + this._defaultEnvConcurrencyLimit = options.defaultEnvConcurrencyLimit ?? 10; this._parentQueueLimit = options.parentQueueLimit ?? 100; } @@ -561,13 +561,13 @@ export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { const value = await this._redis.get(key); if (!value) { - return this._defaultEnvConcurrency; + return this._defaultEnvConcurrencyLimit; } return Number(value); }); - return result.val ?? this._defaultEnvConcurrency; + return result.val ?? 
this._defaultEnvConcurrencyLimit; }); } diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index eebd940979..a014fb46b1 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -7,7 +7,7 @@ import { RunQueue } from "./index.js"; import { InputPayload } from "./types.js"; import { createRedisClient } from "@internal/redis"; import { FairDequeuingStrategy } from "./fairDequeuingStrategy.js"; -import { RunQueueShortKeyProducer } from "./keyProducer.js"; +import { RunQueueFullKeyProducer } from "./keyProducer.js"; const testOptions = { name: "rq", @@ -23,7 +23,7 @@ const testOptions = { maxTimeoutInMs: 1_000, randomize: true, }, - keys: new RunQueueShortKeyProducer("rq:"), + keys: new RunQueueFullKeyProducer(), }; const authenticatedEnvProd = { @@ -383,7 +383,7 @@ describe("RunQueue", () => { } ); - redisTest( + redisTest.only( "Dequeue multiple messages from the queue", { timeout: 5_000 }, async ({ redisContainer }) => { @@ -408,6 +408,7 @@ describe("RunQueue", () => { // Create 20 messages with different runIds and some with different queues const messages = Array.from({ length: 20 }, (_, i) => ({ ...messageProd, + taskIdentifier: i < 15 ? "task/my-task" : "task/other-task", // Mix up the queues runId: `r${i + 1}`, queue: i < 15 ? 
"task/my-task" : "task/other-task", // Mix up the queues })); diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 049212901e..929c0efec2 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -259,10 +259,11 @@ export class RunQueue { return this.#trace( "dequeueMessageInSharedQueue", async (span) => { - const envQueues = await this.queuePriorityStrategy.distributeFairQueuesFromParentQueue( - masterQueue, - consumerId - ); + const envQueues = + await this.options.queuePriorityStrategy.distributeFairQueuesFromParentQueue( + masterQueue, + consumerId + ); span.setAttribute("environment_count", envQueues.length); @@ -275,76 +276,57 @@ export class RunQueue { const messages: DequeuedMessage[] = []; - // Keep track of queues we've tried that didn't return a message - const emptyQueues = new Set(); - - // Continue until we've hit max count or tried all queues - while (messages.length < maxCount) { - // Calculate how many more messages we need - const remainingCount = maxCount - messages.length; - if (remainingCount <= 0) break; - - // Find all available queues across environments that we haven't marked as empty - const availableEnvQueues = envQueues - .map((env) => ({ - env: env, - queues: env.queues.filter((queue) => !emptyQueues.has(queue)), - })) - .filter((env) => env.queues.length > 0); + // Each env starts with its list of candidate queues + const tenantQueues: Record = {}; - if (availableEnvQueues.length === 0) break; + // Initialize tenantQueues with the queues for each env + for (const env of envQueues) { + tenantQueues[env.envId] = [...env.queues]; // Create a copy of the queues array + } - attemptedEnvs += availableEnvQueues.length; + // Continue until we've hit max count or all tenants have empty queue lists + while ( + messages.length < maxCount && + Object.values(tenantQueues).some((queues) => queues.length > 0) + ) 
{ + for (const env of envQueues) { + attemptedEnvs++; + + // Skip if this tenant has no more queues + if (tenantQueues[env.envId].length === 0) { + continue; + } - // Create a dequeue operation for each environment, taking one queue from each - const dequeueOperations = availableEnvQueues.map(({ env, queues }) => { - const queue = queues[0]; + // Pop the next queue (using round-robin order) + const queue = tenantQueues[env.envId].shift()!; attemptedQueues++; - return { - queue, - operation: this.#callDequeueMessage({ - messageQueue: queue, - concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), - currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), - envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), - envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), - projectCurrentConcurrencyKey: - this.keys.projectCurrentConcurrencyKeyFromQueue(queue), - messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), - envQueueKey: this.keys.envQueueKeyFromQueue(queue), - taskCurrentConcurrentKeyPrefix: - this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), - }), - }; - }); - - // Execute all dequeue operations in parallel - const results = await Promise.all( - dequeueOperations.map(async ({ queue, operation }) => { - const message = await operation; - return { queue, message }; - }) - ); + // Attempt to dequeue from this queue + const message = await this.#callDequeueMessage({ + messageQueue: queue, + concurrencyLimitKey: this.keys.concurrencyLimitKeyFromQueue(queue), + currentConcurrencyKey: this.keys.currentConcurrencyKeyFromQueue(queue), + envConcurrencyLimitKey: this.keys.envConcurrencyLimitKeyFromQueue(queue), + envCurrentConcurrencyKey: this.keys.envCurrentConcurrencyKeyFromQueue(queue), + projectCurrentConcurrencyKey: this.keys.projectCurrentConcurrencyKeyFromQueue(queue), + messageKeyPrefix: this.keys.messageKeyPrefixFromQueue(queue), + envQueueKey: 
this.keys.envQueueKeyFromQueue(queue), + taskCurrentConcurrentKeyPrefix: + this.keys.taskIdentifierCurrentConcurrencyKeyPrefixFromQueue(queue), + }); - // Process results - let foundAnyMessage = false; - for (const { queue, message } of results) { if (message) { messages.push(message); - foundAnyMessage = true; - } else { - // Mark this queue as empty - emptyQueues.add(queue); + // Re-add this queue at the end, since it might have more messages + tenantQueues[env.envId].push(queue); } - } - - // If we couldn't get a message from any queue in any env, break - if (!foundAnyMessage) break; + // If message is null, do not re-add the queue in this cycle - // If we've marked all queues as empty, break - const totalQueues = envQueues.reduce((sum, env) => sum + env.queues.length, 0); - if (emptyQueues.size >= totalQueues) break; + // If we've reached maxCount, break out of the loop + if (messages.length >= maxCount) { + break; + } + } } span.setAttributes({ @@ -635,42 +617,6 @@ export class RunQueue { ); } - queueConcurrencyScanStream( - count: number = 100, - onEndCallback?: () => void, - onErrorCallback?: (error: Error) => void - ) { - const pattern = this.keys.queueCurrentConcurrencyScanPattern(); - - this.logger.debug("Starting queue concurrency scan stream", { - pattern, - component: "runqueue", - operation: "queueConcurrencyScanStream", - service: this.name, - count, - }); - - const redis = this.redis.duplicate(); - - const stream = redis.scanStream({ - match: pattern, - type: "set", - count, - }); - - stream.on("end", () => { - onEndCallback?.(); - redis.quit(); - }); - - stream.on("error", (error) => { - onErrorCallback?.(error); - redis.quit(); - }); - - return { stream, redis }; - } - async quit() { await this.subscriber.unsubscribe(); await this.subscriber.quit(); @@ -1103,9 +1049,9 @@ local earliestMessage = redis.call('ZRANGE', childQueue, 0, 0, 'WITHSCORES') for _, parentQueue in ipairs(decodedPayload.masterQueues) do local prefixedParentQueue = keyPrefix .. 
parentQueue if #earliestMessage == 0 then - redis.call('ZREM', prefixedParentQueue, childQueue) + redis.call('ZREM', prefixedParentQueue, childQueueName) else - redis.call('ZADD', prefixedParentQueue, earliestMessage[2], childQueue) + redis.call('ZADD', prefixedParentQueue, earliestMessage[2], childQueueName) end end diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.test.ts b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts index aa0ce54f43..274c0f7c6b 100644 --- a/internal-packages/run-engine/src/run-queue/keyProducer.test.ts +++ b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts @@ -1,28 +1,10 @@ import { describe } from "node:test"; import { expect, it } from "vitest"; -import { RunQueueShortKeyProducer } from "./keyProducer.js"; +import { RunQueueFullKeyProducer } from "./keyProducer.js"; describe("KeyProducer", () => { - it("sharedQueueScanPattern", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); - const pattern = keyProducer.masterQueueScanPattern("main"); - expect(pattern).toBe("test:*main"); - }); - - it("queueCurrentConcurrencyScanPattern", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); - const pattern = keyProducer.queueCurrentConcurrencyScanPattern(); - expect(pattern).toBe("test:{org:*}:proj:*:env:*:queue:*:currentConcurrency"); - }); - - it("stripKeyPrefix", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); - const key = keyProducer.stripKeyPrefix("test:abc"); - expect(key).toBe("abc"); - }); - it("queueConcurrencyLimitKey", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.queueConcurrencyLimitKey( { id: "e1234", @@ -37,7 +19,7 @@ describe("KeyProducer", () => { }); it("envConcurrencyLimitKey", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = 
keyProducer.envConcurrencyLimitKey({ id: "e1234", type: "PRODUCTION", @@ -49,7 +31,7 @@ describe("KeyProducer", () => { }); it("queueKey (no concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.queueKey( { id: "e1234", @@ -64,7 +46,7 @@ describe("KeyProducer", () => { }); it("queueKey (w concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.queueKey( { id: "e1234", @@ -80,7 +62,7 @@ describe("KeyProducer", () => { }); it("concurrencyLimitKeyFromQueue (w concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -97,7 +79,7 @@ describe("KeyProducer", () => { }); it("concurrencyLimitKeyFromQueue (no concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -113,7 +95,7 @@ describe("KeyProducer", () => { }); it("currentConcurrencyKeyFromQueue (w concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -132,7 +114,7 @@ describe("KeyProducer", () => { }); it("currentConcurrencyKeyFromQueue (no concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -148,7 +130,7 @@ describe("KeyProducer", () => { }); it("currentConcurrencyKey (w concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.currentConcurrencyKey( { id: "e1234", 
@@ -166,7 +148,7 @@ describe("KeyProducer", () => { }); it("currentConcurrencyKey (no concurrency)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.currentConcurrencyKey( { id: "e1234", @@ -182,7 +164,7 @@ describe("KeyProducer", () => { }); it("taskIdentifierCurrentConcurrencyKeyPrefixFromQueue", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -198,7 +180,7 @@ describe("KeyProducer", () => { }); it("taskIdentifierCurrentConcurrencyKeyFromQueue", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -214,7 +196,7 @@ describe("KeyProducer", () => { }); it("taskIdentifierCurrentConcurrencyKey", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.taskIdentifierCurrentConcurrencyKey( { id: "e1234", @@ -229,7 +211,7 @@ describe("KeyProducer", () => { }); it("projectCurrentConcurrencyKey", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.projectCurrentConcurrencyKey({ id: "e1234", type: "PRODUCTION", @@ -241,7 +223,7 @@ describe("KeyProducer", () => { }); it("projectCurrentConcurrencyKeyFromQueue", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.projectCurrentConcurrencyKeyFromQueue( "{org:o1234}:proj:p1234:currentConcurrency" ); @@ -249,7 +231,7 @@ describe("KeyProducer", () => { }); it("disabledConcurrencyLimitKeyFromQueue", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new 
RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -265,7 +247,7 @@ describe("KeyProducer", () => { }); it("envConcurrencyLimitKeyFromQueue", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -281,7 +263,7 @@ describe("KeyProducer", () => { }); it("envCurrentConcurrencyKeyFromQueue", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -297,7 +279,7 @@ describe("KeyProducer", () => { }); it("envCurrentConcurrencyKey", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.envCurrentConcurrencyKey({ id: "e1234", type: "PRODUCTION", @@ -309,13 +291,13 @@ describe("KeyProducer", () => { }); it("messageKey", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const key = keyProducer.messageKey("o1234", "m1234"); expect(key).toBe("{org:o1234}:message:m1234"); }); it("extractComponentsFromQueue (no concurrencyKey)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", @@ -337,7 +319,7 @@ describe("KeyProducer", () => { }); it("extractComponentsFromQueue (w concurrencyKey)", () => { - const keyProducer = new RunQueueShortKeyProducer("test:"); + const keyProducer = new RunQueueFullKeyProducer(); const queueKey = keyProducer.queueKey( { id: "e1234", diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.ts b/internal-packages/run-engine/src/run-queue/keyProducer.ts index 69d1e4d66d..3acdcb9747 100644 --- a/internal-packages/run-engine/src/run-queue/keyProducer.ts +++ 
b/internal-packages/run-engine/src/run-queue/keyProducer.ts @@ -15,25 +15,7 @@ const constants = { RESERVE_CONCURRENCY_PART: "reserveConcurrency", } as const; -export class RunQueueShortKeyProducer implements RunQueueKeyProducer { - constructor(private _prefix: string) {} - - masterQueueScanPattern(masterQueue: string) { - return `${this._prefix}*${masterQueue}`; - } - - queueCurrentConcurrencyScanPattern() { - return `${this._prefix}{${constants.ORG_PART}:*}:${constants.PROJECT_PART}:*:${constants.ENV_PART}:*:${constants.QUEUE_PART}:*:${constants.CURRENT_CONCURRENCY_PART}`; - } - - stripKeyPrefix(key: string): string { - if (key.startsWith(this._prefix)) { - return key.slice(this._prefix.length); - } - - return key; - } - +export class RunQueueFullKeyProducer implements RunQueueKeyProducer { queueConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment, queue: string) { return [this.queueKey(env, queue), constants.CONCURRENCY_LIMIT_PART].join(":"); } @@ -108,8 +90,13 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { } envCurrentConcurrencyKeyFromQueue(queue: string) { - const { orgId, envId } = this.descriptorFromQueue(queue); - return `{${constants.ORG_PART}:${orgId}}:${constants.ENV_PART}:${envId}:${constants.CURRENT_CONCURRENCY_PART}`; + const { orgId, envId, projectId } = this.descriptorFromQueue(queue); + + return this.envCurrentConcurrencyKey({ + orgId, + projectId, + envId, + }); } envCurrentConcurrencyKey(env: EnvDescriptor): string; @@ -120,12 +107,14 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { if ("id" in envOrDescriptor) { return [ this.orgKeySection(envOrDescriptor.organization.id), + this.projKeySection(envOrDescriptor.project.id), this.envKeySection(envOrDescriptor.id), constants.CURRENT_CONCURRENCY_PART, ].join(":"); } else { return [ this.orgKeySection(envOrDescriptor.orgId), + this.projKeySection(envOrDescriptor.projectId), this.envKeySection(envOrDescriptor.envId), 
constants.CURRENT_CONCURRENCY_PART, ].join(":"); @@ -140,12 +129,14 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { if ("id" in envOrDescriptor) { return [ this.orgKeySection(envOrDescriptor.organization.id), + this.projKeySection(envOrDescriptor.project.id), this.envKeySection(envOrDescriptor.id), constants.RESERVE_CONCURRENCY_PART, ].join(":"); } else { return [ this.orgKeySection(envOrDescriptor.orgId), + this.projKeySection(envOrDescriptor.projectId), this.envKeySection(envOrDescriptor.envId), constants.RESERVE_CONCURRENCY_PART, ].join(":"); @@ -215,7 +206,7 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { } descriptorFromQueue(queue: string) { - const parts = this.normalizeQueue(queue).split(":"); + const parts = queue.split(":"); return { orgId: parts[1].replace("{", "").replace("}", ""), projectId: parts[3], @@ -248,13 +239,4 @@ export class RunQueueShortKeyProducer implements RunQueueKeyProducer { private taskIdentifierSection(taskIdentifier: string) { return `${constants.TASK_PART}:${taskIdentifier}`; } - - // This removes the leading prefix from the queue name if it exists - private normalizeQueue(queue: string) { - if (queue.startsWith(this._prefix)) { - return queue.slice(this._prefix.length); - } - - return queue; - } } diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index 27efb25d64..72f417add2 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -37,8 +37,6 @@ export type EnvDescriptor = { }; export interface RunQueueKeyProducer { - masterQueueScanPattern(masterQueue: string): string; - queueCurrentConcurrencyScanPattern(): string; //queue queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string): string; envQueueKey(env: MinimalAuthenticatedEnvironment): string; @@ -78,7 +76,6 @@ export interface RunQueueKeyProducer { 
messageKeyPrefixFromQueue(queue: string): string; messageKey(orgId: string, messageId: string): string; //utils - stripKeyPrefix(key: string): string; orgIdFromQueue(queue: string): string; envIdFromQueue(queue: string): string; projectIdFromQueue(queue: string): string; From 8989906ede770f68f063a522fcc61062553e3260 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 Mar 2025 09:15:32 +0000 Subject: [PATCH 05/12] Get run engine tests to pass --- .vscode/launch.json | 2 +- .../src/engine/tests/batchTrigger.test.ts | 314 +++-- .../engine/tests/batchTriggerAndWait.test.ts | 676 +++++----- .../src/engine/tests/cancelling.test.ts | 191 ++- .../src/engine/tests/checkpoints.test.ts | 2 + .../src/engine/tests/delays.test.ts | 184 ++- .../src/engine/tests/dequeuing.test.ts | 170 ++- .../src/engine/tests/heartbeats.test.ts | 605 +++++---- .../src/engine/tests/notDeployed.test.ts | 4 +- .../src/engine/tests/priority.test.ts | 154 ++- .../src/engine/tests/trigger.test.ts | 6 +- .../src/engine/tests/triggerAndWait.test.ts | 5 +- .../run-engine/src/engine/tests/ttl.test.ts | 4 +- .../src/engine/tests/waitpoints.test.ts | 1147 ++++++++--------- .../src/run-queue/fairDequeuingStrategy.ts | 2 +- .../run-engine/src/run-queue/index.test.ts | 2 +- .../run-engine/src/run-queue/index.ts | 37 +- .../src/run-queue/keyProducer.test.ts | 4 +- 18 files changed, 1741 insertions(+), 1768 deletions(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 8242758d34..ab091cb534 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -146,7 +146,7 @@ "type": "node-terminal", "request": "launch", "name": "Debug RunQueue tests", - "command": "pnpm run test ./src/run-queue/index.test.ts", + "command": "pnpm run test ./src/engine/tests/waitpoints.test.ts", "cwd": "${workspaceFolder}/internal-packages/run-engine", "sourceMaps": true } diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts 
index 9c9d181619..04c38801cb 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -9,174 +9,172 @@ import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "node:timers/promises"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine batchTrigger", () => { - containerTest( - "Batch trigger shares a batch", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + containerTest("Batch trigger shares a batch", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0005, }, - tracer: trace.getTracer("test", "0.0.0"), + baseCostInCents: 0.0005, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); + + const batch = await prisma.batchTaskRun.create({ + 
data: { + friendlyId: generateFriendlyId("batch"), + runtimeEnvironmentId: authenticatedEnvironment.id, + }, }); - try { - const taskIdentifier = "test-task"; + //trigger the runs + const run1 = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + batch: { id: batch.id, index: 0 }, + }, + prisma + ); + + const run2 = await engine.trigger( + { + number: 2, + friendlyId: "run_1235", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + batch: { id: batch.id, index: 1 }, + }, + prisma + ); + + expect(run1).toBeDefined(); + expect(run1.friendlyId).toBe("run_1234"); + expect(run1.batchId).toBe(batch.id); + + expect(run2).toBeDefined(); + expect(run2.friendlyId).toBe("run_1235"); + expect(run2.batchId).toBe(batch.id); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength).toBe(2); + + //dequeue + const [d1, d2] = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run1.masterQueue, + maxRunCount: 10, + }); - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); + //attempts + const attempt1 = await engine.startRunAttempt({ + runId: d1.run.id, + snapshotId: d1.snapshot.id, + }); + const attempt2 = await engine.startRunAttempt({ + runId: d2.run.id, + snapshotId: d2.snapshot.id, + }); - const batch = await prisma.batchTaskRun.create({ - data: { - friendlyId: 
generateFriendlyId("batch"), - runtimeEnvironmentId: authenticatedEnvironment.id, - }, - }); - - //trigger the runs - const run1 = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - batch: { id: batch.id, index: 0 }, - }, - prisma - ); - - const run2 = await engine.trigger( - { - number: 2, - friendlyId: "run_1235", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - batch: { id: batch.id, index: 1 }, - }, - prisma - ); - - expect(run1).toBeDefined(); - expect(run1.friendlyId).toBe("run_1234"); - expect(run1.batchId).toBe(batch.id); - - expect(run2).toBeDefined(); - expect(run2.friendlyId).toBe("run_1235"); - expect(run2.batchId).toBe(batch.id); - - //check the queue length - const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); - expect(queueLength).toBe(2); - - //dequeue - const [d1, d2] = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run1.masterQueue, - maxRunCount: 10, - }); - - //attempts - const attempt1 = await engine.startRunAttempt({ - runId: d1.run.id, - snapshotId: d1.snapshot.id, - }); - const attempt2 = await engine.startRunAttempt({ - runId: d2.run.id, - snapshotId: d2.snapshot.id, - }); - - //complete the runs - const result1 = await engine.completeRunAttempt({ - runId: attempt1.run.id, - snapshotId: attempt1.snapshot.id, - completion: { - ok: true, - id: attempt1.run.id, - output: `{"foo":"bar"}`, - outputType: "application/json", - }, - }); - const result2 = await 
engine.completeRunAttempt({ - runId: attempt2.run.id, - snapshotId: attempt2.snapshot.id, - completion: { - ok: true, - id: attempt2.run.id, - output: `{"baz":"qux"}`, - outputType: "application/json", - }, - }); + //complete the runs + const result1 = await engine.completeRunAttempt({ + runId: attempt1.run.id, + snapshotId: attempt1.snapshot.id, + completion: { + ok: true, + id: attempt1.run.id, + output: `{"foo":"bar"}`, + outputType: "application/json", + }, + }); + const result2 = await engine.completeRunAttempt({ + runId: attempt2.run.id, + snapshotId: attempt2.snapshot.id, + completion: { + ok: true, + id: attempt2.run.id, + output: `{"baz":"qux"}`, + outputType: "application/json", + }, + }); - //the batch won't complete immediately - const batchAfter1 = await prisma.batchTaskRun.findUnique({ - where: { - id: batch.id, - }, - }); - expect(batchAfter1?.status).toBe("PENDING"); + //the batch won't complete immediately + const batchAfter1 = await prisma.batchTaskRun.findUnique({ + where: { + id: batch.id, + }, + }); + expect(batchAfter1?.status).toBe("PENDING"); - await setTimeout(3_000); + await setTimeout(3_000); - //the batch should complete - const batchAfter2 = await prisma.batchTaskRun.findUnique({ - where: { - id: batch.id, - }, - }); - expect(batchAfter2?.status).toBe("COMPLETED"); - } finally { - engine.quit(); - } + //the batch should complete + const batchAfter2 = await prisma.batchTaskRun.findUnique({ + where: { + id: batch.id, + }, + }); + expect(batchAfter2?.status).toBe("COMPLETED"); + } finally { + engine.quit(); } - ); + }); }); diff --git a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts index 28338e49cf..9bd99e3ac3 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts @@ -10,366 +10,360 @@ import { RunEngine } from "../index.js"; 
import { setTimeout } from "node:timers/promises"; import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine batchTriggerAndWait", () => { - containerTest( - "batchTriggerAndWait (no idempotency)", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 20, + containerTest("batchTriggerAndWait (no idempotency)", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 20, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, }, - queue: { - redis: redisOptions, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + const childTask = "child-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); + + //create a batch + const batch = await prisma.batchTaskRun.create({ + data: { + friendlyId: generateFriendlyId("batch"), + runtimeEnvironmentId: authenticatedEnvironment.id, }, - runLock: { - redis: redisOptions, + }); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: 
"t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, + prisma + ); + + //dequeue parent + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const initialExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(initialExecutionData); + const attemptResult = await engine.startRunAttempt({ + runId: parentRun.id, + snapshotId: initialExecutionData.snapshot.id, + }); + + //block using the batch + await engine.blockRunWithCreatedBatch({ + runId: parentRun.id, + batchId: batch.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, + }); + + const afterBlockedByBatch = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(afterBlockedByBatch); + expect(afterBlockedByBatch.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + const child1 = await engine.trigger( + { + number: 1, + friendlyId: "run_c1234", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + batch: { id: batch.id, index: 0 }, + }, + prisma + ); + + const parentAfterChild1 = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentAfterChild1); + expect(parentAfterChild1.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + const 
child2 = await engine.trigger( + { + number: 2, + friendlyId: "run_c12345", + environment: authenticatedEnvironment, + taskIdentifier: childTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t123456", + spanId: "s123456", + masterQueue: "main", + queueName: `task/${childTask}`, + isTest: false, + tags: [], + resumeParentOnCompletion: true, + parentTaskRunId: parentRun.id, + batch: { id: batch.id, index: 1 }, + }, + prisma + ); + + const parentAfterChild2 = await engine.getRunExecutionData({ runId: parentRun.id }); + assertNonNullable(parentAfterChild2); + expect(parentAfterChild2.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //check the waitpoint blocking the parent run + const runWaitpoints = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, }, - tracer: trace.getTracer("test", "0.0.0"), + orderBy: { + createdAt: "asc", + }, + }); + expect(runWaitpoints.length).toBe(3); + const child1Waitpoint = runWaitpoints.find( + (w) => w.waitpoint.completedByTaskRunId === child1.id + ); + expect(child1Waitpoint?.waitpoint.type).toBe("RUN"); + expect(child1Waitpoint?.waitpoint.completedByTaskRunId).toBe(child1.id); + expect(child1Waitpoint?.batchId).toBe(batch.id); + expect(child1Waitpoint?.batchIndex).toBe(0); + const child2Waitpoint = runWaitpoints.find( + (w) => w.waitpoint.completedByTaskRunId === child2.id + ); + expect(child2Waitpoint?.waitpoint.type).toBe("RUN"); + expect(child2Waitpoint?.waitpoint.completedByTaskRunId).toBe(child2.id); + expect(child2Waitpoint?.batchId).toBe(batch.id); + expect(child2Waitpoint?.batchIndex).toBe(1); + const batchWaitpoint = runWaitpoints.find((w) => w.waitpoint.type === "BATCH"); + expect(batchWaitpoint?.waitpoint.type).toBe("BATCH"); + expect(batchWaitpoint?.waitpoint.completedByBatchId).toBe(batch.id); + + await engine.unblockRunForCreatedBatch({ + runId: parentRun.id, + batchId: batch.id, + 
environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, }); - try { - const parentTask = "parent-task"; - const childTask = "child-task"; + //dequeue and start the 1st child + const dequeuedChild = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: child1.masterQueue, + maxRunCount: 1, + }); - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask, childTask]); + expect(dequeuedChild.length).toBe(1); - //create a batch - const batch = await prisma.batchTaskRun.create({ - data: { - friendlyId: generateFriendlyId("batch"), - runtimeEnvironmentId: authenticatedEnvironment.id, - }, - }); - - //trigger the run - const parentRun = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier: parentTask, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${parentTask}`, - isTest: false, - tags: [], - }, - prisma - ); + const childAttempt1 = await engine.startRunAttempt({ + runId: dequeuedChild[0].run.id, + snapshotId: dequeuedChild[0].snapshot.id, + }); - //dequeue parent - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: parentRun.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const initialExecutionData = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(initialExecutionData); - const attemptResult = await engine.startRunAttempt({ - runId: parentRun.id, - snapshotId: initialExecutionData.snapshot.id, - }); - - //block using the batch - await engine.blockRunWithCreatedBatch({ - runId: parentRun.id, - batchId: batch.id, - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - organizationId: authenticatedEnvironment.organizationId, - }); - - const 
afterBlockedByBatch = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(afterBlockedByBatch); - expect(afterBlockedByBatch.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - - const child1 = await engine.trigger( - { - number: 1, - friendlyId: "run_c1234", - environment: authenticatedEnvironment, - taskIdentifier: childTask, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${childTask}`, - isTest: false, - tags: [], - resumeParentOnCompletion: true, - parentTaskRunId: parentRun.id, - batch: { id: batch.id, index: 0 }, - }, - prisma - ); + // complete the 1st child + await engine.completeRunAttempt({ + runId: childAttempt1.run.id, + snapshotId: childAttempt1.snapshot.id, + completion: { + id: child1.id, + ok: true, + output: '{"foo":"bar"}', + outputType: "application/json", + }, + }); - const parentAfterChild1 = await engine.getRunExecutionData({ runId: parentRun.id }); - assertNonNullable(parentAfterChild1); - expect(parentAfterChild1.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - - const child2 = await engine.trigger( - { - number: 2, - friendlyId: "run_c12345", - environment: authenticatedEnvironment, - taskIdentifier: childTask, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t123456", - spanId: "s123456", - masterQueue: "main", - queueName: `task/${childTask}`, - isTest: false, - tags: [], - resumeParentOnCompletion: true, - parentTaskRunId: parentRun.id, - batch: { id: batch.id, index: 1 }, - }, - prisma - ); + //child snapshot + const childExecutionDataAfter = await engine.getRunExecutionData({ + runId: childAttempt1.run.id, + }); + assertNonNullable(childExecutionDataAfter); + expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); - const parentAfterChild2 = await engine.getRunExecutionData({ runId: parentRun.id 
}); - assertNonNullable(parentAfterChild2); - expect(parentAfterChild2.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + const child1WaitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: child1Waitpoint?.waitpointId, + }, + }); + expect(child1WaitpointAfter?.completedAt).not.toBeNull(); + expect(child1WaitpointAfter?.status).toBe("COMPLETED"); + expect(child1WaitpointAfter?.output).toBe('{"foo":"bar"}'); - //check the waitpoint blocking the parent run - const runWaitpoints = await prisma.taskRunWaitpoint.findMany({ - where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, - }, - orderBy: { - createdAt: "asc", - }, - }); - expect(runWaitpoints.length).toBe(3); - const child1Waitpoint = runWaitpoints.find( - (w) => w.waitpoint.completedByTaskRunId === child1.id - ); - expect(child1Waitpoint?.waitpoint.type).toBe("RUN"); - expect(child1Waitpoint?.waitpoint.completedByTaskRunId).toBe(child1.id); - expect(child1Waitpoint?.batchId).toBe(batch.id); - expect(child1Waitpoint?.batchIndex).toBe(0); - const child2Waitpoint = runWaitpoints.find( - (w) => w.waitpoint.completedByTaskRunId === child2.id - ); - expect(child2Waitpoint?.waitpoint.type).toBe("RUN"); - expect(child2Waitpoint?.waitpoint.completedByTaskRunId).toBe(child2.id); - expect(child2Waitpoint?.batchId).toBe(batch.id); - expect(child2Waitpoint?.batchIndex).toBe(1); - const batchWaitpoint = runWaitpoints.find((w) => w.waitpoint.type === "BATCH"); - expect(batchWaitpoint?.waitpoint.type).toBe("BATCH"); - expect(batchWaitpoint?.waitpoint.completedByBatchId).toBe(batch.id); - - await engine.unblockRunForCreatedBatch({ - runId: parentRun.id, - batchId: batch.id, - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - }); - - //dequeue and start the 1st child - const dequeuedChild = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: child1.masterQueue, - maxRunCount: 1, - }); - - 
expect(dequeuedChild.length).toBe(1); - - const childAttempt1 = await engine.startRunAttempt({ - runId: dequeuedChild[0].run.id, - snapshotId: dequeuedChild[0].snapshot.id, - }); - - // complete the 1st child - await engine.completeRunAttempt({ - runId: childAttempt1.run.id, - snapshotId: childAttempt1.snapshot.id, - completion: { - id: child1.id, - ok: true, - output: '{"foo":"bar"}', - outputType: "application/json", - }, - }); - - //child snapshot - const childExecutionDataAfter = await engine.getRunExecutionData({ - runId: childAttempt1.run.id, - }); - assertNonNullable(childExecutionDataAfter); - expect(childExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); - - const child1WaitpointAfter = await prisma.waitpoint.findFirst({ - where: { - id: child1Waitpoint?.waitpointId, - }, - }); - expect(child1WaitpointAfter?.completedAt).not.toBeNull(); - expect(child1WaitpointAfter?.status).toBe("COMPLETED"); - expect(child1WaitpointAfter?.output).toBe('{"foo":"bar"}'); + await setTimeout(500); - await setTimeout(500); + const runWaitpointsAfterFirstChild = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointsAfterFirstChild.length).toBe(3); - const runWaitpointsAfterFirstChild = await prisma.taskRunWaitpoint.findMany({ - where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpointsAfterFirstChild.length).toBe(3); - - //parent snapshot - const parentExecutionDataAfterFirstChildComplete = await engine.getRunExecutionData({ - runId: parentRun.id, - }); - assertNonNullable(parentExecutionDataAfterFirstChildComplete); - expect(parentExecutionDataAfterFirstChildComplete.snapshot.executionStatus).toBe( - "EXECUTING_WITH_WAITPOINTS" - ); - expect(parentExecutionDataAfterFirstChildComplete.batch?.id).toBe(batch.id); - expect(parentExecutionDataAfterFirstChildComplete.completedWaitpoints.length).toBe(0); - - expect(await 
engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment)).toBe(1); - - //dequeue and start the 2nd child - const dequeuedChild2 = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: child2.masterQueue, - maxRunCount: 1, - }); - - expect(dequeuedChild2.length).toBe(1); - - const childAttempt2 = await engine.startRunAttempt({ - runId: child2.id, - snapshotId: dequeuedChild2[0].snapshot.id, - }); - await engine.completeRunAttempt({ - runId: child2.id, - snapshotId: childAttempt2.snapshot.id, - completion: { - id: child2.id, - ok: true, - output: '{"baz":"qux"}', - outputType: "application/json", - }, - }); + //parent snapshot + const parentExecutionDataAfterFirstChildComplete = await engine.getRunExecutionData({ + runId: parentRun.id, + }); + assertNonNullable(parentExecutionDataAfterFirstChildComplete); + expect(parentExecutionDataAfterFirstChildComplete.snapshot.executionStatus).toBe( + "EXECUTING_WITH_WAITPOINTS" + ); + expect(parentExecutionDataAfterFirstChildComplete.batch?.id).toBe(batch.id); + expect(parentExecutionDataAfterFirstChildComplete.completedWaitpoints.length).toBe(0); + + expect(await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment)).toBe(1); + + //dequeue and start the 2nd child + const dequeuedChild2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: child2.masterQueue, + maxRunCount: 1, + }); - //child snapshot - const child2ExecutionDataAfter = await engine.getRunExecutionData({ runId: child1.id }); - assertNonNullable(child2ExecutionDataAfter); - expect(child2ExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); + expect(dequeuedChild2.length).toBe(1); - const child2WaitpointAfter = await prisma.waitpoint.findFirst({ - where: { - id: child2Waitpoint?.waitpointId, - }, - }); - expect(child2WaitpointAfter?.completedAt).not.toBeNull(); - expect(child2WaitpointAfter?.status).toBe("COMPLETED"); - expect(child2WaitpointAfter?.output).toBe('{"baz":"qux"}'); + const 
childAttempt2 = await engine.startRunAttempt({ + runId: child2.id, + snapshotId: dequeuedChild2[0].snapshot.id, + }); + await engine.completeRunAttempt({ + runId: child2.id, + snapshotId: childAttempt2.snapshot.id, + completion: { + id: child2.id, + ok: true, + output: '{"baz":"qux"}', + outputType: "application/json", + }, + }); - await setTimeout(500); + //child snapshot + const child2ExecutionDataAfter = await engine.getRunExecutionData({ runId: child1.id }); + assertNonNullable(child2ExecutionDataAfter); + expect(child2ExecutionDataAfter.snapshot.executionStatus).toBe("FINISHED"); - const runWaitpointsAfterSecondChild = await prisma.taskRunWaitpoint.findMany({ - where: { - taskRunId: parentRun.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpointsAfterSecondChild.length).toBe(0); - - //parent snapshot - const parentExecutionDataAfterSecondChildComplete = await engine.getRunExecutionData({ - runId: parentRun.id, - }); - assertNonNullable(parentExecutionDataAfterSecondChildComplete); - expect(parentExecutionDataAfterSecondChildComplete.snapshot.executionStatus).toBe( - "EXECUTING" + const child2WaitpointAfter = await prisma.waitpoint.findFirst({ + where: { + id: child2Waitpoint?.waitpointId, + }, + }); + expect(child2WaitpointAfter?.completedAt).not.toBeNull(); + expect(child2WaitpointAfter?.status).toBe("COMPLETED"); + expect(child2WaitpointAfter?.output).toBe('{"baz":"qux"}'); + + await setTimeout(500); + + const runWaitpointsAfterSecondChild = await prisma.taskRunWaitpoint.findMany({ + where: { + taskRunId: parentRun.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointsAfterSecondChild.length).toBe(0); + + //parent snapshot + const parentExecutionDataAfterSecondChildComplete = await engine.getRunExecutionData({ + runId: parentRun.id, + }); + assertNonNullable(parentExecutionDataAfterSecondChildComplete); + expect(parentExecutionDataAfterSecondChildComplete.snapshot.executionStatus).toBe( + "EXECUTING" + ); + 
expect(parentExecutionDataAfterSecondChildComplete.batch?.id).toBe(batch.id); + expect(parentExecutionDataAfterSecondChildComplete.completedWaitpoints.length).toBe(3); + + const completedWaitpoint0 = + parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find((w) => w.index === 0); + assertNonNullable(completedWaitpoint0); + expect(completedWaitpoint0.id).toBe(child1Waitpoint!.waitpointId); + expect(completedWaitpoint0.completedByTaskRun?.id).toBe(child1.id); + expect(completedWaitpoint0.completedByTaskRun?.batch?.id).toBe(batch.id); + expect(completedWaitpoint0.output).toBe('{"foo":"bar"}'); + expect(completedWaitpoint0.index).toBe(0); + + const completedWaitpoint1 = + parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find((w) => w.index === 1); + assertNonNullable(completedWaitpoint1); + expect(completedWaitpoint1.id).toBe(child2Waitpoint!.waitpointId); + expect(completedWaitpoint1.completedByTaskRun?.id).toBe(child2.id); + expect(completedWaitpoint1.completedByTaskRun?.batch?.id).toBe(batch.id); + expect(completedWaitpoint1.index).toBe(1); + expect(completedWaitpoint1.output).toBe('{"baz":"qux"}'); + + const batchWaitpointAfter = + parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find( + (w) => w.type === "BATCH" ); - expect(parentExecutionDataAfterSecondChildComplete.batch?.id).toBe(batch.id); - expect(parentExecutionDataAfterSecondChildComplete.completedWaitpoints.length).toBe(3); - - const completedWaitpoint0 = - parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find( - (w) => w.index === 0 - ); - assertNonNullable(completedWaitpoint0); - expect(completedWaitpoint0.id).toBe(child1Waitpoint!.waitpointId); - expect(completedWaitpoint0.completedByTaskRun?.id).toBe(child1.id); - expect(completedWaitpoint0.completedByTaskRun?.batch?.id).toBe(batch.id); - expect(completedWaitpoint0.output).toBe('{"foo":"bar"}'); - expect(completedWaitpoint0.index).toBe(0); - - const completedWaitpoint1 = - 
parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find( - (w) => w.index === 1 - ); - assertNonNullable(completedWaitpoint1); - expect(completedWaitpoint1.id).toBe(child2Waitpoint!.waitpointId); - expect(completedWaitpoint1.completedByTaskRun?.id).toBe(child2.id); - expect(completedWaitpoint1.completedByTaskRun?.batch?.id).toBe(batch.id); - expect(completedWaitpoint1.index).toBe(1); - expect(completedWaitpoint1.output).toBe('{"baz":"qux"}'); - - const batchWaitpointAfter = - parentExecutionDataAfterSecondChildComplete.completedWaitpoints.find( - (w) => w.type === "BATCH" - ); - expect(batchWaitpointAfter?.id).toBe(batchWaitpoint?.waitpointId); - expect(batchWaitpointAfter?.completedByBatch?.id).toBe(batch.id); - expect(batchWaitpointAfter?.index).toBeUndefined(); - - const batchAfter = await prisma.batchTaskRun.findUnique({ - where: { - id: batch.id, - }, - }); - expect(batchAfter?.status === "COMPLETED"); - } finally { - engine.quit(); - } + expect(batchWaitpointAfter?.id).toBe(batchWaitpoint?.waitpointId); + expect(batchWaitpointAfter?.completedByBatch?.id).toBe(batch.id); + expect(batchWaitpointAfter?.index).toBeUndefined(); + + const batchAfter = await prisma.batchTaskRun.findUnique({ + where: { + id: batch.id, + }, + }); + expect(batchAfter?.status === "COMPLETED"); + } finally { + engine.quit(); } - ); + }); }); diff --git a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts index 297883c777..c5288cfdc2 100644 --- a/internal-packages/run-engine/src/engine/tests/cancelling.test.ts +++ b/internal-packages/run-engine/src/engine/tests/cancelling.test.ts @@ -10,10 +10,11 @@ import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; import { EventBusEventArgs } from "../eventBus.js"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine cancelling", () => { containerTest( "Cancelling a run with children (that is executing)", - { 
timeout: 15_000 }, async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -228,109 +229,105 @@ describe("RunEngine cancelling", () => { } ); - containerTest( - "Cancelling a run (not executing)", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + containerTest("Cancelling a run (not executing)", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, }, - tracer: trace.getTracer("test", "0.0.0"), + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const parentTask = "parent-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask]); + + //trigger the run + const parentRun = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier: parentTask, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: 
{}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: `task/${parentTask}`, + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: parentRun.masterQueue, + maxRunCount: 10, }); - try { - const parentTask = "parent-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, [parentTask]); - - //trigger the run - const parentRun = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier: parentTask, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: `task/${parentTask}`, - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: parentRun.masterQueue, - maxRunCount: 10, - }); - - let cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; - engine.eventBus.on("runCancelled", (result) => { - cancelledEventData.push(result); - }); - - //cancel the parent run - const result = await engine.cancelRun({ - runId: parentRun.id, - completedAt: new Date(), - reason: "Cancelled by the user", - }); - expect(result.snapshot.executionStatus).toBe("FINISHED"); - - const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); - expect(executionData?.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData?.run.status).toBe("CANCELED"); - - //check emitted event - expect(cancelledEventData.length).toBe(1); - const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); - assertNonNullable(parentEvent); - expect(parentEvent.run.spanId).toBe(parentRun.spanId); + let cancelledEventData: EventBusEventArgs<"runCancelled">[0][] = []; + engine.eventBus.on("runCancelled", 
(result) => { + cancelledEventData.push(result); + }); - //concurrency should have been released - const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( - authenticatedEnvironment - ); - expect(envConcurrencyCompleted).toBe(0); - } finally { - engine.quit(); - } + //cancel the parent run + const result = await engine.cancelRun({ + runId: parentRun.id, + completedAt: new Date(), + reason: "Cancelled by the user", + }); + expect(result.snapshot.executionStatus).toBe("FINISHED"); + + const executionData = await engine.getRunExecutionData({ runId: parentRun.id }); + expect(executionData?.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData?.run.status).toBe("CANCELED"); + + //check emitted event + expect(cancelledEventData.length).toBe(1); + const parentEvent = cancelledEventData.find((r) => r.run.id === parentRun.id); + assertNonNullable(parentEvent); + expect(parentEvent.run.spanId).toBe(parentRun.spanId); + + //concurrency should have been released + const envConcurrencyCompleted = await engine.runQueue.currentConcurrencyOfEnvironment( + authenticatedEnvironment + ); + expect(envConcurrencyCompleted).toBe(0); + } finally { + engine.quit(); } - ); + }); //todo bulk cancelling runs }); diff --git a/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts index 7b73cd13c6..a5a1a8b3b4 100644 --- a/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/checkpoints.test.ts @@ -11,6 +11,8 @@ import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; import { EventBusEventArgs } from "../eventBus.js"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine checkpoints", () => { //todo checkpoint tests test("empty test", async () => {}); diff --git a/internal-packages/run-engine/src/engine/tests/delays.test.ts 
b/internal-packages/run-engine/src/engine/tests/delays.test.ts index e62fef8f2d..0d87d27e47 100644 --- a/internal-packages/run-engine/src/engine/tests/delays.test.ts +++ b/internal-packages/run-engine/src/engine/tests/delays.test.ts @@ -9,8 +9,10 @@ import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine delays", () => { - containerTest("Run start delayed", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("Run start delayed", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -92,101 +94,97 @@ describe("RunEngine delays", () => { } }); - containerTest( - "Rescheduling a delayed run", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Rescheduling a delayed run", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, }, - 
tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - delayUntil: new Date(Date.now() + 200), - }, - prisma - ); - - //should be created but not queued yet - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); - - const rescheduleTo = new Date(Date.now() + 1_500); - const updatedRun = await engine.rescheduleRun({ runId: run.id, delayUntil: rescheduleTo }); - expect(updatedRun.delayUntil?.toISOString()).toBe(rescheduleTo.toISOString()); - - //wait so the initial delay passes - await setTimeout(1_000); - - //should still be created - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("RUN_CREATED"); - - //wait so the updated delay passes - await setTimeout(1_750); - - //should now be queued - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); - } finally { - engine.quit(); - } + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); 
+ + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + delayUntil: new Date(Date.now() + 200), + }, + prisma + ); + + //should be created but not queued yet + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("RUN_CREATED"); + + const rescheduleTo = new Date(Date.now() + 1_500); + const updatedRun = await engine.rescheduleRun({ runId: run.id, delayUntil: rescheduleTo }); + expect(updatedRun.delayUntil?.toISOString()).toBe(rescheduleTo.toISOString()); + + //wait so the initial delay passes + await setTimeout(1_000); + + //should still be created + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("RUN_CREATED"); + + //wait so the updated delay passes + await setTimeout(1_750); + + //should now be queued + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); + } finally { + engine.quit(); } - ); + }); }); diff --git a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts index 9ed3d1a2f9..2b1b0da307 100644 --- a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts +++ b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts @@ -11,8 +11,10 @@ import { setTimeout } from "node:timers/promises"; import { MinimalAuthenticatedEnvironment } from "../../shared/index.js"; import { PrismaClientOrTransaction } from 
"@trigger.dev/database"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine dequeuing", () => { - containerTest("Dequeues 5 runs", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("Dequeues 5 runs", async ({ prisma, redisOptions }) => { const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); const engine = new RunEngine({ @@ -77,98 +79,94 @@ describe("RunEngine dequeuing", () => { } }); - containerTest( - "Dequeues runs within machine constraints", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Dequeues runs within machine constraints", async ({ prisma, redisOptions }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0005, }, - tracer: trace.getTracer("test", "0.0.0"), + baseCostInCents: 0.0005, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier, { + preset: "small-1x", + }); + + //trigger the runs + const runs = 
await triggerRuns({ + engine, + environment: authenticatedEnvironment, + taskIdentifier, + prisma, + count: 20, + }); + expect(runs.length).toBe(20); + + //check the queue length + const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength).toBe(20); + + //dequeue + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 5, + maxResources: { + cpu: 1.1, + memory: 3.8, + }, }); + expect(dequeued.length).toBe(2); - try { - const taskIdentifier = "test-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier, { - preset: "small-1x", - }); - - //trigger the runs - const runs = await triggerRuns({ - engine, - environment: authenticatedEnvironment, - taskIdentifier, - prisma, - count: 20, - }); - expect(runs.length).toBe(20); - - //check the queue length - const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); - expect(queueLength).toBe(20); - - //dequeue - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: "main", - maxRunCount: 5, - maxResources: { - cpu: 1.1, - memory: 3.8, - }, - }); - expect(dequeued.length).toBe(2); - - //check the queue length - const queueLength2 = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); - expect(queueLength2).toBe(18); - - const dequeued2 = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: "main", - maxRunCount: 10, - maxResources: { - cpu: 4.7, - memory: 3.0, - }, - }); - expect(dequeued2.length).toBe(6); - - //check the queue length - const queueLength3 = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); - expect(queueLength3).toBe(12); - } finally { - engine.quit(); - } + //check the queue length + const queueLength2 = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength2).toBe(18); + + 
const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 10, + maxResources: { + cpu: 4.7, + memory: 3.0, + }, + }); + expect(dequeued2.length).toBe(6); + + //check the queue length + const queueLength3 = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength3).toBe(12); + } finally { + engine.quit(); } - ); + }); }); async function triggerRuns({ diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index c4112cf35c..9d07e36729 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -9,251 +9,244 @@ import { expect, describe } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine heartbeats", () => { - containerTest( - "Attempt timeout then successfully attempted", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Attempt timeout then successfully attempted", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const pendingExecutingTimeout = 100; + const pendingExecutingTimeout = 100; - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - retryOptions: { - maxTimeoutInMs: 50, - }, - }, - runLock: { - redis: redisOptions, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + retryOptions: { 
+ maxTimeoutInMs: 50, }, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, - }, - heartbeatTimeoutsMs: { - PENDING_EXECUTING: pendingExecutingTimeout, }, - tracer: trace.getTracer("test", "0.0.0"), - }); + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + PENDING_EXECUTING: pendingExecutingTimeout, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); - try { - const taskIdentifier = "test-task"; + try { + const taskIdentifier = "test-task"; - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - 
}); + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); - //expect it to be pending with 0 consecutiveFailures - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + //expect it to be pending with 0 consecutiveFailures + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); - await setTimeout(pendingExecutingTimeout * 2); + await setTimeout(pendingExecutingTimeout * 2); - //expect it to be pending with 3 consecutiveFailures - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + //expect it to be pending with 3 consecutiveFailures + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); - await setTimeout(1_000); + await setTimeout(1_000); - //have to dequeue again - const dequeued2 = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - expect(dequeued2.length).toBe(1); + //have to dequeue again + const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued2.length).toBe(1); - // create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued2[0].run.id, - snapshotId: dequeued2[0].snapshot.id, - }); - expect(attemptResult.run.id).toBe(run.id); - expect(attemptResult.run.status).toBe("EXECUTING"); - 
expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - } finally { - await engine.quit(); - } + // create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued2[0].run.id, + snapshotId: dequeued2[0].snapshot.id, + }); + expect(attemptResult.run.id).toBe(run.id); + expect(attemptResult.run.status).toBe("EXECUTING"); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + } finally { + await engine.quit(); } - ); + }); - containerTest( - "All start attempts timeout", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("All start attempts timeout", async ({ prisma, redisOptions }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const pendingExecutingTimeout = 100; + const pendingExecutingTimeout = 100; - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - retryOptions: { - //intentionally set the attempts to 2 and quick - maxAttempts: 2, - minTimeoutInMs: 50, - maxTimeoutInMs: 50, - }, - }, - runLock: { - redis: redisOptions, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + retryOptions: { + //intentionally set the attempts to 2 and quick + maxAttempts: 2, + minTimeoutInMs: 50, + maxTimeoutInMs: 50, }, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, - }, - heartbeatTimeoutsMs: { 
- PENDING_EXECUTING: pendingExecutingTimeout, }, + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + PENDING_EXECUTING: pendingExecutingTimeout, + }, - tracer: trace.getTracer("test", "0.0.0"), - }); + tracer: trace.getTracer("test", "0.0.0"), + }); - try { - const taskIdentifier = "test-task"; + try { + const taskIdentifier = "test-task"; - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); - //expect it to be pending - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + //expect it to be pending + const 
executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("PENDING_EXECUTING"); - await setTimeout(500); + await setTimeout(500); - //expect it to be pending with 3 consecutiveFailures - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); + //expect it to be pending with 3 consecutiveFailures + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("QUEUED"); - //have to dequeue again - const dequeued2 = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - expect(dequeued2.length).toBe(1); + //have to dequeue again + const dequeued2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + expect(dequeued2.length).toBe(1); - //expect it to be pending - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + //expect it to be pending + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("PENDING_EXECUTING"); - await setTimeout(pendingExecutingTimeout * 3); + await setTimeout(pendingExecutingTimeout * 3); - //expect it to be pending with 3 consecutiveFailures - const executionData4 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData4); - expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); - expect(executionData4.run.status).toBe("SYSTEM_FAILURE"); - } finally { - await engine.quit(); - } + 
//expect it to be pending with 3 consecutiveFailures + const executionData4 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData4); + expect(executionData4.snapshot.executionStatus).toBe("FINISHED"); + expect(executionData4.run.status).toBe("SYSTEM_FAILURE"); + } finally { + await engine.quit(); } - ); + }); containerTest( "Execution timeout (worker doesn't heartbeat)", - { timeout: 15_000 }, async ({ prisma, redisOptions }) => { const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -389,7 +382,7 @@ describe("RunEngine heartbeats", () => { } ); - containerTest("Pending cancel", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("Pending cancel", async ({ prisma, redisOptions }) => { const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); const heartbeatTimeout = 100; @@ -491,122 +484,118 @@ describe("RunEngine heartbeats", () => { } }); - containerTest( - "Heartbeat keeps run alive", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Heartbeat keeps run alive", async ({ prisma, redisOptions }) => { + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const executingTimeout = 100; + const executingTimeout = 100; - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: 
"small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, }, - heartbeatTimeoutsMs: { - EXECUTING: executingTimeout, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); + baseCostInCents: 0.0001, + }, + heartbeatTimeoutsMs: { + EXECUTING: executingTimeout, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); - try { - const taskIdentifier = "test-task"; + try { + const taskIdentifier = "test-task"; - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); - //create an attempt - const attempt = await 
engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); + //create an attempt + const attempt = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); - //should be executing - const executionData = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData); - expect(executionData.snapshot.executionStatus).toBe("EXECUTING"); - expect(executionData.run.status).toBe("EXECUTING"); + //should be executing + const executionData = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData); + expect(executionData.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData.run.status).toBe("EXECUTING"); + + // Send heartbeats every 50ms (half the timeout) + for (let i = 0; i < 6; i++) { + await setTimeout(50); + await engine.heartbeatRun({ + runId: run.id, + snapshotId: attempt.snapshot.id, + }); + } - // Send heartbeats every 50ms (half the timeout) - for (let i = 0; i < 6; i++) { - await setTimeout(50); - await engine.heartbeatRun({ - runId: run.id, - snapshotId: attempt.snapshot.id, - }); - } - - // After 300ms (3x the timeout) the run should still be executing - // because we've been sending heartbeats - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); - expect(executionData2.run.status).toBe("EXECUTING"); + // After 300ms (3x the timeout) the run should still be executing + // because we've been sending heartbeats + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData2.run.status).toBe("EXECUTING"); - // Stop sending heartbeats and wait for timeout - await setTimeout(executingTimeout * 2); + // Stop sending heartbeats and wait for 
timeout + await setTimeout(executingTimeout * 3); - // Now it should have timed out and be queued - const executionData3 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData3); - expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); - } finally { - await engine.quit(); - } + // Now it should have timed out and be queued + const executionData3 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData3); + expect(executionData3.snapshot.executionStatus).toBe("QUEUED"); + } finally { + await engine.quit(); } - ); + }); }); diff --git a/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts index 80b95d3e13..82211bd0bf 100644 --- a/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts +++ b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts @@ -9,8 +9,10 @@ import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine not deployed", () => { - containerTest("Not yet deployed", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("Not yet deployed", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); diff --git a/internal-packages/run-engine/src/engine/tests/priority.test.ts b/internal-packages/run-engine/src/engine/tests/priority.test.ts index ecc36ff8da..d2a9a5c809 100644 --- a/internal-packages/run-engine/src/engine/tests/priority.test.ts +++ b/internal-packages/run-engine/src/engine/tests/priority.test.ts @@ -11,97 +11,95 @@ import { PrismaClientOrTransaction } from "@trigger.dev/database"; import { MinimalAuthenticatedEnvironment } from "../../shared/index.js"; import { setTimeout } from "timers/promises"; +vi.setConfig({ testTimeout: 60_000 }); + 
describe("RunEngine priority", () => { - containerTest( - "Two runs execute in the correct order", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Two runs execute in the correct order", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0005, }, - tracer: trace.getTracer("test", "0.0.0"), - }); + baseCostInCents: 0.0005, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); - try { - const taskIdentifier = "test-task"; + try { + const taskIdentifier = "test-task"; - //create background worker - const backgroundWorker = await setupBackgroundWorker( - prisma, - authenticatedEnvironment, - taskIdentifier - ); + //create background worker + const backgroundWorker = await setupBackgroundWorker( + prisma, + authenticatedEnvironment, + taskIdentifier + ); - //the order should be 4,3,1,0,2 - // 0 1 2 3 4 - const priorities = [undefined, 500, -1200, 1000, 4000]; + //the order should be 4,3,1,0,2 + // 0 1 2 3 4 + const priorities = [undefined, 500, -1200, 1000, 4000]; - 
//trigger the runs - const runs = await triggerRuns({ - engine, - environment: authenticatedEnvironment, - taskIdentifier, - prisma, - priorities, - }); - expect(runs.length).toBe(priorities.length); + //trigger the runs + const runs = await triggerRuns({ + engine, + environment: authenticatedEnvironment, + taskIdentifier, + prisma, + priorities, + }); + expect(runs.length).toBe(priorities.length); - //check the queue length - const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); - expect(queueLength).toBe(priorities.length); + //check the queue length + const queueLength = await engine.runQueue.lengthOfEnvQueue(authenticatedEnvironment); + expect(queueLength).toBe(priorities.length); - //dequeue (expect 4 items because of the negative priority) - const dequeue = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: "main", - maxRunCount: 20, - }); - expect(dequeue.length).toBe(4); - expect(dequeue[0].run.friendlyId).toBe(runs[4].friendlyId); - expect(dequeue[1].run.friendlyId).toBe(runs[3].friendlyId); - expect(dequeue[2].run.friendlyId).toBe(runs[1].friendlyId); - expect(dequeue[3].run.friendlyId).toBe(runs[0].friendlyId); + //dequeue (expect 4 items because of the negative priority) + const dequeue = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 20, + }); + expect(dequeue.length).toBe(4); + expect(dequeue[0].run.friendlyId).toBe(runs[4].friendlyId); + expect(dequeue[1].run.friendlyId).toBe(runs[3].friendlyId); + expect(dequeue[2].run.friendlyId).toBe(runs[1].friendlyId); + expect(dequeue[3].run.friendlyId).toBe(runs[0].friendlyId); - //wait 2 seconds (because of the negative priority) - await setTimeout(2_000); - const dequeue2 = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: "main", - maxRunCount: 20, - }); - expect(dequeue2.length).toBe(1); - expect(dequeue2[0].run.friendlyId).toBe(runs[2].friendlyId); - } 
finally { - engine.quit(); - } + //wait 2 seconds (because of the negative priority) + await setTimeout(2_000); + const dequeue2 = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: "main", + maxRunCount: 20, + }); + expect(dequeue2.length).toBe(1); + expect(dequeue2[0].run.friendlyId).toBe(runs[2].friendlyId); + } finally { + engine.quit(); } - ); + }); }); async function triggerRuns({ diff --git a/internal-packages/run-engine/src/engine/tests/trigger.test.ts b/internal-packages/run-engine/src/engine/tests/trigger.test.ts index da4dbbc69b..ba795c3348 100644 --- a/internal-packages/run-engine/src/engine/tests/trigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/trigger.test.ts @@ -9,8 +9,10 @@ import { expect } from "vitest"; import { EventBusEventArgs } from "../eventBus.js"; import { RunEngine } from "../index.js"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine trigger()", () => { - containerTest("Single run (success)", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("Single run (success)", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -204,7 +206,7 @@ describe("RunEngine trigger()", () => { } }); - containerTest("Single run (failed)", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("Single run (failed)", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); diff --git a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts index 02d75ee15a..401d03edcc 100644 --- a/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/triggerAndWait.test.ts @@ -9,8 +9,10 @@ import { expect } from "vitest"; import { 
RunEngine } from "../index.js"; import { setTimeout } from "node:timers/promises"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine triggerAndWait", () => { - containerTest("triggerAndWait", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("triggerAndWait", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -198,7 +200,6 @@ describe("RunEngine triggerAndWait", () => { /** This happens if you `triggerAndWait` with an idempotencyKey if that run is in progress */ containerTest( "triggerAndWait two runs with shared awaited child", - { timeout: 15_000 }, async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); diff --git a/internal-packages/run-engine/src/engine/tests/ttl.test.ts b/internal-packages/run-engine/src/engine/tests/ttl.test.ts index e48597ae1a..e46b93f449 100644 --- a/internal-packages/run-engine/src/engine/tests/ttl.test.ts +++ b/internal-packages/run-engine/src/engine/tests/ttl.test.ts @@ -10,8 +10,10 @@ import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; import { EventBusEventArgs } from "../eventBus.js"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine ttl", () => { - containerTest("Run expiring (ttl)", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("Run expiring (ttl)", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); diff --git a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts index 771d070781..db0eb3ec73 100644 --- a/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts +++ b/internal-packages/run-engine/src/engine/tests/waitpoints.test.ts @@ -11,8 
+11,10 @@ import { setTimeout } from "timers/promises"; import { EventBusEventArgs } from "../eventBus.js"; import { isWaitpointOutputTimeout } from "@trigger.dev/core/v3"; +vi.setConfig({ testTimeout: 60_000 }); + describe("RunEngine Waitpoints", () => { - containerTest("waitForDuration", { timeout: 15_000 }, async ({ prisma, redisOptions }) => { + containerTest("waitForDuration", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -130,9 +132,149 @@ describe("RunEngine Waitpoints", () => { } }); + containerTest("Waitpoints cleared if attempt fails", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + 
+ //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //waitForDuration + const date = new Date(Date.now() + 60_000); + const { waitpoint } = await engine.createDateTimeWaitpoint({ + projectId: authenticatedEnvironment.project.id, + environmentId: authenticatedEnvironment.id, + completedAfter: date, + }); + expect(waitpoint.completedAfter!.toISOString()).toBe(date.toISOString()); + + const result = await engine.blockRunWithWaitpoint({ + runId: run.id, + waitpoints: [waitpoint.id], + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.project.id, + organizationId: authenticatedEnvironment.organization.id, + }); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + //fail the attempt (user error) + const error = { + type: "BUILT_IN_ERROR" as const, + name: "UserError", + message: "This is a user error", + stackTrace: "Error: This is a user error\n at :1:1", + }; + const failResult = await engine.completeRunAttempt({ + runId: executionData!.run.id, + snapshotId: executionData!.snapshot.id, + completion: { + ok: false, + id: executionData!.run.id, + error, + retry: { + timestamp: Date.now(), + delay: 0, + }, + }, + }); + expect(failResult.attemptStatus).toBe("RETRY_IMMEDIATELY"); + expect(failResult.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + expect(failResult.run.attemptNumber).toBe(1); + expect(failResult.run.status).toBe("RETRYING_AFTER_FAILURE"); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + assertNonNullable(executionData2); + expect(executionData2.snapshot.executionStatus).toBe("PENDING_EXECUTING"); + expect(executionData2.run.attemptNumber).toBe(1); + 
expect(executionData2.run.status).toBe("RETRYING_AFTER_FAILURE"); + expect(executionData2.completedWaitpoints.length).toBe(0); + + //check there are no waitpoints blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoint).toBeNull(); + } finally { + engine.quit(); + } + }); + containerTest( - "Waitpoints cleared if attempt fails", - { timeout: 15_000 }, + "Create, block, and complete a Manual waitpoint", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -207,57 +349,54 @@ describe("RunEngine Waitpoints", () => { }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - //waitForDuration - const date = new Date(Date.now() + 60_000); - const { waitpoint } = await engine.createDateTimeWaitpoint({ - projectId: authenticatedEnvironment.project.id, + //create a manual waitpoint + const result = await engine.createManualWaitpoint({ environmentId: authenticatedEnvironment.id, - completedAfter: date, + projectId: authenticatedEnvironment.projectId, }); - expect(waitpoint.completedAfter!.toISOString()).toBe(date.toISOString()); + expect(result.waitpoint.status).toBe("PENDING"); - const result = await engine.blockRunWithWaitpoint({ + //block the run + await engine.blockRunWithWaitpoint({ runId: run.id, - waitpoints: [waitpoint.id], + waitpoints: result.waitpoint.id, environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.project.id, - organizationId: authenticatedEnvironment.organization.id, + projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, }); const executionData = await engine.getRunExecutionData({ runId: run.id }); expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - //fail the attempt (user error) - const error 
= { - type: "BUILT_IN_ERROR" as const, - name: "UserError", - message: "This is a user error", - stackTrace: "Error: This is a user error\n at :1:1", - }; - const failResult = await engine.completeRunAttempt({ - runId: executionData!.run.id, - snapshotId: executionData!.snapshot.id, - completion: { - ok: false, - id: executionData!.run.id, - error, - retry: { - timestamp: Date.now(), - delay: 0, - }, + //check there is a waitpoint blocking the parent run + const runWaitpointBefore = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); + + let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; + engine.eventBus.on("workerNotification", (result) => { + event = result; + }); + + //complete the waitpoint + await engine.completeWaitpoint({ + id: result.waitpoint.id, }); - expect(failResult.attemptStatus).toBe("RETRY_IMMEDIATELY"); - expect(failResult.snapshot.executionStatus).toBe("PENDING_EXECUTING"); - expect(failResult.run.attemptNumber).toBe(1); - expect(failResult.run.status).toBe("RETRYING_AFTER_FAILURE"); + + await setTimeout(200); + + assertNonNullable(event); + const notificationEvent = event as EventBusEventArgs<"workerNotification">[0]; + expect(notificationEvent.run.id).toBe(run.id); const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - assertNonNullable(executionData2); - expect(executionData2.snapshot.executionStatus).toBe("PENDING_EXECUTING"); - expect(executionData2.run.attemptNumber).toBe(1); - expect(executionData2.run.status).toBe("RETRYING_AFTER_FAILURE"); - expect(executionData2.completedWaitpoints.length).toBe(0); + expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); //check there are no waitpoints blocking the parent run const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ @@ -275,9 +414,124 @@ describe("RunEngine Waitpoints", () => { } ); + 
containerTest("Manual waitpoint failAfter", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", + machines: { + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, + }, + }, + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); + + try { + const taskIdentifier = "test-task"; + + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); + + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); + + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + + //create a manual waitpoint + const result = await engine.createManualWaitpoint({ + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + //fail after 200ms + timeout: new Date(Date.now() + 200), + }); + + //block the run + await engine.blockRunWithWaitpoint({ + runId: run.id, + waitpoints: result.waitpoint.id, + environmentId: 
authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, + }); + + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + + await setTimeout(750); + + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); + expect(executionData2?.completedWaitpoints.length).toBe(1); + expect(executionData2?.completedWaitpoints[0].outputIsError).toBe(true); + + //check there are no waitpoints blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoint).toBeNull(); + } finally { + engine.quit(); + } + }); + containerTest( - "Create, block, and complete a Manual waitpoint", - { timeout: 15_000 }, + "Race condition with multiple waitpoints completing simultaneously", async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -352,272 +606,10 @@ describe("RunEngine Waitpoints", () => { }); expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - //create a manual waitpoint - const result = await engine.createManualWaitpoint({ - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - }); - expect(result.waitpoint.status).toBe("PENDING"); + const iterationCount = 10; - //block the run - await engine.blockRunWithWaitpoint({ - runId: run.id, - waitpoints: result.waitpoint.id, - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - organizationId: authenticatedEnvironment.organizationId, - }); - - const executionData = await engine.getRunExecutionData({ runId: run.id }); - 
expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - - //check there is a waitpoint blocking the parent run - const runWaitpointBefore = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: run.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); - - let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; - engine.eventBus.on("workerNotification", (result) => { - event = result; - }); - - //complete the waitpoint - await engine.completeWaitpoint({ - id: result.waitpoint.id, - }); - - await setTimeout(200); - - assertNonNullable(event); - const notificationEvent = event as EventBusEventArgs<"workerNotification">[0]; - expect(notificationEvent.run.id).toBe(run.id); - - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); - - //check there are no waitpoints blocking the parent run - const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: run.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpoint).toBeNull(); - } finally { - engine.quit(); - } - } - ); - - containerTest( - "Manual waitpoint failAfter", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - 
//create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - //create a manual waitpoint - const result = await engine.createManualWaitpoint({ - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - //fail after 200ms - timeout: new Date(Date.now() + 200), - }); - - //block the run - await engine.blockRunWithWaitpoint({ - runId: run.id, - waitpoints: result.waitpoint.id, - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - organizationId: authenticatedEnvironment.organizationId, - }); - - const executionData = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - - await setTimeout(750); - - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); - expect(executionData2?.completedWaitpoints.length).toBe(1); - expect(executionData2?.completedWaitpoints[0].outputIsError).toBe(true); - - //check there are no waitpoints blocking the parent run - const runWaitpoint = await 
prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: run.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpoint).toBeNull(); - } finally { - engine.quit(); - } - } - ); - - containerTest( - "Race condition with multiple waitpoints completing simultaneously", - { timeout: 60_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, - machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, - }, - baseCostInCents: 0.0001, - }, - tracer: trace.getTracer("test", "0.0.0"), - }); - - try { - const taskIdentifier = "test-task"; - - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); - - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); - - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); - - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - - const iterationCount = 10; - - for (let i = 0; i < iterationCount; i++) { - const waitpointCount = 5; + for (let i = 0; i < 
iterationCount; i++) { + const waitpointCount = 5; //create waitpoints const results = await Promise.all( @@ -690,7 +682,6 @@ describe("RunEngine Waitpoints", () => { containerTest( "Create a Manual waitpoint and let it timeout", - { timeout: 15_000 }, async ({ prisma, redisOptions }) => { //create environment const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); @@ -840,324 +831,316 @@ describe("RunEngine Waitpoints", () => { } ); - containerTest( - "Manual waitpoint with idempotency", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Manual waitpoint with idempotency", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, }, - tracer: trace.getTracer("test", "0.0.0"), - }); + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); - try { - const taskIdentifier = "test-task"; + try { + const taskIdentifier = "test-task"; - //create background worker - await setupBackgroundWorker(prisma, 
authenticatedEnvironment, taskIdentifier); + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - const idempotencyKey = "a-key"; + const idempotencyKey = "a-key"; - //create a manual waitpoint with timeout - const result = await engine.createManualWaitpoint({ - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - idempotencyKey, - }); - 
expect(result.waitpoint.status).toBe("PENDING"); - expect(result.waitpoint.idempotencyKey).toBe(idempotencyKey); - expect(result.waitpoint.userProvidedIdempotencyKey).toBe(true); + //create a manual waitpoint with timeout + const result = await engine.createManualWaitpoint({ + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + idempotencyKey, + }); + expect(result.waitpoint.status).toBe("PENDING"); + expect(result.waitpoint.idempotencyKey).toBe(idempotencyKey); + expect(result.waitpoint.userProvidedIdempotencyKey).toBe(true); - //block the run - await engine.blockRunWithWaitpoint({ - runId: run.id, - waitpoints: result.waitpoint.id, - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - organizationId: authenticatedEnvironment.organizationId, - }); + //block the run + await engine.blockRunWithWaitpoint({ + runId: run.id, + waitpoints: result.waitpoint.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, + }); - const executionData = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - //check there is a waitpoint blocking the parent run - const runWaitpointBefore = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: run.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); + //check there is a waitpoint blocking the parent run + const runWaitpointBefore = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); - let 
event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; - engine.eventBus.on("workerNotification", (result) => { - event = result; - }); + let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; + engine.eventBus.on("workerNotification", (result) => { + event = result; + }); - //complete the waitpoint - await engine.completeWaitpoint({ - id: result.waitpoint.id, - }); + //complete the waitpoint + await engine.completeWaitpoint({ + id: result.waitpoint.id, + }); - await setTimeout(200); + await setTimeout(200); - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); - assertNonNullable(event); - const notificationEvent = event as EventBusEventArgs<"workerNotification">[0]; - expect(notificationEvent.run.id).toBe(run.id); + assertNonNullable(event); + const notificationEvent = event as EventBusEventArgs<"workerNotification">[0]; + expect(notificationEvent.run.id).toBe(run.id); - //check there are no waitpoints blocking the parent run - const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: run.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpoint).toBeNull(); + //check there are no waitpoints blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoint).toBeNull(); - const waitpoint2 = await prisma.waitpoint.findUnique({ - where: { - id: result.waitpoint.id, - }, - }); - assertNonNullable(waitpoint2); - expect(waitpoint2.status).toBe("COMPLETED"); - expect(waitpoint2.outputIsError).toBe(false); - } finally { - engine.quit(); - } + const waitpoint2 = await prisma.waitpoint.findUnique({ + where: { + id: 
result.waitpoint.id, + }, + }); + assertNonNullable(waitpoint2); + expect(waitpoint2.status).toBe("COMPLETED"); + expect(waitpoint2.outputIsError).toBe(false); + } finally { + engine.quit(); } - ); + }); - containerTest( - "Manual waitpoint with idempotency and ttl", - { timeout: 15_000 }, - async ({ prisma, redisOptions }) => { - //create environment - const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); + containerTest("Manual waitpoint with idempotency and ttl", async ({ prisma, redisOptions }) => { + //create environment + const authenticatedEnvironment = await setupAuthenticatedEnvironment(prisma, "PRODUCTION"); - const engine = new RunEngine({ - prisma, - worker: { - redis: redisOptions, - workers: 1, - tasksPerWorker: 10, - pollIntervalMs: 100, - }, - queue: { - redis: redisOptions, - }, - runLock: { - redis: redisOptions, - }, + const engine = new RunEngine({ + prisma, + worker: { + redis: redisOptions, + workers: 1, + tasksPerWorker: 10, + pollIntervalMs: 100, + }, + queue: { + redis: redisOptions, + }, + runLock: { + redis: redisOptions, + }, + machines: { + defaultMachine: "small-1x", machines: { - defaultMachine: "small-1x", - machines: { - "small-1x": { - name: "small-1x" as const, - cpu: 0.5, - memory: 0.5, - centsPerMs: 0.0001, - }, + "small-1x": { + name: "small-1x" as const, + cpu: 0.5, + memory: 0.5, + centsPerMs: 0.0001, }, - baseCostInCents: 0.0001, }, - tracer: trace.getTracer("test", "0.0.0"), - }); + baseCostInCents: 0.0001, + }, + tracer: trace.getTracer("test", "0.0.0"), + }); - try { - const taskIdentifier = "test-task"; + try { + const taskIdentifier = "test-task"; - //create background worker - await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); + //create background worker + await setupBackgroundWorker(prisma, authenticatedEnvironment, taskIdentifier); - //trigger the run - const run = await engine.trigger( - { - number: 1, - friendlyId: "run_p1234", - environment: 
authenticatedEnvironment, - taskIdentifier, - payload: "{}", - payloadType: "application/json", - context: {}, - traceContext: {}, - traceId: "t12345", - spanId: "s12345", - masterQueue: "main", - queueName: "task/test-task", - isTest: false, - tags: [], - }, - prisma - ); + //trigger the run + const run = await engine.trigger( + { + number: 1, + friendlyId: "run_p1234", + environment: authenticatedEnvironment, + taskIdentifier, + payload: "{}", + payloadType: "application/json", + context: {}, + traceContext: {}, + traceId: "t12345", + spanId: "s12345", + masterQueue: "main", + queueName: "task/test-task", + isTest: false, + tags: [], + }, + prisma + ); - //dequeue the run - const dequeued = await engine.dequeueFromMasterQueue({ - consumerId: "test_12345", - masterQueue: run.masterQueue, - maxRunCount: 10, - }); + //dequeue the run + const dequeued = await engine.dequeueFromMasterQueue({ + consumerId: "test_12345", + masterQueue: run.masterQueue, + maxRunCount: 10, + }); - //create an attempt - const attemptResult = await engine.startRunAttempt({ - runId: dequeued[0].run.id, - snapshotId: dequeued[0].snapshot.id, - }); - expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); + //create an attempt + const attemptResult = await engine.startRunAttempt({ + runId: dequeued[0].run.id, + snapshotId: dequeued[0].snapshot.id, + }); + expect(attemptResult.snapshot.executionStatus).toBe("EXECUTING"); - const idempotencyKey = "a-key"; + const idempotencyKey = "a-key"; - //create a manual waitpoint with timeout - const result = await engine.createManualWaitpoint({ - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - idempotencyKey, - idempotencyKeyExpiresAt: new Date(Date.now() + 200), - }); - expect(result.waitpoint.status).toBe("PENDING"); - expect(result.waitpoint.idempotencyKey).toBe(idempotencyKey); - expect(result.waitpoint.userProvidedIdempotencyKey).toBe(true); + //create a manual waitpoint with timeout + const 
result = await engine.createManualWaitpoint({ + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + idempotencyKey, + idempotencyKeyExpiresAt: new Date(Date.now() + 200), + }); + expect(result.waitpoint.status).toBe("PENDING"); + expect(result.waitpoint.idempotencyKey).toBe(idempotencyKey); + expect(result.waitpoint.userProvidedIdempotencyKey).toBe(true); - const sameWaitpointResult = await engine.createManualWaitpoint({ - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - idempotencyKey, - idempotencyKeyExpiresAt: new Date(Date.now() + 200), - }); - expect(sameWaitpointResult.waitpoint.id).toBe(result.waitpoint.id); + const sameWaitpointResult = await engine.createManualWaitpoint({ + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + idempotencyKey, + idempotencyKeyExpiresAt: new Date(Date.now() + 200), + }); + expect(sameWaitpointResult.waitpoint.id).toBe(result.waitpoint.id); - //block the run - await engine.blockRunWithWaitpoint({ - runId: run.id, - waitpoints: result.waitpoint.id, - environmentId: authenticatedEnvironment.id, - projectId: authenticatedEnvironment.projectId, - organizationId: authenticatedEnvironment.organizationId, - }); + //block the run + await engine.blockRunWithWaitpoint({ + runId: run.id, + waitpoints: result.waitpoint.id, + environmentId: authenticatedEnvironment.id, + projectId: authenticatedEnvironment.projectId, + organizationId: authenticatedEnvironment.organizationId, + }); - const executionData = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); + const executionData = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData?.snapshot.executionStatus).toBe("EXECUTING_WITH_WAITPOINTS"); - //check there is a waitpoint blocking the parent run - const runWaitpointBefore = await 
prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: run.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); + //check there is a waitpoint blocking the parent run + const runWaitpointBefore = await prisma.taskRunWaitpoint.findFirst({ + where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpointBefore?.waitpointId).toBe(result.waitpoint.id); - let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; - engine.eventBus.on("workerNotification", (result) => { - event = result; - }); + let event: EventBusEventArgs<"workerNotification">[0] | undefined = undefined; + engine.eventBus.on("workerNotification", (result) => { + event = result; + }); - //complete the waitpoint - await engine.completeWaitpoint({ - id: result.waitpoint.id, - }); + //complete the waitpoint + await engine.completeWaitpoint({ + id: result.waitpoint.id, + }); - await setTimeout(200); + await setTimeout(200); - const executionData2 = await engine.getRunExecutionData({ runId: run.id }); - expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); + const executionData2 = await engine.getRunExecutionData({ runId: run.id }); + expect(executionData2?.snapshot.executionStatus).toBe("EXECUTING"); - assertNonNullable(event); - const notificationEvent = event as EventBusEventArgs<"workerNotification">[0]; - expect(notificationEvent.run.id).toBe(run.id); + assertNonNullable(event); + const notificationEvent = event as EventBusEventArgs<"workerNotification">[0]; + expect(notificationEvent.run.id).toBe(run.id); - //check there are no waitpoints blocking the parent run - const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ - where: { - taskRunId: run.id, - }, - include: { - waitpoint: true, - }, - }); - expect(runWaitpoint).toBeNull(); + //check there are no waitpoints blocking the parent run + const runWaitpoint = await prisma.taskRunWaitpoint.findFirst({ + 
where: { + taskRunId: run.id, + }, + include: { + waitpoint: true, + }, + }); + expect(runWaitpoint).toBeNull(); - const waitpoint2 = await prisma.waitpoint.findUnique({ - where: { - id: result.waitpoint.id, - }, - }); - assertNonNullable(waitpoint2); - expect(waitpoint2.status).toBe("COMPLETED"); - expect(waitpoint2.outputIsError).toBe(false); - } finally { - engine.quit(); - } + const waitpoint2 = await prisma.waitpoint.findUnique({ + where: { + id: result.waitpoint.id, + }, + }); + assertNonNullable(waitpoint2); + expect(waitpoint2.status).toBe("COMPLETED"); + expect(waitpoint2.outputIsError).toBe(false); + } finally { + engine.quit(); } - ); + }); }); diff --git a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts index 23ae2b7dea..c4f7a898b7 100644 --- a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts +++ b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts @@ -115,7 +115,7 @@ export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { this._rng = seedrandom(options.seed); this._redis = createRedisClient(options.redis); - this._defaultEnvConcurrencyLimit = options.defaultEnvConcurrencyLimit ?? 10; + this._defaultEnvConcurrencyLimit = options.defaultEnvConcurrencyLimit ?? 100; this._parentQueueLimit = options.parentQueueLimit ?? 
100; } diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index a014fb46b1..d8ef1bb36e 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -383,7 +383,7 @@ describe("RunQueue", () => { } ); - redisTest.only( + redisTest( "Dequeue multiple messages from the queue", { timeout: 5_000 }, async ({ redisContainer }) => { diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 929c0efec2..85f1c86c9b 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -467,6 +467,7 @@ export class RunQueue { taskConcurrencyKey, "dlq", messageId, + messageQueue, JSON.stringify(message.masterQueues), this.options.redis.keyPrefix ?? "" ); @@ -503,6 +504,7 @@ export class RunQueue { taskConcurrencyKey, //args messageId, + messageQueue, JSON.stringify(message), String(messageScore), JSON.stringify(message.masterQueues), @@ -914,6 +916,7 @@ export class RunQueue { envQueueKey, taskConcurrencyKey, messageId, + messageQueue, JSON.stringify(masterQueues), this.options.redis.keyPrefix ?? "" ); @@ -1073,8 +1076,9 @@ local taskCurrentConcurrencyKey = KEYS[7] -- Args: local messageId = ARGV[1] -local parentQueues = cjson.decode(ARGV[2]) -local keyPrefix = ARGV[3] +local messageQueueName = ARGV[2] +local parentQueues = cjson.decode(ARGV[3]) +local keyPrefix = ARGV[4] -- Remove the message from the message key redis.call('DEL', messageKey) @@ -1088,9 +1092,9 @@ local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') for _, parentQueue in ipairs(parentQueues) do local prefixedParentQueue = keyPrefix .. 
parentQueue if #earliestMessage == 0 then - redis.call('ZREM', prefixedParentQueue, messageQueue) + redis.call('ZREM', prefixedParentQueue, messageQueueName) else - redis.call('ZADD', prefixedParentQueue, earliestMessage[2], messageQueue) + redis.call('ZADD', prefixedParentQueue, earliestMessage[2], messageQueueName) end end @@ -1116,10 +1120,11 @@ local taskConcurrencyKey = KEYS[7] -- Args: local messageId = ARGV[1] -local messageData = ARGV[2] -local messageScore = tonumber(ARGV[3]) -local parentQueues = cjson.decode(ARGV[4]) -local keyPrefix = ARGV[5] +local messageQueueName = ARGV[2] +local messageData = ARGV[3] +local messageScore = tonumber(ARGV[4]) +local parentQueues = cjson.decode(ARGV[5]) +local keyPrefix = ARGV[6] -- Update the message data redis.call('SET', messageKey, messageData) @@ -1139,9 +1144,9 @@ local earliestMessage = redis.call('ZRANGE', messageQueueKey, 0, 0, 'WITHSCORES' for _, parentQueue in ipairs(parentQueues) do local prefixedParentQueue = keyPrefix .. parentQueue if #earliestMessage == 0 then - redis.call('ZREM', prefixedParentQueue, messageQueueKey) + redis.call('ZREM', prefixedParentQueue, messageQueueName) else - redis.call('ZADD', prefixedParentQueue, earliestMessage[2], messageQueueKey) + redis.call('ZADD', prefixedParentQueue, earliestMessage[2], messageQueueName) end end `, @@ -1162,8 +1167,9 @@ local deadLetterQueueKey = KEYS[8] -- Args: local messageId = ARGV[1] -local parentQueues = cjson.decode(ARGV[2]) -local keyPrefix = ARGV[3] +local messageQueueName = ARGV[2] +local parentQueues = cjson.decode(ARGV[3]) +local keyPrefix = ARGV[4] -- Remove the message from the queue redis.call('ZREM', messageQueue, messageId) @@ -1174,9 +1180,9 @@ local earliestMessage = redis.call('ZRANGE', messageQueue, 0, 0, 'WITHSCORES') for _, parentQueue in ipairs(parentQueues) do local prefixedParentQueue = keyPrefix .. 
parentQueue if #earliestMessage == 0 then - redis.call('ZREM', prefixedParentQueue, messageQueue) + redis.call('ZREM', prefixedParentQueue, messageQueueName) else - redis.call('ZADD', prefixedParentQueue, earliestMessage[2], messageQueue) + redis.call('ZADD', prefixedParentQueue, earliestMessage[2], messageQueueName) end end @@ -1301,6 +1307,7 @@ declare module "@internal/redis" { envQueueKey: string, taskConcurrencyKey: string, messageId: string, + messageQueueName: string, masterQueues: string, keyPrefix: string, callback?: Callback @@ -1315,6 +1322,7 @@ declare module "@internal/redis" { envQueueKey: string, taskConcurrencyKey: string, messageId: string, + messageQueueName: string, messageData: string, messageScore: string, masterQueues: string, @@ -1332,6 +1340,7 @@ declare module "@internal/redis" { taskConcurrencyKey: string, deadLetterQueueKey: string, messageId: string, + messageQueueName: string, masterQueues: string, keyPrefix: string, callback?: Callback diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.test.ts b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts index 274c0f7c6b..0f6b14e17d 100644 --- a/internal-packages/run-engine/src/run-queue/keyProducer.test.ts +++ b/internal-packages/run-engine/src/run-queue/keyProducer.test.ts @@ -275,7 +275,7 @@ describe("KeyProducer", () => { "task/task-name" ); const key = keyProducer.envCurrentConcurrencyKeyFromQueue(queueKey); - expect(key).toBe("{org:o1234}:env:e1234:currentConcurrency"); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:currentConcurrency"); }); it("envCurrentConcurrencyKey", () => { @@ -287,7 +287,7 @@ describe("KeyProducer", () => { project: { id: "p1234" }, organization: { id: "o1234" }, }); - expect(key).toBe("{org:o1234}:env:e1234:currentConcurrency"); + expect(key).toBe("{org:o1234}:proj:p1234:env:e1234:currentConcurrency"); }); it("messageKey", () => { From a3b7d4c050301c44eeef41f42f201732c52e8a1e Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 
Mar 2025 09:47:44 +0000 Subject: [PATCH 06/12] Adding tests for the fair dequeueing strat in the run engine --- .../run-engine/src/run-queue/constants.ts | 4 + .../run-queue/fairDequeuingStrategy.test.ts | 1189 +++++++++++++++++ .../run-engine/src/run-queue/keyProducer.ts | 35 +- .../run-engine/src/run-queue/types.ts | 10 +- .../run-engine/src/shared/index.ts | 4 +- internal-packages/run-engine/vitest.config.ts | 1 + 6 files changed, 1235 insertions(+), 8 deletions(-) create mode 100644 internal-packages/run-engine/src/run-queue/constants.ts create mode 100644 internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.test.ts diff --git a/internal-packages/run-engine/src/run-queue/constants.ts b/internal-packages/run-engine/src/run-queue/constants.ts new file mode 100644 index 0000000000..22e1928fb7 --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/constants.ts @@ -0,0 +1,4 @@ +export const RUN_QUEUE_RESUME_PRIORITY_TIMESTAMP_OFFSET = 31_556_952 * 1000; // 1 year +export const RUN_QUEUE_RETRY_PRIORITY_TIMESTAMP_OFFSET = 15_778_476 * 1000; // 6 months +export const RUN_QUEUE_DELAYED_REQUEUE_THRESHOLD_IN_MS = 500; +export const RUN_QUEUE_SCHEDULED_REQUEUE_AVAILABLE_AT_THRESHOLD_IN_MS = 500; diff --git a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.test.ts b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.test.ts new file mode 100644 index 0000000000..4f99ff90e8 --- /dev/null +++ b/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.test.ts @@ -0,0 +1,1189 @@ +import { redisTest } from "@internal/testcontainers"; +import { describe, expect, vi } from "vitest"; +import { FairDequeuingStrategy } from "./fairDequeuingStrategy.js"; +import { RunQueueFullKeyProducer } from "./keyProducer.js"; +import { createRedisClient, Redis, RedisOptions } from "@internal/redis"; +import { EnvQueues, RunQueueKeyProducer } from "./types.js"; +import { RUN_QUEUE_RESUME_PRIORITY_TIMESTAMP_OFFSET } from "./constants.js"; + 
+vi.setConfig({ testTimeout: 60_000 }); // 30 seconds timeout + +describe("FairDequeuingStrategy", () => { + redisTest( + "should distribute a single queue from a single env", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 100, + seed: "test-seed-1", // for deterministic shuffling + }); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: Date.now() - 1000, // 1 second ago + queueId: "queue-1", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + + const result = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + + expect(result).toHaveLength(1); + expect(result[0]).toEqual({ + envId: "env-1", + queues: [keyProducer.queueKey("org-1", "proj-1", "env-1", "queue-1")], + }); + } + ); + + redisTest("should respect env concurrency limits", async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 2, + parentQueueLimit: 100, + seed: "test-seed-3", + }); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: Date.now() - 1000, + queueId: "queue-1", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + + await setupConcurrency({ + redis, + keyProducer, + env: { envId: "env-1", projectId: "proj-1", orgId: "org-1", currentConcurrency: 2, limit: 2 }, + }); + + const result = await strategy.distributeFairQueuesFromParentQueue("parent-queue", "consumer-1"); + expect(result).toHaveLength(0); + }); + + redisTest( + "should give extra concurrency when the env has reserve concurrency", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: 
keyProducer, + defaultEnvConcurrencyLimit: 2, + parentQueueLimit: 100, + seed: "test-seed-3", + }); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: Date.now() - 1000, + queueId: "queue-1", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + + await setupConcurrency({ + redis, + keyProducer, + env: { + envId: "env-1", + projectId: "proj-1", + orgId: "org-1", + currentConcurrency: 2, + limit: 2, + reserveConcurrency: 1, + }, + }); + + const result = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + expect(result).toHaveLength(1); + expect(result[0]).toEqual({ + envId: "env-1", + queues: [keyProducer.queueKey("org-1", "proj-1", "env-1", "queue-1")], + }); + } + ); + + redisTest("should respect parentQueueLimit", async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 2, // Only take 2 queues + seed: "test-seed-6", + }); + + const now = Date.now(); + + // Setup 3 queues but parentQueueLimit is 2 + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 3000, + queueId: "queue-1", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 2000, + queueId: "queue-2", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 1000, + queueId: "queue-3", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + + const result = await strategy.distributeFairQueuesFromParentQueue("parent-queue", "consumer-1"); + + expect(result).toHaveLength(1); + const queue1 = keyProducer.queueKey("org-1", "proj-1", "env-1", "queue-1"); + const queue2 = keyProducer.queueKey("org-1", "proj-1", 
"env-1", "queue-2"); + expect(result[0]).toEqual({ + envId: "env-1", + queues: [queue1, queue2], + }); + }); + + redisTest( + "should reuse snapshots across calls for the same consumer", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 10, + seed: "test-seed-reuse-1", + reuseSnapshotCount: 1, + }); + + const now = Date.now(); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 3000, + queueId: "queue-1", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 2000, + queueId: "queue-2", + orgId: "org-2", + projectId: "proj-2", + envId: "env-2", + }); + + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 1000, + queueId: "queue-3", + orgId: "org-3", + projectId: "proj-3", + envId: "env-3", + }); + + const startDistribute1 = performance.now(); + + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + const result = flattenResults(envResult); + + const distribute1Duration = performance.now() - startDistribute1; + + console.log("First distribution took", distribute1Duration, "ms"); + + expect(result).toHaveLength(3); + // Should only get the two oldest queues + const queue1 = keyProducer.queueKey("org-1", "proj-1", "env-1", "queue-1"); + const queue2 = keyProducer.queueKey("org-2", "proj-2", "env-2", "queue-2"); + const queue3 = keyProducer.queueKey("org-3", "proj-3", "env-3", "queue-3"); + expect(result).toEqual([queue2, queue1, queue3]); + + const startDistribute2 = performance.now(); + + const result2 = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + + const distribute2Duration = performance.now() - startDistribute2; + 
+ console.log("Second distribution took", distribute2Duration, "ms"); + + // Make sure the second call is more than 9 times faster than the first + expect(distribute2Duration).toBeLessThan(distribute1Duration / 9); + + const startDistribute3 = performance.now(); + + const result3 = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + + const distribute3Duration = performance.now() - startDistribute3; + + console.log("Third distribution took", distribute3Duration, "ms"); + + // Make sure the third call is more than 4 times the second + expect(distribute3Duration).toBeGreaterThan(distribute2Duration * 4); + } + ); + + redisTest( + "should fairly distribute queues across environments over time", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 100, + seed: "test-seed-5", + }); + + const now = Date.now(); + + // Test configuration + const orgs = ["org-1", "org-2", "org-3"]; + const envsPerOrg = 3; // Each org has 3 environments + const queuesPerEnv = 5; // Each env has 5 queues + const iterations = 1000; + + // Setup queues + for (const orgId of orgs) { + for (let envNum = 1; envNum <= envsPerOrg; envNum++) { + const envId = `env-${orgId}-${envNum}`; + const projectId = `proj-${orgId}-${envNum}`; + + for (let queueNum = 1; queueNum <= queuesPerEnv; queueNum++) { + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + // Vary the ages slightly + score: now - Math.random() * 10000, + queueId: `queue-${orgId}-${envId}-${queueNum}`, + orgId, + projectId, + envId, + }); + } + + // Setup reasonable concurrency limits + await setupConcurrency({ + redis, + keyProducer, + env: { envId, projectId, orgId, currentConcurrency: 1, limit: 5 }, + }); + } + } + + // Track distribution statistics + type PositionStats = { + firstPosition: number; // Count of 
times this env/org was first + positionSums: number; // Sum of positions (for averaging) + appearances: number; // Total number of appearances + }; + + const envStats: Record = {}; + const orgStats: Record = {}; + + // Initialize stats objects + for (const orgId of orgs) { + orgStats[orgId] = { firstPosition: 0, positionSums: 0, appearances: 0 }; + for (let envNum = 1; envNum <= envsPerOrg; envNum++) { + const envId = `env-${orgId}-${envNum}`; + envStats[envId] = { firstPosition: 0, positionSums: 0, appearances: 0 }; + } + } + + // Run multiple iterations + for (let i = 0; i < iterations; i++) { + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + `consumer-${i % 3}` // Simulate 3 different consumers + ); + const result = flattenResults(envResult); + + // Track positions of queues + result.forEach((queueId, position) => { + const orgId = keyProducer.orgIdFromQueue(queueId); + const envId = keyProducer.envIdFromQueue(queueId); + + // Update org stats + orgStats[orgId].appearances++; + orgStats[orgId].positionSums += position; + if (position === 0) orgStats[orgId].firstPosition++; + + // Update env stats + envStats[envId].appearances++; + envStats[envId].positionSums += position; + if (position === 0) envStats[envId].firstPosition++; + }); + } + + // Calculate and log statistics + console.log("\nOrganization Statistics:"); + for (const [orgId, stats] of Object.entries(orgStats)) { + const avgPosition = stats.positionSums / stats.appearances; + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; + console.log(`${orgId}: + First Position: ${firstPositionPercentage.toFixed(2)}% + Average Position: ${avgPosition.toFixed(2)} + Total Appearances: ${stats.appearances}`); + } + + console.log("\nEnvironment Statistics:"); + for (const [envId, stats] of Object.entries(envStats)) { + const avgPosition = stats.positionSums / stats.appearances; + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; 
+ console.log(`${envId}: + First Position: ${firstPositionPercentage.toFixed(2)}% + Average Position: ${avgPosition.toFixed(2)} + Total Appearances: ${stats.appearances}`); + } + + // Verify fairness of first position distribution + const expectedFirstPositionPercentage = 100 / orgs.length; + const firstPositionStdDevOrgs = calculateStandardDeviation( + Object.values(orgStats).map((stats) => (stats.firstPosition / iterations) * 100) + ); + + const expectedEnvFirstPositionPercentage = 100 / (orgs.length * envsPerOrg); + const firstPositionStdDevEnvs = calculateStandardDeviation( + Object.values(envStats).map((stats) => (stats.firstPosition / iterations) * 100) + ); + + // Assert reasonable fairness for first position + expect(firstPositionStdDevOrgs).toBeLessThan(5); // Allow 5% standard deviation for orgs + expect(firstPositionStdDevEnvs).toBeLessThan(5); // Allow 5% standard deviation for envs + + // Verify that each org and env gets a fair chance at first position + for (const [orgId, stats] of Object.entries(orgStats)) { + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; + expect(firstPositionPercentage).toBeGreaterThan(expectedFirstPositionPercentage * 0.7); // Within 30% of expected + expect(firstPositionPercentage).toBeLessThan(expectedFirstPositionPercentage * 1.3); + } + + for (const [envId, stats] of Object.entries(envStats)) { + const firstPositionPercentage = (stats.firstPosition / iterations) * 100; + expect(firstPositionPercentage).toBeGreaterThan(expectedEnvFirstPositionPercentage * 0.7); // Within 30% of expected + expect(firstPositionPercentage).toBeLessThan(expectedEnvFirstPositionPercentage * 1.3); + } + + // Verify average positions are reasonably distributed + const avgPositionsOrgs = Object.values(orgStats).map( + (stats) => stats.positionSums / stats.appearances + ); + const avgPositionsEnvs = Object.values(envStats).map( + (stats) => stats.positionSums / stats.appearances + ); + + const avgPositionStdDevOrgs = 
calculateStandardDeviation(avgPositionsOrgs); + const avgPositionStdDevEnvs = calculateStandardDeviation(avgPositionsEnvs); + + expect(avgPositionStdDevOrgs).toBeLessThan(1); // Average positions should be fairly consistent + expect(avgPositionStdDevEnvs).toBeLessThan(1); + } + ); + + redisTest( + "should shuffle environments while maintaining age order within environments", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 100, + seed: "fixed-seed", + }); + + const now = Date.now(); + + // Setup three environments, each with two queues of different ages + await Promise.all([ + // env-1: one old queue (3000ms old) and one new queue (1000ms old) + setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 3000, + queueId: "queue-1-old", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }), + setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 1000, + queueId: "queue-1-new", + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }), + + // env-2: same pattern + setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 3000, + queueId: "queue-2-old", + orgId: "org-1", + projectId: "proj-1", + envId: "env-2", + }), + setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 1000, + queueId: "queue-2-new", + orgId: "org-1", + projectId: "proj-1", + envId: "env-2", + }), + ]); + + // Setup basic concurrency settings + await setupConcurrency({ + redis, + keyProducer, + env: { + envId: "env-1", + projectId: "proj-1", + orgId: "org-1", + currentConcurrency: 0, + limit: 5, + }, + }); + await setupConcurrency({ + redis, + keyProducer, + env: { + envId: "env-2", + projectId: "proj-1", + orgId: "org-1", + currentConcurrency: 0, + limit: 5, + }, + }); + + const envResult = await 
strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + const result = flattenResults(envResult); + + // Group queues by environment + const queuesByEnv = result.reduce( + (acc, queueId) => { + const envId = keyProducer.envIdFromQueue(queueId); + if (!acc[envId]) { + acc[envId] = []; + } + acc[envId].push(queueId); + return acc; + }, + {} as Record + ); + + // Verify that: + // 1. We got all queues + expect(result).toHaveLength(4); + + // 2. Queues are grouped by environment + for (const envQueues of Object.values(queuesByEnv)) { + expect(envQueues).toHaveLength(2); + + // 3. Within each environment, older queue comes before newer queue + const [firstQueue, secondQueue] = envQueues; + expect(firstQueue).toContain("old"); + expect(secondQueue).toContain("new"); + } + } + ); + + redisTest( + "should bias shuffling based on concurrency limits and available capacity", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const now = Date.now(); + + // Setup three environments with different concurrency settings + const envSetups = [ + { + envId: "env-1", + orgId: "org-1", + projectId: "proj-1", + limit: 100, + current: 20, // Lots of available capacity + queueCount: 3, + }, + { + envId: "env-2", + orgId: "org-1", + projectId: "proj-1", + limit: 50, + current: 40, // Less available capacity + queueCount: 3, + }, + { + envId: "env-3", + orgId: "org-1", + projectId: "proj-1", + limit: 10, + current: 5, // Some available capacity + queueCount: 3, + }, + ]; + + // Setup queues and concurrency for each environment + for (const setup of envSetups) { + await setupConcurrency({ + redis, + keyProducer, + env: { + envId: setup.envId, + projectId: setup.projectId, + orgId: setup.orgId, + currentConcurrency: setup.current, + limit: setup.limit, + }, + }); + + for (let i = 0; i < setup.queueCount; i++) { + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - 1000 * (i + 1), + 
queueId: `queue-${i}`, + orgId: "org-1", + projectId: "proj-1", + envId: setup.envId, + }); + } + } + + // Create multiple strategies with different seeds + const numStrategies = 5; + const strategies = Array.from( + { length: numStrategies }, + (_, i) => + new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 100, + seed: `test-seed-${i}`, + biases: { + concurrencyLimitBias: 0.8, + availableCapacityBias: 0.5, + queueAgeRandomization: 0.0, + }, + }) + ); + + // Run iterations across all strategies + const iterationsPerStrategy = 100; + const allResults: Record[] = []; + + for (const strategy of strategies) { + const firstPositionCounts: Record = {}; + + for (let i = 0; i < iterationsPerStrategy; i++) { + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + `consumer-${i % 3}` + ); + const result = flattenResults(envResult); + + expect(result.length).toBeGreaterThan(0); + + const firstEnv = keyProducer.envIdFromQueue(result[0]); + firstPositionCounts[firstEnv] = (firstPositionCounts[firstEnv] || 0) + 1; + } + + allResults.push(firstPositionCounts); + } + + // Calculate average distributions across all strategies + const avgDistribution: Record = {}; + const envIds = ["env-1", "env-2", "env-3"]; + + for (const envId of envIds) { + const sum = allResults.reduce((acc, result) => acc + (result[envId] || 0), 0); + avgDistribution[envId] = sum / numStrategies; + } + + // Log individual strategy results and the average + console.log("\nResults by strategy:"); + allResults.forEach((result, i) => { + console.log(`Strategy ${i + 1}:`, result); + }); + + console.log("\nAverage distribution:", avgDistribution); + + // Calculate percentages from average distribution + const totalCount = Object.values(avgDistribution).reduce((sum, count) => sum + count, 0); + const highLimitPercentage = (avgDistribution["env-1"] / totalCount) * 100; + const lowLimitPercentage = (avgDistribution["env-3"] 
/ totalCount) * 100; + + console.log("\nPercentages:"); + console.log("High limit percentage:", highLimitPercentage); + console.log("Low limit percentage:", lowLimitPercentage); + + // Verify distribution across all strategies + expect(highLimitPercentage).toBeLessThan(60); + expect(lowLimitPercentage).toBeGreaterThan(10); + expect(highLimitPercentage).toBeGreaterThan(lowLimitPercentage); + } + ); + + redisTest( + "should respect ageInfluence parameter for queue ordering", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const now = Date.now(); + + // Setup queues with different ages in the same environment + const queueAges = [ + { id: "queue-1", age: 5000 }, // oldest + { id: "queue-2", age: 3000 }, + { id: "queue-3", age: 1000 }, // newest + ]; + + // Helper function to run iterations with a specific age influence + async function runWithQueueAgeRandomization(queueAgeRandomization: number) { + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 100, + seed: "fixed-seed", + biases: { + concurrencyLimitBias: 0, + availableCapacityBias: 0, + queueAgeRandomization, + }, + }); + + const positionCounts: Record = { + "queue-1": [0, 0, 0], + "queue-2": [0, 0, 0], + "queue-3": [0, 0, 0], + }; + + const iterations = 1000; + for (let i = 0; i < iterations; i++) { + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + "consumer-1" + ); + const result = flattenResults(envResult); + + result.forEach((queueId, position) => { + const baseQueueId = queueId.split(":").pop()!; + positionCounts[baseQueueId][position]++; + }); + } + + return positionCounts; + } + + // Setup test data + for (const { id, age } of queueAges) { + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - age, + queueId: id, + orgId: "org-1", + projectId: "proj-1", + envId: "env-1", + }); + } + + await 
setupConcurrency({ + redis, + keyProducer, + env: { + envId: "env-1", + projectId: "proj-1", + orgId: "org-1", + currentConcurrency: 0, + limit: 5, + }, + }); + + // Test with different age influence values + const strictAge = await runWithQueueAgeRandomization(0); // Strict age-based ordering + const mixed = await runWithQueueAgeRandomization(0.5); // Mix of age and random + const fullyRandom = await runWithQueueAgeRandomization(1); // Completely random + + console.log("Distribution with strict age ordering (0.0):", strictAge); + console.log("Distribution with mixed ordering (0.5):", mixed); + console.log("Distribution with random ordering (1.0):", fullyRandom); + + // With strict age ordering (0.0), oldest should always be first + expect(strictAge["queue-1"][0]).toBe(1000); // Always in first position + expect(strictAge["queue-3"][0]).toBe(0); // Never in first position + + // With fully random (1.0), positions should still allow for some age bias + const randomFirstPositionSpread = Math.abs( + fullyRandom["queue-1"][0] - fullyRandom["queue-3"][0] + ); + expect(randomFirstPositionSpread).toBeLessThan(200); // Allow for larger spread in distribution + + // With mixed (0.5), should show preference for age but not absolute + expect(mixed["queue-1"][0]).toBeGreaterThan(mixed["queue-3"][0]); // Older preferred + expect(mixed["queue-3"][0]).toBeGreaterThan(0); // But newer still gets chances + } + ); + + redisTest( + "should respect maximumEnvCount and select envs based on queue ages", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 100, + seed: "test-seed-max-orgs", + maximumEnvCount: 2, // Only select top 2 orgs + }); + + const now = Date.now(); + + // Setup 4 envs with different queue age profiles + const envSetups = [ + { + envId: "env-1", + orgId: "org-1", + projectId: "proj-1", + queues: [ + 
{ age: 1000 }, // Average age: 1000 + ], + }, + { + envId: "env-2", + orgId: "org-1", + projectId: "proj-1", + queues: [ + { age: 5000 }, // Average age: 5000 + { age: 5000 }, + ], + }, + { + envId: "env-3", + orgId: "org-1", + projectId: "proj-1", + queues: [ + { age: 2000 }, // Average age: 2000 + { age: 2000 }, + ], + }, + { + envId: "env-4", + orgId: "org-1", + projectId: "proj-1", + queues: [ + { age: 500 }, // Average age: 500 + { age: 500 }, + ], + }, + ]; + + // Setup queues and concurrency for each org + for (const setup of envSetups) { + await setupConcurrency({ + redis, + keyProducer, + env: { + envId: setup.envId, + projectId: setup.projectId, + orgId: setup.orgId, + currentConcurrency: 0, + limit: 5, + }, + }); + + for (let i = 0; i < setup.queues.length; i++) { + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - setup.queues[i].age, + queueId: `queue-${setup.envId}-${i}`, + orgId: `org-${setup.envId}`, + projectId: `proj-${setup.envId}`, + envId: setup.envId, + }); + } + } + + // Run multiple iterations to verify consistent behavior + const iterations = 100; + const selectedEnvCounts: Record = {}; + + for (let i = 0; i < iterations; i++) { + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + `consumer-${i}` + ); + const result = flattenResults(envResult); + + // Track which orgs were included in the result + const selectedEnvs = new Set(result.map((queueId) => keyProducer.envIdFromQueue(queueId))); + + // Verify we never get more than maximumOrgCount orgs + expect(selectedEnvs.size).toBeLessThanOrEqual(2); + + for (const envId of selectedEnvs) { + selectedEnvCounts[envId] = (selectedEnvCounts[envId] || 0) + 1; + } + } + + console.log("Environment selection counts:", selectedEnvCounts); + + // org-2 should be selected most often (highest average age) + expect(selectedEnvCounts["env-2"]).toBeGreaterThan(selectedEnvCounts["env-4"] || 0); + + // org-4 should be selected least 
often (lowest average age) + const env4Count = selectedEnvCounts["env-4"] || 0; + expect(env4Count).toBeLessThan(selectedEnvCounts["env-2"]); + + // Verify that envs with higher average queue age are selected more frequently + const sortedEnvs = Object.entries(selectedEnvCounts).sort((a, b) => b[1] - a[1]); + console.log("Sorted environment frequencies:", sortedEnvs); + + // The top 2 most frequently selected orgs should be env-2 and env-3 + // as they have the highest average queue ages + const topTwoEnvs = new Set([sortedEnvs[0][0], sortedEnvs[1][0]]); + expect(topTwoEnvs).toContain("env-2"); // Highest average age + expect(topTwoEnvs).toContain("env-3"); // Second highest average age + + // Calculate selection percentages + const totalSelections = Object.values(selectedEnvCounts).reduce((a, b) => a + b, 0); + const selectionPercentages = Object.entries(selectedEnvCounts).reduce( + (acc, [orgId, count]) => { + acc[orgId] = (count / totalSelections) * 100; + return acc; + }, + {} as Record + ); + + console.log("Environment selection percentages:", selectionPercentages); + + // Verify that env-2 (highest average age) gets selected in at least 40% of iterations + expect(selectionPercentages["env-2"]).toBeGreaterThan(40); + + // Verify that env-4 (lowest average age) gets selected in less than 20% of iterations + expect(selectionPercentages["env-4"] || 0).toBeLessThan(20); + } + ); + + redisTest( + "should not overly bias picking environments when queue have priority offset ages", + async ({ redisOptions: redis }) => { + const keyProducer = new RunQueueFullKeyProducer(); + const strategy = new FairDequeuingStrategy({ + redis, + keys: keyProducer, + defaultEnvConcurrencyLimit: 5, + parentQueueLimit: 100, + seed: "test-seed-max-orgs", + maximumEnvCount: 2, // Only select top 2 orgs + }); + + const now = Date.now(); + + // Setup 4 envs with different queue age profiles + const envSetups = [ + { + envId: "env-1", + queues: [ + { age: 1000 }, // Average age: 1000 + ], + 
}, + { + envId: "env-2", + queues: [ + { age: 5000 + RUN_QUEUE_RESUME_PRIORITY_TIMESTAMP_OFFSET }, // Average age: 5000 + 1 year + { age: 5000 + RUN_QUEUE_RESUME_PRIORITY_TIMESTAMP_OFFSET }, + ], + }, + { + envId: "env-3", + queues: [ + { age: 2000 }, // Average age: 2000 + { age: 2000 }, + ], + }, + { + envId: "env-4", + queues: [ + { age: 500 }, // Average age: 500 + { age: 500 }, + ], + }, + ]; + + // Setup queues and concurrency for each org + for (const setup of envSetups) { + await setupConcurrency({ + redis, + keyProducer, + env: { + envId: setup.envId, + projectId: "proj-1", + orgId: "org-1", + currentConcurrency: 0, + limit: 5, + }, + }); + + for (let i = 0; i < setup.queues.length; i++) { + await setupQueue({ + redis, + keyProducer, + parentQueue: "parent-queue", + score: now - setup.queues[i].age, + queueId: `queue-${setup.envId}-${i}`, + orgId: `org-${setup.envId}`, + projectId: `proj-${setup.envId}`, + envId: setup.envId, + }); + } + } + + // Run multiple iterations to verify consistent behavior + const iterations = 100; + const selectedEnvCounts: Record = {}; + + for (let i = 0; i < iterations; i++) { + const envResult = await strategy.distributeFairQueuesFromParentQueue( + "parent-queue", + `consumer-${i}` + ); + const result = flattenResults(envResult); + + // Track which orgs were included in the result + const selectedEnvs = new Set(result.map((queueId) => keyProducer.envIdFromQueue(queueId))); + + // Verify we never get more than maximumOrgCount orgs + expect(selectedEnvs.size).toBeLessThanOrEqual(2); + + for (const envId of selectedEnvs) { + selectedEnvCounts[envId] = (selectedEnvCounts[envId] || 0) + 1; + } + } + + console.log("Environment selection counts:", selectedEnvCounts); + + // org-2 should be selected most often (highest average age) + expect(selectedEnvCounts["env-2"]).toBeGreaterThan(selectedEnvCounts["env-4"] || 0); + + // org-4 should be selected least often (lowest average age) + const env4Count = selectedEnvCounts["env-4"] || 0; 
+ expect(env4Count).toBeLessThan(selectedEnvCounts["env-2"]); + + // Verify that envs with higher average queue age are selected more frequently + const sortedEnvs = Object.entries(selectedEnvCounts).sort((a, b) => b[1] - a[1]); + console.log("Sorted environment frequencies:", sortedEnvs); + + // The top 2 most frequently selected orgs should be env-2 and env-3 + // as they have the highest average queue ages + const topTwoEnvs = new Set([sortedEnvs[0][0], sortedEnvs[1][0]]); + expect(topTwoEnvs).toContain("env-2"); // Highest average age + expect(topTwoEnvs).toContain("env-3"); // Second highest average age + + // Calculate selection percentages + const totalSelections = Object.values(selectedEnvCounts).reduce((a, b) => a + b, 0); + const selectionPercentages = Object.entries(selectedEnvCounts).reduce( + (acc, [orgId, count]) => { + acc[orgId] = (count / totalSelections) * 100; + return acc; + }, + {} as Record + ); + + console.log("Environment selection percentages:", selectionPercentages); + + // Verify that env-2 (highest average age) gets selected in at least 40% of iterations + expect(selectionPercentages["env-2"]).toBeGreaterThan(40); + + // Verify that env-4 (lowest average age) gets selected in less than 20% of iterations + expect(selectionPercentages["env-4"] || 0).toBeLessThan(20); + } + ); +}); + +// Helper function to flatten results for counting +function flattenResults(results: Array): string[] { + return results.flatMap((envQueue) => envQueue.queues); +} + +type SetupQueueOptions = { + parentQueue: string; + redis: RedisOptions; + score: number; + queueId: string; + orgId: string; + projectId: string; + envId: string; + keyProducer: RunQueueKeyProducer; +}; + +/** + * Adds a queue to Redis with the given parameters + */ +async function setupQueue({ + redis, + keyProducer, + parentQueue, + score, + queueId, + orgId, + projectId, + envId, +}: SetupQueueOptions) { + const $redis = createRedisClient(redis); + // Add the queue to the parent queue's 
sorted set + const queue = keyProducer.queueKey(orgId, projectId, envId, queueId); + + await $redis.zadd(parentQueue, score, queue); +} + +type SetupConcurrencyOptions = { + redis: RedisOptions; + keyProducer: RunQueueKeyProducer; + env: { + envId: string; + projectId: string; + orgId: string; + currentConcurrency: number; + limit?: number; + reserveConcurrency?: number; + }; +}; + +/** + * Sets up concurrency-related Redis keys for orgs and envs + */ +async function setupConcurrency({ redis, keyProducer, env }: SetupConcurrencyOptions) { + const $redis = createRedisClient(redis); + // Set env concurrency limit + if (typeof env.limit === "number") { + await $redis.set(keyProducer.envConcurrencyLimitKey(env), env.limit.toString()); + } + + if (env.currentConcurrency > 0) { + // Set current concurrency by adding dummy members to the set + const envCurrentKey = keyProducer.envCurrentConcurrencyKey(env); + + // Add dummy running job IDs to simulate current concurrency + const dummyJobs = Array.from( + { length: env.currentConcurrency }, + (_, i) => `dummy-job-${i}-${Date.now()}` + ); + + await $redis.sadd(envCurrentKey, ...dummyJobs); + } + + if (env.reserveConcurrency && env.reserveConcurrency > 0) { + // Set reserved concurrency by adding dummy members to the set + const envReservedKey = keyProducer.envReserveConcurrencyKey(env); + + // Add dummy reserved job IDs to simulate reserved concurrency + const dummyJobs = Array.from( + { length: env.reserveConcurrency }, + (_, i) => `dummy-reserved-job-${i}-${Date.now()}` + ); + + await $redis.sadd(envReservedKey, ...dummyJobs); + } +} + +/** + * Calculates the standard deviation of a set of numbers. + * Standard deviation measures the amount of variation of a set of values from their mean. + * A low standard deviation indicates that the values tend to be close to the mean. 
+ * + * @param values Array of numbers to calculate standard deviation for + * @returns The standard deviation of the values + */ +function calculateStandardDeviation(values: number[]): number { + // If there are no values or only one value, the standard deviation is 0 + if (values.length <= 1) { + return 0; + } + + // Calculate the mean (average) of the values + const mean = values.reduce((sum, value) => sum + value, 0) / values.length; + + // Calculate the sum of squared differences from the mean + const squaredDifferences = values.map((value) => Math.pow(value - mean, 2)); + const sumOfSquaredDifferences = squaredDifferences.reduce((sum, value) => sum + value, 0); + + // Calculate the variance (average of squared differences) + const variance = sumOfSquaredDifferences / (values.length - 1); // Using n-1 for sample standard deviation + + // Standard deviation is the square root of the variance + return Math.sqrt(variance); +} diff --git a/internal-packages/run-engine/src/run-queue/keyProducer.ts b/internal-packages/run-engine/src/run-queue/keyProducer.ts index 3acdcb9747..cebdacea5c 100644 --- a/internal-packages/run-engine/src/run-queue/keyProducer.ts +++ b/internal-packages/run-engine/src/run-queue/keyProducer.ts @@ -40,12 +40,37 @@ export class RunQueueFullKeyProducer implements RunQueueKeyProducer { } } - queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string) { + queueKey( + orgId: string, + projId: string, + envId: string, + queue: string, + concurrencyKey?: string + ): string; + queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string): string; + queueKey( + envOrOrgId: MinimalAuthenticatedEnvironment | string, + projIdOrQueue: string, + envIdConcurrencyKey?: string, + queue?: string, + concurrencyKey?: string + ): string { + if (typeof envOrOrgId !== "string") { + return [ + this.orgKeySection(envOrOrgId.organization.id), + this.projKeySection(envOrOrgId.project.id), + 
this.envKeySection(envOrOrgId.id), + this.queueSection(projIdOrQueue), + ] + .concat(envIdConcurrencyKey ? this.concurrencyKeySection(envIdConcurrencyKey) : []) + .join(":"); + } + return [ - this.orgKeySection(env.organization.id), - this.projKeySection(env.project.id), - this.envKeySection(env.id), - this.queueSection(queue), + this.orgKeySection(envOrOrgId), + this.projKeySection(projIdOrQueue), + this.envKeySection(envIdConcurrencyKey!), + this.queueSection(queue!), ] .concat(concurrencyKey ? this.concurrencyKeySection(concurrencyKey) : []) .join(":"); diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index 72f417add2..50c4bcf8eb 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -1,6 +1,6 @@ import { z } from "zod"; import { RuntimeEnvironmentType } from "../../../database/src/index.js"; -import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; +import type { MinimalAuthenticatedEnvironment } from "../shared/index.js"; export const InputPayload = z.object({ runId: z.string(), @@ -38,7 +38,15 @@ export type EnvDescriptor = { export interface RunQueueKeyProducer { //queue + queueKey( + orgId: string, + projId: string, + envId: string, + queue: string, + concurrencyKey?: string + ): string; queueKey(env: MinimalAuthenticatedEnvironment, queue: string, concurrencyKey?: string): string; + envQueueKey(env: MinimalAuthenticatedEnvironment): string; envQueueKeyFromQueue(queue: string): string; queueConcurrencyLimitKey(env: MinimalAuthenticatedEnvironment, queue: string): string; diff --git a/internal-packages/run-engine/src/shared/index.ts b/internal-packages/run-engine/src/shared/index.ts index c327c63f37..3e541bcd5f 100644 --- a/internal-packages/run-engine/src/shared/index.ts +++ b/internal-packages/run-engine/src/shared/index.ts @@ -1,5 +1,5 @@ -import { Attributes } from "@internal/tracing"; -import { Prisma 
} from "@trigger.dev/database"; +import type { Attributes } from "@internal/tracing"; +import type { Prisma } from "@trigger.dev/database"; export type AuthenticatedEnvironment = Prisma.RuntimeEnvironmentGetPayload<{ include: { project: true; organization: true; orgMember: true }; diff --git a/internal-packages/run-engine/vitest.config.ts b/internal-packages/run-engine/vitest.config.ts index 735133dda9..e10e77f70e 100644 --- a/internal-packages/run-engine/vitest.config.ts +++ b/internal-packages/run-engine/vitest.config.ts @@ -2,6 +2,7 @@ import { defineConfig } from "vitest/config"; export default defineConfig({ test: { + reporters: process.env.GITHUB_ACTIONS ? ["verbose", "github-actions"] : ["verbose"], include: ["**/*.test.ts"], globals: true, isolate: true, From 5d7e490132df56733cc54193c0a9294ed16e6253 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 Mar 2025 10:20:15 +0000 Subject: [PATCH 07/12] Configure the new queue selection strategy in the webapp and get it all building and typechecks passing --- apps/webapp/app/env.server.ts | 6 +++++ apps/webapp/app/v3/runEngine.server.ts | 11 +++++++++ apps/webapp/remix.config.js | 3 +++ apps/webapp/tsconfig.json | 4 +++- .../run-engine/src/engine/index.ts | 5 ++-- .../run-engine/src/engine/locking.ts | 10 ++++---- .../run-engine/src/engine/types.ts | 5 ++++ ....ts => fairQueueSelectionStrategy.test.ts} | 24 +++++++++---------- ...ategy.ts => fairQueueSelectionStrategy.ts} | 22 ++++++++--------- .../run-engine/src/run-queue/index.test.ts | 22 ++++++++--------- .../run-engine/src/run-queue/index.ts | 17 +++++++------ .../run-engine/src/run-queue/types.ts | 2 +- internal-packages/run-engine/tsconfig.json | 14 +---------- 13 files changed, 81 insertions(+), 64 deletions(-) rename internal-packages/run-engine/src/run-queue/{fairDequeuingStrategy.test.ts => fairQueueSelectionStrategy.test.ts} (98%) rename internal-packages/run-engine/src/run-queue/{fairDequeuingStrategy.ts => fairQueueSelectionStrategy.ts} (97%) diff 
--git a/apps/webapp/app/env.server.ts b/apps/webapp/app/env.server.ts index 9a108a9406..45118358c3 100644 --- a/apps/webapp/app/env.server.ts +++ b/apps/webapp/app/env.server.ts @@ -419,6 +419,12 @@ const EnvironmentSchema = z.object({ RUN_ENGINE_TIMEOUT_EXECUTING: z.coerce.number().int().default(60_000), RUN_ENGINE_TIMEOUT_EXECUTING_WITH_WAITPOINTS: z.coerce.number().int().default(60_000), RUN_ENGINE_DEBUG_WORKER_NOTIFICATIONS: z.coerce.boolean().default(false), + RUN_ENGINE_PARENT_QUEUE_LIMIT: z.coerce.number().int().default(1000), + RUN_ENGINE_CONCURRENCY_LIMIT_BIAS: z.coerce.number().default(0.75), + RUN_ENGINE_AVAILABLE_CAPACITY_BIAS: z.coerce.number().default(0.3), + RUN_ENGINE_QUEUE_AGE_RANDOMIZATION_BIAS: z.coerce.number().default(0.25), + RUN_ENGINE_REUSE_SNAPSHOT_COUNT: z.coerce.number().int().default(0), + RUN_ENGINE_MAXIMUM_ENV_COUNT: z.coerce.number().int().optional(), RUN_ENGINE_WORKER_REDIS_HOST: z .string() diff --git a/apps/webapp/app/v3/runEngine.server.ts b/apps/webapp/app/v3/runEngine.server.ts index 2176925c53..382302ff16 100644 --- a/apps/webapp/app/v3/runEngine.server.ts +++ b/apps/webapp/app/v3/runEngine.server.ts @@ -43,6 +43,17 @@ function createRunEngine() { enableAutoPipelining: true, ...(env.RUN_ENGINE_RUN_QUEUE_REDIS_TLS_DISABLED === "true" ? 
{} : { tls: {} }), }, + queueSelectionStrategyOptions: { + parentQueueLimit: env.RUN_ENGINE_PARENT_QUEUE_LIMIT, + biases: { + concurrencyLimitBias: env.RUN_ENGINE_CONCURRENCY_LIMIT_BIAS, + availableCapacityBias: env.RUN_ENGINE_AVAILABLE_CAPACITY_BIAS, + queueAgeRandomization: env.RUN_ENGINE_QUEUE_AGE_RANDOMIZATION_BIAS, + }, + reuseSnapshotCount: env.RUN_ENGINE_REUSE_SNAPSHOT_COUNT, + maximumEnvCount: env.RUN_ENGINE_MAXIMUM_ENV_COUNT, + tracer, + }, }, runLock: { redis: { diff --git a/apps/webapp/remix.config.js b/apps/webapp/remix.config.js index 296921e575..519c134a98 100644 --- a/apps/webapp/remix.config.js +++ b/apps/webapp/remix.config.js @@ -26,6 +26,9 @@ module.exports = { "superjson", "prismjs/components/prism-json", "prismjs/components/prism-typescript", + "@internal/run-engine", + "@internal/redis", + "@internal/tracing", ], browserNodeBuiltinsPolyfill: { modules: { path: true, os: true, crypto: true } }, watchPaths: async () => { diff --git a/apps/webapp/tsconfig.json b/apps/webapp/tsconfig.json index f56eadb0c6..c38fbe44f0 100644 --- a/apps/webapp/tsconfig.json +++ b/apps/webapp/tsconfig.json @@ -40,7 +40,9 @@ "@internal/redis-worker": ["../../internal-packages/redis-worker/src/index"], "@internal/redis-worker/*": ["../../internal-packages/redis-worker/src/*"], "@internal/redis": ["../../internal-packages/redis/src/index"], - "@internal/redis/*": ["../../internal-packages/redis/src/*"] + "@internal/redis/*": ["../../internal-packages/redis/src/*"], + "@internal/tracing": ["../../internal-packages/tracing/src/index"], + "@internal/tracing/*": ["../../internal-packages/tracing/src/*"] }, "noEmit": true } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index e06dd472f8..31670d442c 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -53,7 +53,7 @@ import { nanoid } from "nanoid"; import { EventEmitter } from "node:events"; 
import { z } from "zod"; import { RunQueue } from "../run-queue/index.js"; -import { FairDequeuingStrategy } from "../run-queue/fairDequeuingStrategy.js"; +import { FairQueueSelectionStrategy } from "../run-queue/fairQueueSelectionStrategy.js"; import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; import { MAX_TASK_RUN_ATTEMPTS } from "./consts.js"; import { getRunWithBackgroundWorkerTasks } from "./db/worker.js"; @@ -159,9 +159,10 @@ export class RunEngine { name: "rq", tracer: trace.getTracer("rq"), keys, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ keys, redis: { ...options.queue.redis, keyPrefix: `${options.queue.redis.keyPrefix}runqueue:` }, + defaultEnvConcurrencyLimit: options.queue?.defaultEnvConcurrency ?? 10, }), defaultEnvConcurrency: options.queue?.defaultEnvConcurrency ?? 10, logger: new Logger("RunQueue", "debug"), diff --git a/internal-packages/run-engine/src/engine/locking.ts b/internal-packages/run-engine/src/engine/locking.ts index 90f1464824..1ffed51ba1 100644 --- a/internal-packages/run-engine/src/engine/locking.ts +++ b/internal-packages/run-engine/src/engine/locking.ts @@ -1,14 +1,16 @@ -import Redlock, { RedlockAbortSignal } from "redlock"; +// import { default: Redlock } from "redlock"; +const { default: Redlock } = require("redlock"); import { AsyncLocalStorage } from "async_hooks"; import { Redis } from "@internal/redis"; +import * as redlock from "redlock"; interface LockContext { resources: string; - signal: RedlockAbortSignal; + signal: redlock.RedlockAbortSignal; } export class RunLocker { - private redlock: Redlock; + private redlock: InstanceType; private asyncLocalStorage: AsyncLocalStorage; constructor(options: { redis: Redis }) { @@ -26,7 +28,7 @@ export class RunLocker { async lock( resources: string[], duration: number, - routine: (signal: RedlockAbortSignal) => Promise + routine: (signal: redlock.RedlockAbortSignal) => Promise ): Promise { const 
currentContext = this.asyncLocalStorage.getStore(); const joinedResources = resources.sort().join(","); diff --git a/internal-packages/run-engine/src/engine/types.ts b/internal-packages/run-engine/src/engine/types.ts index 040f3a9213..aea71d605b 100644 --- a/internal-packages/run-engine/src/engine/types.ts +++ b/internal-packages/run-engine/src/engine/types.ts @@ -4,6 +4,7 @@ import { MachinePreset, MachinePresetName, QueueOptions, RetryOptions } from "@t import { PrismaClient } from "@trigger.dev/database"; import { type RedisOptions } from "@internal/redis"; import { MinimalAuthenticatedEnvironment } from "../shared/index.js"; +import { FairQueueSelectionStrategyOptions } from "../run-queue/fairQueueSelectionStrategy.js"; export type RunEngineOptions = { prisma: PrismaClient; @@ -21,6 +22,10 @@ export type RunEngineOptions = { redis: RedisOptions; retryOptions?: RetryOptions; defaultEnvConcurrency?: number; + queueSelectionStrategyOptions?: Pick< + FairQueueSelectionStrategyOptions, + "parentQueueLimit" | "tracer" | "biases" | "reuseSnapshotCount" | "maximumEnvCount" + >; }; runLock: { redis: RedisOptions; diff --git a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.test.ts b/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.test.ts similarity index 98% rename from internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.test.ts rename to internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.test.ts index 4f99ff90e8..0a2f053ff9 100644 --- a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.test.ts +++ b/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.test.ts @@ -1,6 +1,6 @@ import { redisTest } from "@internal/testcontainers"; import { describe, expect, vi } from "vitest"; -import { FairDequeuingStrategy } from "./fairDequeuingStrategy.js"; +import { FairQueueSelectionStrategy } from "./fairQueueSelectionStrategy.js"; import { RunQueueFullKeyProducer } from 
"./keyProducer.js"; import { createRedisClient, Redis, RedisOptions } from "@internal/redis"; import { EnvQueues, RunQueueKeyProducer } from "./types.js"; @@ -13,7 +13,7 @@ describe("FairDequeuingStrategy", () => { "should distribute a single queue from a single env", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -47,7 +47,7 @@ describe("FairDequeuingStrategy", () => { redisTest("should respect env concurrency limits", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 2, @@ -80,7 +80,7 @@ describe("FairDequeuingStrategy", () => { "should give extra concurrency when the env has reserve concurrency", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 2, @@ -126,7 +126,7 @@ describe("FairDequeuingStrategy", () => { redisTest("should respect parentQueueLimit", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -185,7 +185,7 @@ describe("FairDequeuingStrategy", () => { "should reuse snapshots across calls for the same consumer", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -282,7 +282,7 @@ 
describe("FairDequeuingStrategy", () => { "should fairly distribute queues across environments over time", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -440,7 +440,7 @@ describe("FairDequeuingStrategy", () => { "should shuffle environments while maintaining age order within environments", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -623,7 +623,7 @@ describe("FairDequeuingStrategy", () => { const strategies = Array.from( { length: numStrategies }, (_, i) => - new FairDequeuingStrategy({ + new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -708,7 +708,7 @@ describe("FairDequeuingStrategy", () => { // Helper function to run iterations with a specific age influence async function runWithQueueAgeRandomization(queueAgeRandomization: number) { - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -799,7 +799,7 @@ describe("FairDequeuingStrategy", () => { "should respect maximumEnvCount and select envs based on queue ages", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, @@ -942,7 +942,7 @@ describe("FairDequeuingStrategy", () => { "should not overly bias picking environments when queue have priority offset ages", async ({ redisOptions: redis }) => { const keyProducer = new RunQueueFullKeyProducer(); - const strategy = new 
FairDequeuingStrategy({ + const strategy = new FairQueueSelectionStrategy({ redis, keys: keyProducer, defaultEnvConcurrencyLimit: 5, diff --git a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts b/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.ts similarity index 97% rename from internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts rename to internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.ts index c4f7a898b7..eb65c41513 100644 --- a/internal-packages/run-engine/src/run-queue/fairDequeuingStrategy.ts +++ b/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.ts @@ -1,17 +1,17 @@ +import { createRedisClient, Redis, type RedisOptions } from "@internal/redis"; +import { startSpan, type Tracer } from "@internal/tracing"; import { createCache, DefaultStatefulContext, Namespace, Cache as UnkeyCache } from "@unkey/cache"; import { MemoryStore } from "@unkey/cache/stores"; import { randomUUID } from "crypto"; +import seedrandom from "seedrandom"; import { EnvDescriptor, EnvQueues, - RunQueueFairDequeueStrategy, RunQueueKeyProducer, + RunQueueSelectionStrategy, } from "./types.js"; -import seedrandom from "seedrandom"; -import { startSpan, type Tracer } from "@internal/tracing"; -import { Redis, type RedisOptions, createRedisClient } from "@internal/redis"; -export type FairDequeuingStrategyBiases = { +export type FairQueueSelectionStrategyBiases = { /** * How much to bias towards environments with higher concurrency limits * 0 = no bias, 1 = full bias based on limit differences @@ -33,7 +33,7 @@ export type FairDequeuingStrategyBiases = { queueAgeRandomization: number; }; -export type FairDequeuingStrategyOptions = { +export type FairQueueSelectionStrategyOptions = { redis: RedisOptions; keys: RunQueueKeyProducer; defaultEnvConcurrencyLimit?: number; @@ -44,7 +44,7 @@ export type FairDequeuingStrategyOptions = { * Configure biasing for environment shuffling * If not provided, no 
biasing will be applied (completely random shuffling) */ - biases?: FairDequeuingStrategyBiases; + biases?: FairQueueSelectionStrategyBiases; reuseSnapshotCount?: number; maximumEnvCount?: number; }; @@ -79,13 +79,13 @@ const emptyFairQueueSnapshot: FairQueueSnapshot = { queues: [], }; -const defaultBiases: FairDequeuingStrategyBiases = { +const defaultBiases: FairQueueSelectionStrategyBiases = { concurrencyLimitBias: 0, availableCapacityBias: 0, queueAgeRandomization: 0, // Default to completely age-based ordering }; -export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { +export class FairQueueSelectionStrategy implements RunQueueSelectionStrategy { private _cache: UnkeyCache<{ concurrencyLimit: number; }>; @@ -100,7 +100,7 @@ export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { private _defaultEnvConcurrencyLimit: number; private _parentQueueLimit: number; - constructor(private options: FairDequeuingStrategyOptions) { + constructor(private options: FairQueueSelectionStrategyOptions) { const ctx = new DefaultStatefulContext(); const memory = new MemoryStore({ persistentMap: new Map() }); @@ -612,7 +612,7 @@ export class FairDequeuingStrategy implements RunQueueFairDequeueStrategy { } } -export class NoopFairDequeuingStrategy implements RunQueueFairDequeueStrategy { +export class NoopFairDequeuingStrategy implements RunQueueSelectionStrategy { async distributeFairQueuesFromParentQueue( parentQueue: string, consumerId: string diff --git a/internal-packages/run-engine/src/run-queue/index.test.ts b/internal-packages/run-engine/src/run-queue/index.test.ts index d8ef1bb36e..7fcae56eeb 100644 --- a/internal-packages/run-engine/src/run-queue/index.test.ts +++ b/internal-packages/run-engine/src/run-queue/index.test.ts @@ -6,7 +6,7 @@ import { setTimeout } from "node:timers/promises"; import { RunQueue } from "./index.js"; import { InputPayload } from "./types.js"; import { createRedisClient } from "@internal/redis"; -import { 
FairDequeuingStrategy } from "./fairDequeuingStrategy.js"; +import { FairQueueSelectionStrategy } from "./fairQueueSelectionStrategy.js"; import { RunQueueFullKeyProducer } from "./keyProducer.js"; const testOptions = { @@ -70,7 +70,7 @@ describe("RunQueue", () => { redisTest("Get/set Queue concurrency limit", { timeout: 15_000 }, async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -120,7 +120,7 @@ describe("RunQueue", () => { redisTest("Update env concurrency limits", { timeout: 5_000 }, async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -160,7 +160,7 @@ describe("RunQueue", () => { async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -278,7 +278,7 @@ describe("RunQueue", () => { async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -389,7 +389,7 @@ describe("RunQueue", () => { async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -465,7 +465,7 @@ describe("RunQueue", () => { redisTest("Acking", { timeout: 5_000 }, async ({ 
redisContainer, redisOptions }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -549,7 +549,7 @@ describe("RunQueue", () => { redisTest("Ack (before dequeue)", { timeout: 5_000 }, async ({ redisContainer }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -611,7 +611,7 @@ describe("RunQueue", () => { redisTest("Nacking", { timeout: 15_000 }, async ({ redisContainer, redisOptions }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -710,7 +710,7 @@ describe("RunQueue", () => { async ({ redisContainer, redisOptions }) => { const queue = new RunQueue({ ...testOptions, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), @@ -832,7 +832,7 @@ describe("RunQueue", () => { retryOptions: { maxAttempts: 1, }, - queuePriorityStrategy: new FairDequeuingStrategy({ + queueSelectionStrategy: new FairQueueSelectionStrategy({ redis: { keyPrefix: "runqueue:test:", host: redisContainer.getHost(), diff --git a/internal-packages/run-engine/src/run-queue/index.ts b/internal-packages/run-engine/src/run-queue/index.ts index 85f1c86c9b..374d224aff 100644 --- a/internal-packages/run-engine/src/run-queue/index.ts +++ b/internal-packages/run-engine/src/run-queue/index.ts @@ -20,7 +20,7 @@ import { InputPayload, OutputPayload, RunQueueKeyProducer, - RunQueueFairDequeueStrategy, + RunQueueSelectionStrategy, } from 
"./types.js"; import { createRedisClient, @@ -46,7 +46,7 @@ export type RunQueueOptions = { defaultEnvConcurrency: number; windowSize?: number; keys: RunQueueKeyProducer; - queuePriorityStrategy: RunQueueFairDequeueStrategy; + queueSelectionStrategy: RunQueueSelectionStrategy; verbose?: boolean; logger: Logger; retryOptions?: RetryOptions; @@ -75,7 +75,7 @@ export class RunQueue { private logger: Logger; private redis: Redis; public keys: RunQueueKeyProducer; - private queuePriorityStrategy: RunQueueFairDequeueStrategy; + private queueSelectionStrategy: RunQueueSelectionStrategy; constructor(private readonly options: RunQueueOptions) { this.retryOptions = options.retryOptions ?? defaultRetrySettings; @@ -90,7 +90,7 @@ export class RunQueue { this.logger = options.logger; this.keys = options.keys; - this.queuePriorityStrategy = options.queuePriorityStrategy; + this.queueSelectionStrategy = options.queueSelectionStrategy; this.subscriber = createRedisClient(options.redis, { onError: (error) => { @@ -259,11 +259,10 @@ export class RunQueue { return this.#trace( "dequeueMessageInSharedQueue", async (span) => { - const envQueues = - await this.options.queuePriorityStrategy.distributeFairQueuesFromParentQueue( - masterQueue, - consumerId - ); + const envQueues = await this.queueSelectionStrategy.distributeFairQueuesFromParentQueue( + masterQueue, + consumerId + ); span.setAttribute("environment_count", envQueues.length); diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index 50c4bcf8eb..b703d318ea 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -95,7 +95,7 @@ export type EnvQueues = { queues: string[]; }; -export interface RunQueueFairDequeueStrategy { +export interface RunQueueSelectionStrategy { distributeFairQueuesFromParentQueue( parentQueue: string, consumerId: string diff --git 
a/internal-packages/run-engine/tsconfig.json b/internal-packages/run-engine/tsconfig.json index ee2b56fe80..c3ea60e197 100644 --- a/internal-packages/run-engine/tsconfig.json +++ b/internal-packages/run-engine/tsconfig.json @@ -13,19 +13,7 @@ "preserveWatchOutput": true, "skipLibCheck": true, "noEmit": true, - "strict": true, - "paths": { - "@internal/testcontainers": ["../../internal-packages/testcontainers/src/index"], - "@internal/testcontainers/*": ["../../internal-packages/testcontainers/src/*"], - "@internal/redis-worker": ["../../internal-packages/redis-worker/src/index"], - "@internal/redis-worker/*": ["../../internal-packages/redis-worker/src/*"], - "@internal/redis": ["../../internal-packages/redis/src/index"], - "@internal/redis/*": ["../../internal-packages/redis/src/*"], - "@trigger.dev/core": ["../../packages/core/src/index"], - "@trigger.dev/core/*": ["../../packages/core/src/*"], - "@trigger.dev/database": ["../database/src/index"], - "@trigger.dev/database/*": ["../database/src/*"] - } + "strict": true }, "exclude": ["node_modules"] } From fb73cdd7e0e90d09756b3f9a7b476e5f0d8ce009 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 Mar 2025 12:50:07 +0000 Subject: [PATCH 08/12] webapp now uses built packages, building redis-worker, run-engine, database, using better tsconfig setups for tests, moving isomorphic code into core/v3/isomorphic --- .eslintrc.js | 14 ---------- apps/webapp/.eslintrc | 7 +---- .../app/components/run/TriggerDetail.tsx | 2 +- apps/webapp/app/entry.server.tsx | 2 +- apps/webapp/app/hooks/useSyncTraceRuns.ts | 2 +- .../HttpEndpointPresenter.server.ts | 2 +- .../app/presenters/v3/SpanPresenter.server.ts | 2 +- .../presenters/v3/TaskListPresenter.server.ts | 2 +- ...ndpointSlug.schedules.$id.registrations.ts | 5 +++- .../api.v1.$endpointSlug.sources.$id.ts | 2 +- ...intSlug.triggers.$id.registrations.$key.ts | 2 +- ...endpointSlug.triggers.$id.registrations.ts | 2 +- .../route.ts | 5 +++- 
.../routes/api.v1.endpointindex.$indexId.ts | 5 +++- .../api.v1.event-dispatchers.ephemeral.ts | 7 +---- .../app/routes/api.v1.events.$eventId.ts | 2 +- apps/webapp/app/routes/api.v1.events.bulk.ts | 2 +- apps/webapp/app/routes/api.v1.events.ts | 2 +- ...$httpEndpointId.env.$envType.$shortcode.ts | 2 +- .../app/routes/api.v1.jobs.$jobSlug.invoke.ts | 2 +- .../routes/api.v1.runs.$runId.logs/route.ts | 2 +- .../api.v1.runs.$runId.statuses.$id/route.ts | 2 +- .../app/routes/api.v1.runs.$runId.statuses.ts | 2 +- .../route.ts | 10 ++++--- .../route.ts | 2 +- .../routes/api.v1.runs.$runId.tasks/route.ts | 7 ++++- ...ts.tokens.$waitpointFriendlyId.complete.ts | 2 +- .../app/routes/api.v1.waitpoints.tokens.ts | 2 +- .../webapp/app/routes/api.v1.webhooks.$key.ts | 2 +- .../api.v2.$endpointSlug.sources.$id.ts | 2 +- ...intSlug.triggers.$id.registrations.$key.ts | 2 +- .../app/routes/api.v2.events.$eventId.ts | 2 +- .../app/routes/api.v2.runs.$runId.statuses.ts | 2 +- .../app/routes/engine.v1.dev.dequeue.ts | 2 +- ...e.v1.dev.runs.$runFriendlyId.logs.debug.ts | 4 +-- ...s.$snapshotFriendlyId.attempts.complete.ts | 7 +---- ...hots.$snapshotFriendlyId.attempts.start.ts | 2 +- ...snapshots.$snapshotFriendlyId.heartbeat.ts | 10 ++----- ...ev.runs.$runFriendlyId.snapshots.latest.ts | 2 +- ...ne.v1.runs.$runFriendlyId.wait.duration.ts | 2 +- ...points.tokens.$waitpointFriendlyId.wait.ts | 2 +- ...ployments.$deploymentFriendlyId.dequeue.ts | 2 +- ...-actions.runs.$runFriendlyId.logs.debug.ts | 4 +-- ...urces.batches.$batchId.check-completion.ts | 2 +- .../route.tsx | 4 +-- .../endpoints/indexEndpoint.server.ts | 2 +- ... 
=> performEndpointIndexService.server.ts} | 0 ...Consumer.ts => sqsEventConsumer.server.ts} | 0 ...ts => HandleHttpEndpointService.server.ts} | 0 .../triggers/registerWebhook.server.ts | 2 +- apps/webapp/app/services/worker.server.ts | 2 +- apps/webapp/app/utils/delays.ts | 2 +- apps/webapp/app/v3/friendlyIdentifiers.ts | 2 +- apps/webapp/app/v3/handleSocketIo.server.ts | 2 +- .../app/v3/marqs/devQueueConsumer.server.ts | 2 +- .../app/v3/models/workerDeployment.server.ts | 2 +- .../webapp/app/v3/runEngineHandlers.server.ts | 2 +- .../app/v3/services/batchTriggerV4.server.ts | 2 +- .../changeCurrentDeployment.server.ts | 2 +- .../services/createBackgroundWorker.server.ts | 2 +- .../v3/services/createCheckpoint.server.ts | 2 +- .../createDeployedBackgroundWorker.server.ts | 3 +-- ...createDeploymentBackgroundWorker.server.ts | 2 +- .../v3/services/enqueueDelayedRun.server.ts | 2 +- .../v3/services/finalizeDeployment.server.ts | 1 - .../app/v3/services/triggerTaskV1.server.ts | 2 +- .../app/v3/services/triggerTaskV2.server.ts | 7 ++++- .../worker/workerGroupTokenService.server.ts | 5 +++- apps/webapp/remix.config.js | 13 ++------- apps/webapp/tsconfig.json | 27 +++---------------- internal-packages/database/package.json | 14 ++++++---- internal-packages/redis-worker/package.json | 22 +++++++++++---- .../redis-worker/tsconfig.build.json | 21 +++++++++++++++ internal-packages/redis-worker/tsconfig.json | 27 +++---------------- .../redis-worker/tsconfig.src.json | 19 +++++++++++++ .../redis-worker/tsconfig.test.json | 20 ++++++++++++++ internal-packages/run-engine/package.json | 22 +++++++++++---- .../run-engine/src/engine/db/worker.ts | 2 +- .../src/engine/executionSnapshots.ts | 2 +- .../run-engine/src/engine/index.ts | 2 +- .../src/engine/tests/batchTrigger.test.ts | 2 +- .../engine/tests/batchTriggerAndWait.test.ts | 2 +- .../src/engine/tests/dequeuing.test.ts | 2 +- .../src/engine/tests/priority.test.ts | 2 +- .../fairQueueSelectionStrategy.test.ts | 4 +-- 
.../run-engine/src/run-queue/types.ts | 2 +- .../run-engine/tsconfig.build.json | 21 +++++++++++++++ internal-packages/run-engine/tsconfig.json | 19 +++---------- .../run-engine/tsconfig.src.json | 19 +++++++++++++ .../run-engine/tsconfig.test.json | 20 ++++++++++++++ internal-packages/testcontainers/src/setup.ts | 2 +- packages/core/package.json | 19 +++++++++++-- packages/core/src/v3/apps/index.ts | 5 ---- .../src/v3/{apps => isomorphic}/consts.ts | 0 .../src/v3/{apps => isomorphic}/duration.ts | 0 .../src/v3/{apps => isomorphic}/friendlyId.ts | 0 packages/core/src/v3/isomorphic/index.ts | 5 ++++ .../v3/{apps => isomorphic}/maxDuration.ts | 0 .../src/v3/{apps => isomorphic}/queueName.ts | 0 pnpm-lock.yaml | 9 +++++++ 100 files changed, 313 insertions(+), 212 deletions(-) delete mode 100644 .eslintrc.js rename apps/webapp/app/services/endpoints/{performEndpointIndexService.ts => performEndpointIndexService.server.ts} (100%) rename apps/webapp/app/services/events/{sqsEventConsumer.ts => sqsEventConsumer.server.ts} (100%) rename apps/webapp/app/services/httpendpoint/{HandleHttpEndpointService.ts => HandleHttpEndpointService.server.ts} (100%) create mode 100644 internal-packages/redis-worker/tsconfig.build.json create mode 100644 internal-packages/redis-worker/tsconfig.src.json create mode 100644 internal-packages/redis-worker/tsconfig.test.json create mode 100644 internal-packages/run-engine/tsconfig.build.json create mode 100644 internal-packages/run-engine/tsconfig.src.json create mode 100644 internal-packages/run-engine/tsconfig.test.json rename packages/core/src/v3/{apps => isomorphic}/consts.ts (100%) rename packages/core/src/v3/{apps => isomorphic}/duration.ts (100%) rename packages/core/src/v3/{apps => isomorphic}/friendlyId.ts (100%) create mode 100644 packages/core/src/v3/isomorphic/index.ts rename packages/core/src/v3/{apps => isomorphic}/maxDuration.ts (100%) rename packages/core/src/v3/{apps => isomorphic}/queueName.ts (100%) diff --git a/.eslintrc.js 
b/.eslintrc.js deleted file mode 100644 index af28391649..0000000000 --- a/.eslintrc.js +++ /dev/null @@ -1,14 +0,0 @@ -module.exports = { - root: true, - // This tells ESLint to load the config from the package `eslint-config-custom` - extends: ["custom"], - settings: { - next: { - rootDir: ["apps/*/"], - }, - }, - parserOptions: { - sourceType: "module", - ecmaVersion: 2020, - }, -}; diff --git a/apps/webapp/.eslintrc b/apps/webapp/.eslintrc index 3211737dec..187c257f8b 100644 --- a/apps/webapp/.eslintrc +++ b/apps/webapp/.eslintrc @@ -1,10 +1,5 @@ { - "plugins": [ - "@trigger.dev/eslint-plugin", - "react-hooks", - "@typescript-eslint/eslint-plugin", - "import" - ], + "plugins": ["react-hooks", "@typescript-eslint/eslint-plugin", "import"], "parser": "@typescript-eslint/parser", "overrides": [ { diff --git a/apps/webapp/app/components/run/TriggerDetail.tsx b/apps/webapp/app/components/run/TriggerDetail.tsx index aee57b22ea..4c7a393936 100644 --- a/apps/webapp/app/components/run/TriggerDetail.tsx +++ b/apps/webapp/app/components/run/TriggerDetail.tsx @@ -11,7 +11,7 @@ import { RunPanelIconSection, RunPanelProperties, } from "./RunCard"; -import { DisplayProperty } from "@trigger.dev/core"; +import type { DisplayProperty } from "@trigger.dev/core"; export function TriggerDetail({ trigger, diff --git a/apps/webapp/app/entry.server.tsx b/apps/webapp/app/entry.server.tsx index e18038df55..08e1a92b42 100644 --- a/apps/webapp/app/entry.server.tsx +++ b/apps/webapp/app/entry.server.tsx @@ -14,7 +14,7 @@ import { OperatingSystemContextProvider, OperatingSystemPlatform, } from "./components/primitives/OperatingSystemProvider"; -import { getSharedSqsEventConsumer } from "./services/events/sqsEventConsumer"; +import { getSharedSqsEventConsumer } from "./services/events/sqsEventConsumer.server"; import { singleton } from "./utils/singleton"; const ABORT_DELAY = 30000; diff --git a/apps/webapp/app/hooks/useSyncTraceRuns.ts b/apps/webapp/app/hooks/useSyncTraceRuns.ts index 
6a341636b5..3257bb6c10 100644 --- a/apps/webapp/app/hooks/useSyncTraceRuns.ts +++ b/apps/webapp/app/hooks/useSyncTraceRuns.ts @@ -1,4 +1,4 @@ -import { Prettify } from "@trigger.dev/core"; +import type { Prettify } from "@trigger.dev/core"; import { TaskRun } from "@trigger.dev/database"; import { SyncedShapeData, useSyncedShape } from "./useSyncedShape"; diff --git a/apps/webapp/app/presenters/HttpEndpointPresenter.server.ts b/apps/webapp/app/presenters/HttpEndpointPresenter.server.ts index fe791481ee..242d6478a9 100644 --- a/apps/webapp/app/presenters/HttpEndpointPresenter.server.ts +++ b/apps/webapp/app/presenters/HttpEndpointPresenter.server.ts @@ -1,7 +1,7 @@ import { z } from "zod"; import { PrismaClient, prisma } from "~/db.server"; import { sortEnvironments } from "~/utils/environmentSort"; -import { httpEndpointUrl } from "~/services/httpendpoint/HandleHttpEndpointService"; +import { httpEndpointUrl } from "~/services/httpendpoint/HandleHttpEndpointService.server"; import { getSecretStore } from "~/services/secrets/secretStore.server"; import { projectPath } from "~/utils/pathBuilder"; diff --git a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts index d679e6f163..82df22ae6a 100644 --- a/apps/webapp/app/presenters/v3/SpanPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/SpanPresenter.server.ts @@ -11,7 +11,7 @@ import { eventRepository } from "~/v3/eventRepository.server"; import { machinePresetFromName } from "~/v3/machinePresets.server"; import { FINAL_ATTEMPT_STATUSES, isFailedRunStatus, isFinalRunStatus } from "~/v3/taskStatus"; import { BasePresenter } from "./basePresenter.server"; -import { getMaxDuration } from "@trigger.dev/core/v3/apps"; +import { getMaxDuration } from "@trigger.dev/core/v3/isomorphic"; import { logger } from "~/services/logger.server"; import { getTaskEventStoreTableForRun, TaskEventStoreTable } from "~/v3/taskEventStore.server"; import { Pi } from 
"lucide-react"; diff --git a/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts b/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts index c4260bf312..1aed45d4d5 100644 --- a/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts +++ b/apps/webapp/app/presenters/v3/TaskListPresenter.server.ts @@ -20,7 +20,7 @@ import { logger } from "~/services/logger.server"; import { BasePresenter } from "./basePresenter.server"; import { TaskRunStatus } from "~/database-types"; import { concurrencyTracker } from "~/v3/services/taskRunConcurrencyTracker.server"; -import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/isomorphic"; export type Task = { slug: string; diff --git a/apps/webapp/app/routes/api.v1.$endpointSlug.schedules.$id.registrations.ts b/apps/webapp/app/routes/api.v1.$endpointSlug.schedules.$id.registrations.ts index 65f0979fa2..c5b33a067c 100644 --- a/apps/webapp/app/routes/api.v1.$endpointSlug.schedules.$id.registrations.ts +++ b/apps/webapp/app/routes/api.v1.$endpointSlug.schedules.$id.registrations.ts @@ -1,6 +1,9 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { RegisterScheduleBodySchema, RegisterScheduleResponseBodySchema } from "@trigger.dev/core"; +import { + RegisterScheduleBodySchema, + RegisterScheduleResponseBodySchema, +} from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/api.v1.$endpointSlug.sources.$id.ts b/apps/webapp/app/routes/api.v1.$endpointSlug.sources.$id.ts index 5b70726407..efc855e770 100644 --- a/apps/webapp/app/routes/api.v1.$endpointSlug.sources.$id.ts +++ b/apps/webapp/app/routes/api.v1.$endpointSlug.sources.$id.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; 
import { json } from "@remix-run/server-runtime"; -import { UpdateTriggerSourceBodyV1Schema } from "@trigger.dev/core"; +import { UpdateTriggerSourceBodyV1Schema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.$key.ts b/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.$key.ts index c021d3b287..9f3f3f06ad 100644 --- a/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.$key.ts +++ b/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.$key.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { RegisterTriggerBodySchemaV1 } from "@trigger.dev/core"; +import { RegisterTriggerBodySchemaV1 } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.ts b/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.ts index cd707a5c86..0827ba84db 100644 --- a/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.ts +++ b/apps/webapp/app/routes/api.v1.$endpointSlug.triggers.$id.registrations.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { InitializeTriggerBodySchema } from "@trigger.dev/core"; +import { InitializeTriggerBodySchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git 
a/apps/webapp/app/routes/api.v1.accounts.$accountId.connections.$clientSlug/route.ts b/apps/webapp/app/routes/api.v1.accounts.$accountId.connections.$clientSlug/route.ts index dc2c8edfb5..03491721d4 100644 --- a/apps/webapp/app/routes/api.v1.accounts.$accountId.connections.$clientSlug/route.ts +++ b/apps/webapp/app/routes/api.v1.accounts.$accountId.connections.$clientSlug/route.ts @@ -1,6 +1,9 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { CreateExternalConnectionBodySchema, ErrorWithStackSchema } from "@trigger.dev/core"; +import { + CreateExternalConnectionBodySchema, + ErrorWithStackSchema, +} from "@trigger.dev/core/schemas"; import { z } from "zod"; import { generateErrorMessage } from "zod-error"; import { authenticateApiRequest } from "~/services/apiAuth.server"; diff --git a/apps/webapp/app/routes/api.v1.endpointindex.$indexId.ts b/apps/webapp/app/routes/api.v1.endpointindex.$indexId.ts index 614399b589..5a34cc8038 100644 --- a/apps/webapp/app/routes/api.v1.endpointindex.$indexId.ts +++ b/apps/webapp/app/routes/api.v1.endpointindex.$indexId.ts @@ -1,4 +1,7 @@ -import { GetEndpointIndexResponse, GetEndpointIndexResponseSchema } from "@trigger.dev/core"; +import { + GetEndpointIndexResponse, + GetEndpointIndexResponseSchema, +} from "@trigger.dev/core/schemas"; import { ActionFunctionArgs, json } from "@remix-run/server-runtime"; import { z } from "zod"; import { prisma } from "~/db.server"; diff --git a/apps/webapp/app/routes/api.v1.event-dispatchers.ephemeral.ts b/apps/webapp/app/routes/api.v1.event-dispatchers.ephemeral.ts index 6c9597cd52..ec15929723 100644 --- a/apps/webapp/app/routes/api.v1.event-dispatchers.ephemeral.ts +++ b/apps/webapp/app/routes/api.v1.event-dispatchers.ephemeral.ts @@ -1,14 +1,9 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { - 
EphemeralEventDispatcherRequestBodySchema, - InvokeJobRequestBodySchema, -} from "@trigger.dev/core"; -import { z } from "zod"; +import { EphemeralEventDispatcherRequestBodySchema } from "@trigger.dev/core/schemas"; import { PrismaErrorSchema } from "~/db.server"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { CreateEphemeralEventDispatcherService } from "~/services/dispatchers/createEphemeralEventDispatcher.server"; -import { InvokeJobService } from "~/services/jobs/invokeJob.server"; import { logger } from "~/services/logger.server"; export async function action({ request, params }: ActionFunctionArgs) { diff --git a/apps/webapp/app/routes/api.v1.events.$eventId.ts b/apps/webapp/app/routes/api.v1.events.$eventId.ts index 2a115aebf7..b924cc0040 100644 --- a/apps/webapp/app/routes/api.v1.events.$eventId.ts +++ b/apps/webapp/app/routes/api.v1.events.$eventId.ts @@ -1,6 +1,6 @@ import type { LoaderFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { GetEvent } from "@trigger.dev/core"; +import { GetEvent } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { prisma } from "~/db.server"; import { runOriginalStatus } from "~/models/jobRun.server"; diff --git a/apps/webapp/app/routes/api.v1.events.bulk.ts b/apps/webapp/app/routes/api.v1.events.bulk.ts index a082c7ddbc..efca90a4b3 100644 --- a/apps/webapp/app/routes/api.v1.events.bulk.ts +++ b/apps/webapp/app/routes/api.v1.events.bulk.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { SendBulkEventsBodySchema } from "@trigger.dev/core"; +import { SendBulkEventsBodySchema } from "@trigger.dev/core/schemas"; import { generateErrorMessage } from "zod-error"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { IngestSendEvent } from "~/services/events/ingestSendEvent.server"; diff --git 
a/apps/webapp/app/routes/api.v1.events.ts b/apps/webapp/app/routes/api.v1.events.ts index 5fe2b960c5..fcd42842e9 100644 --- a/apps/webapp/app/routes/api.v1.events.ts +++ b/apps/webapp/app/routes/api.v1.events.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { SendEventBodySchema } from "@trigger.dev/core"; +import { SendEventBodySchema } from "@trigger.dev/core/schemas"; import { generateErrorMessage } from "zod-error"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { IngestSendEvent } from "~/services/events/ingestSendEvent.server"; diff --git a/apps/webapp/app/routes/api.v1.http-endpoints.$httpEndpointId.env.$envType.$shortcode.ts b/apps/webapp/app/routes/api.v1.http-endpoints.$httpEndpointId.env.$envType.$shortcode.ts index 918167b9eb..cf9ec9c5f2 100644 --- a/apps/webapp/app/routes/api.v1.http-endpoints.$httpEndpointId.env.$envType.$shortcode.ts +++ b/apps/webapp/app/routes/api.v1.http-endpoints.$httpEndpointId.env.$envType.$shortcode.ts @@ -2,7 +2,7 @@ import type { ActionFunctionArgs, LoaderFunctionArgs } from "@remix-run/server-r import { HandleHttpEndpointService, HttpEndpointParamsSchema, -} from "~/services/httpendpoint/HandleHttpEndpointService"; +} from "~/services/httpendpoint/HandleHttpEndpointService.server"; import { logger } from "~/services/logger.server"; export async function action({ request, params }: ActionFunctionArgs) { diff --git a/apps/webapp/app/routes/api.v1.jobs.$jobSlug.invoke.ts b/apps/webapp/app/routes/api.v1.jobs.$jobSlug.invoke.ts index 35a7f5e091..2e9dc4d05d 100644 --- a/apps/webapp/app/routes/api.v1.jobs.$jobSlug.invoke.ts +++ b/apps/webapp/app/routes/api.v1.jobs.$jobSlug.invoke.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { InvokeJobRequestBodySchema } from "@trigger.dev/core"; +import { 
InvokeJobRequestBodySchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { PrismaErrorSchema } from "~/db.server"; import { authenticateApiRequest } from "~/services/apiAuth.server"; diff --git a/apps/webapp/app/routes/api.v1.runs.$runId.logs/route.ts b/apps/webapp/app/routes/api.v1.runs.$runId.logs/route.ts index 9c3f155f54..57c58d7de9 100644 --- a/apps/webapp/app/routes/api.v1.runs.$runId.logs/route.ts +++ b/apps/webapp/app/routes/api.v1.runs.$runId.logs/route.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { LogMessageSchema } from "@trigger.dev/core"; +import { LogMessageSchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { CreateRunLogService } from "./CreateRunLogService.server"; diff --git a/apps/webapp/app/routes/api.v1.runs.$runId.statuses.$id/route.ts b/apps/webapp/app/routes/api.v1.runs.$runId.statuses.$id/route.ts index 1da8a7b6bc..98dacf0475 100644 --- a/apps/webapp/app/routes/api.v1.runs.$runId.statuses.$id/route.ts +++ b/apps/webapp/app/routes/api.v1.runs.$runId.statuses.$id/route.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { JobRunStatusRecordSchema, StatusUpdateSchema } from "@trigger.dev/core"; +import { JobRunStatusRecordSchema, StatusUpdateSchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/api.v1.runs.$runId.statuses.ts b/apps/webapp/app/routes/api.v1.runs.$runId.statuses.ts index aaa3f29d10..f0a640ed3c 100644 --- a/apps/webapp/app/routes/api.v1.runs.$runId.statuses.ts +++ b/apps/webapp/app/routes/api.v1.runs.$runId.statuses.ts @@ -1,6 +1,6 @@ import type { 
LoaderFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { JobRunStatusRecordSchema } from "@trigger.dev/core"; +import { JobRunStatusRecordSchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { prisma } from "~/db.server"; import { runOriginalStatus } from "~/models/jobRun.server"; diff --git a/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.complete/route.ts b/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.complete/route.ts index 618f015f2f..63a9eb7ce2 100644 --- a/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.complete/route.ts +++ b/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.complete/route.ts @@ -1,11 +1,10 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import type { CompleteTaskBodyOutput } from "@trigger.dev/core"; +import type { CompleteTaskBodyOutput } from "@trigger.dev/core/schemas"; import { - API_VERSIONS, CompleteTaskBodyInputSchema, CompleteTaskBodyV2InputSchema, -} from "@trigger.dev/core"; +} from "@trigger.dev/core/schemas"; import { z } from "zod"; import type { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { authenticateApiRequest } from "~/services/apiAuth.server"; @@ -14,6 +13,11 @@ import { startActiveSpan } from "~/v3/tracer.server"; import { parseRequestJsonAsync } from "~/utils/parseRequestJson.server"; import { FailRunTaskService } from "../api.v1.runs.$runId.tasks.$id.fail/FailRunTaskService.server"; +const API_VERSIONS = { + LAZY_LOADED_CACHED_TASKS: "2023-09-29", + SERIALIZED_TASK_OUTPUT: "2023-11-01", +}; + const ParamsSchema = z.object({ runId: z.string(), id: z.string(), diff --git a/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.fail/route.ts b/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.fail/route.ts index 78e1cc39d8..6b9d03d445 100644 --- a/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.fail/route.ts +++ 
b/apps/webapp/app/routes/api.v1.runs.$runId.tasks.$id.fail/route.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { FailTaskBodyInputSchema } from "@trigger.dev/core"; +import { FailTaskBodyInputSchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/api.v1.runs.$runId.tasks/route.ts b/apps/webapp/app/routes/api.v1.runs.$runId.tasks/route.ts index fb4f6121d6..7485d72a61 100644 --- a/apps/webapp/app/routes/api.v1.runs.$runId.tasks/route.ts +++ b/apps/webapp/app/routes/api.v1.runs.$runId.tasks/route.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { API_VERSIONS, RunTaskBodyOutputSchema } from "@trigger.dev/core"; +import { RunTaskBodyOutputSchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; @@ -9,6 +9,11 @@ import { ChangeRequestLazyLoadedCachedTasks } from "./ChangeRequestLazyLoadedCac import { startActiveSpan } from "~/v3/tracer.server"; import { parseRequestJsonAsync } from "~/utils/parseRequestJson.server"; +const API_VERSIONS = { + LAZY_LOADED_CACHED_TASKS: "2023-09-29", + SERIALIZED_TASK_OUTPUT: "2023-11-01", +}; + const ParamsSchema = z.object({ runId: z.string(), }); diff --git a/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts b/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts index e7109f67b2..ace2e80cf6 100644 --- a/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts +++ b/apps/webapp/app/routes/api.v1.waitpoints.tokens.$waitpointFriendlyId.complete.ts @@ -5,7 +5,7 @@ import { 
conditionallyExportPacket, stringifyIO, } from "@trigger.dev/core/v3"; -import { WaitpointId } from "@trigger.dev/core/v3/apps"; +import { WaitpointId } from "@trigger.dev/core/v3/isomorphic"; import { z } from "zod"; import { $replica } from "~/db.server"; import { env } from "~/env.server"; diff --git a/apps/webapp/app/routes/api.v1.waitpoints.tokens.ts b/apps/webapp/app/routes/api.v1.waitpoints.tokens.ts index 7603551fe1..23da42ca0b 100644 --- a/apps/webapp/app/routes/api.v1.waitpoints.tokens.ts +++ b/apps/webapp/app/routes/api.v1.waitpoints.tokens.ts @@ -3,7 +3,7 @@ import { CreateWaitpointTokenRequestBody, CreateWaitpointTokenResponseBody, } from "@trigger.dev/core/v3"; -import { WaitpointId } from "@trigger.dev/core/v3/apps"; +import { WaitpointId } from "@trigger.dev/core/v3/isomorphic"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; import { parseDelay } from "~/utils/delays"; import { resolveIdempotencyKeyTTL } from "~/utils/idempotencyKeys.server"; diff --git a/apps/webapp/app/routes/api.v1.webhooks.$key.ts b/apps/webapp/app/routes/api.v1.webhooks.$key.ts index 572898aaae..c1d943a6a1 100644 --- a/apps/webapp/app/routes/api.v1.webhooks.$key.ts +++ b/apps/webapp/app/routes/api.v1.webhooks.$key.ts @@ -1,6 +1,6 @@ import type { ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { UpdateWebhookBodySchema } from "@trigger.dev/core"; +import { UpdateWebhookBodySchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/api.v2.$endpointSlug.sources.$id.ts b/apps/webapp/app/routes/api.v2.$endpointSlug.sources.$id.ts index 40d5761111..b746e34e69 100644 --- a/apps/webapp/app/routes/api.v2.$endpointSlug.sources.$id.ts +++ b/apps/webapp/app/routes/api.v2.$endpointSlug.sources.$id.ts @@ -1,6 +1,6 @@ import type { 
ActionFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { UpdateTriggerSourceBodyV2Schema } from "@trigger.dev/core"; +import { UpdateTriggerSourceBodyV2Schema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/api.v2.$endpointSlug.triggers.$id.registrations.$key.ts b/apps/webapp/app/routes/api.v2.$endpointSlug.triggers.$id.registrations.$key.ts index a59ae2fd50..ace9aac30d 100644 --- a/apps/webapp/app/routes/api.v2.$endpointSlug.triggers.$id.registrations.$key.ts +++ b/apps/webapp/app/routes/api.v2.$endpointSlug.triggers.$id.registrations.$key.ts @@ -4,7 +4,7 @@ import { REGISTER_SOURCE_EVENT_V2, RegisterSourceEventV2, RegisterTriggerBodySchemaV2, -} from "@trigger.dev/core"; +} from "@trigger.dev/core/schemas"; import { z } from "zod"; import { authenticateApiRequest } from "~/services/apiAuth.server"; import { IngestSendEvent } from "~/services/events/ingestSendEvent.server"; diff --git a/apps/webapp/app/routes/api.v2.events.$eventId.ts b/apps/webapp/app/routes/api.v2.events.$eventId.ts index b56b8bf896..88447e5819 100644 --- a/apps/webapp/app/routes/api.v2.events.$eventId.ts +++ b/apps/webapp/app/routes/api.v2.events.$eventId.ts @@ -1,6 +1,6 @@ import type { LoaderFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { GetEvent } from "@trigger.dev/core"; +import { GetEvent } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { $replica } from "~/db.server"; import { authenticateApiRequest } from "~/services/apiAuth.server"; diff --git a/apps/webapp/app/routes/api.v2.runs.$runId.statuses.ts b/apps/webapp/app/routes/api.v2.runs.$runId.statuses.ts index c3dfada960..ba23a2a28b 100644 --- a/apps/webapp/app/routes/api.v2.runs.$runId.statuses.ts +++ 
b/apps/webapp/app/routes/api.v2.runs.$runId.statuses.ts @@ -1,6 +1,6 @@ import type { LoaderFunctionArgs } from "@remix-run/server-runtime"; import { json } from "@remix-run/server-runtime"; -import { JobRunStatusRecordSchema } from "@trigger.dev/core"; +import { JobRunStatusRecordSchema } from "@trigger.dev/core/schemas"; import { z } from "zod"; import { prisma } from "~/db.server"; import { authenticateApiRequest } from "~/services/apiAuth.server"; diff --git a/apps/webapp/app/routes/engine.v1.dev.dequeue.ts b/apps/webapp/app/routes/engine.v1.dev.dequeue.ts index 0f10c3dee1..048ccdbca7 100644 --- a/apps/webapp/app/routes/engine.v1.dev.dequeue.ts +++ b/apps/webapp/app/routes/engine.v1.dev.dequeue.ts @@ -1,6 +1,6 @@ import { json } from "@remix-run/server-runtime"; import { DequeuedMessage, DevDequeueRequestBody, MachineResources } from "@trigger.dev/core/v3"; -import { BackgroundWorkerId } from "@trigger.dev/core/v3/apps"; +import { BackgroundWorkerId } from "@trigger.dev/core/v3/isomorphic"; import { env } from "~/env.server"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; import { engine } from "~/v3/runEngine.server"; diff --git a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.logs.debug.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.logs.debug.ts index d804f49b79..9bdc4d16d2 100644 --- a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.logs.debug.ts +++ b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.logs.debug.ts @@ -1,6 +1,6 @@ import { TypedResponse } from "@remix-run/server-runtime"; -import { assertExhaustive } from "@trigger.dev/core"; -import { RunId } from "@trigger.dev/core/v3/apps"; +import { assertExhaustive } from "@trigger.dev/core/utils"; +import { RunId } from "@trigger.dev/core/v3/isomorphic"; import { WorkerApiDebugLogBody, WorkerApiRunAttemptStartResponseBody, diff --git 
a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts index 13752e36fa..da4bab693b 100644 --- a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts +++ b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.complete.ts @@ -1,18 +1,13 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { assertExhaustive } from "@trigger.dev/core"; -import { RunId, SnapshotId } from "@trigger.dev/core/v3/apps"; +import { RunId, SnapshotId } from "@trigger.dev/core/v3/isomorphic"; import { - WorkerApiDebugLogBody, WorkerApiRunAttemptCompleteRequestBody, WorkerApiRunAttemptCompleteResponseBody, - WorkerApiRunAttemptStartResponseBody, - WorkloadHeartbeatResponseBody, } from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { prisma } from "~/db.server"; import { logger } from "~/services/logger.server"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; -import { recordRunDebugLog } from "~/v3/eventRepository.server"; import { engine } from "~/v3/runEngine.server"; const { action } = createActionApiRoute( diff --git a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts index 25787eee05..de214064a8 100644 --- a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts +++ b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.attempts.start.ts @@ -1,6 +1,6 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { MachinePreset } from "@trigger.dev/core/v3"; -import { RunId, SnapshotId } from 
"@trigger.dev/core/v3/apps"; +import { RunId, SnapshotId } from "@trigger.dev/core/v3/isomorphic"; import { WorkerApiRunAttemptStartRequestBody, WorkerApiRunAttemptStartResponseBody, diff --git a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts index 2bf88ff1ac..bab59fd063 100644 --- a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts +++ b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.$snapshotFriendlyId.heartbeat.ts @@ -1,16 +1,10 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { assertExhaustive } from "@trigger.dev/core"; -import { RunId, SnapshotId } from "@trigger.dev/core/v3/apps"; -import { - WorkerApiDebugLogBody, - WorkerApiRunAttemptStartResponseBody, - WorkloadHeartbeatResponseBody, -} from "@trigger.dev/core/v3/workers"; +import { RunId, SnapshotId } from "@trigger.dev/core/v3/isomorphic"; +import { WorkloadHeartbeatResponseBody } from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { prisma } from "~/db.server"; import { logger } from "~/services/logger.server"; import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server"; -import { recordRunDebugLog } from "~/v3/eventRepository.server"; import { engine } from "~/v3/runEngine.server"; const { action } = createActionApiRoute( diff --git a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.latest.ts b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.latest.ts index 91664888a0..60505460bd 100644 --- a/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.latest.ts +++ b/apps/webapp/app/routes/engine.v1.dev.runs.$runFriendlyId.snapshots.latest.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { RunId } from 
"@trigger.dev/core/v3/apps"; +import { RunId } from "@trigger.dev/core/v3/isomorphic"; import { WorkerApiRunLatestSnapshotResponseBody } from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { prisma } from "~/db.server"; diff --git a/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts index 04ef8c5aae..8e11d4d626 100644 --- a/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts +++ b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.wait.duration.ts @@ -1,6 +1,6 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; import { WaitForDurationRequestBody, WaitForDurationResponseBody } from "@trigger.dev/core/v3"; -import { RunId } from "@trigger.dev/core/v3/apps"; +import { RunId } from "@trigger.dev/core/v3/isomorphic"; import { z } from "zod"; import { prisma } from "~/db.server"; diff --git a/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts index e34e25529f..b20d0fd22d 100644 --- a/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts +++ b/apps/webapp/app/routes/engine.v1.runs.$runFriendlyId.waitpoints.tokens.$waitpointFriendlyId.wait.ts @@ -1,6 +1,6 @@ import { json } from "@remix-run/server-runtime"; import { WaitForWaitpointTokenResponseBody } from "@trigger.dev/core/v3"; -import { RunId, WaitpointId } from "@trigger.dev/core/v3/apps"; +import { RunId, WaitpointId } from "@trigger.dev/core/v3/isomorphic"; import { z } from "zod"; import { $replica } from "~/db.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/engine.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts b/apps/webapp/app/routes/engine.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts index 
fbfa194662..76b0f0d3f9 100644 --- a/apps/webapp/app/routes/engine.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts +++ b/apps/webapp/app/routes/engine.v1.worker-actions.deployments.$deploymentFriendlyId.dequeue.ts @@ -1,5 +1,5 @@ import { json, TypedResponse } from "@remix-run/server-runtime"; -import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/isomorphic"; import { WorkerApiDequeueResponseBody } from "@trigger.dev/core/v3/workers"; import { z } from "zod"; import { $replica, prisma } from "~/db.server"; diff --git a/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts index 2ec26906b7..a814ae257f 100644 --- a/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts +++ b/apps/webapp/app/routes/engine.v1.worker-actions.runs.$runFriendlyId.logs.debug.ts @@ -1,5 +1,5 @@ -import { assertExhaustive } from "@trigger.dev/core"; -import { RunId } from "@trigger.dev/core/v3/apps"; +import { assertExhaustive } from "@trigger.dev/core/utils"; +import { RunId } from "@trigger.dev/core/v3/isomorphic"; import { WorkerApiDebugLogBody } from "@trigger.dev/core/v3/runEngineWorker"; import { z } from "zod"; import { createActionWorkerApiRoute } from "~/services/routeBuilders/apiBuilder.server"; diff --git a/apps/webapp/app/routes/resources.batches.$batchId.check-completion.ts b/apps/webapp/app/routes/resources.batches.$batchId.check-completion.ts index bd1eff8b1e..5fff969e6a 100644 --- a/apps/webapp/app/routes/resources.batches.$batchId.check-completion.ts +++ b/apps/webapp/app/routes/resources.batches.$batchId.check-completion.ts @@ -1,6 +1,6 @@ import { parse } from "@conform-to/zod"; import { ActionFunction, json } from "@remix-run/node"; -import { assertExhaustive } from "@trigger.dev/core"; +import { assertExhaustive } from 
"@trigger.dev/core/utils"; import { z } from "zod"; import { redirectWithErrorMessage, redirectWithSuccessMessage } from "~/models/message.server"; import { logger } from "~/services/logger.server"; diff --git a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx index 56ebef0726..d5d0fa3689 100644 --- a/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx +++ b/apps/webapp/app/routes/resources.orgs.$organizationSlug.projects.$projectParam.waitpoints.$waitpointFriendlyId.complete/route.tsx @@ -8,8 +8,8 @@ import { stringifyIO, timeoutError, } from "@trigger.dev/core/v3"; -import { WaitpointId } from "@trigger.dev/core/v3/apps"; -import { Waitpoint } from "@trigger.dev/database"; +import { WaitpointId } from "@trigger.dev/core/v3/isomorphic"; +import type { Waitpoint } from "@trigger.dev/database"; import { useCallback, useRef } from "react"; import { z } from "zod"; import { AnimatedHourglassIcon } from "~/assets/icons/AnimatedHourglassIcon"; diff --git a/apps/webapp/app/services/endpoints/indexEndpoint.server.ts b/apps/webapp/app/services/endpoints/indexEndpoint.server.ts index 27df22c4fc..62e3bf8868 100644 --- a/apps/webapp/app/services/endpoints/indexEndpoint.server.ts +++ b/apps/webapp/app/services/endpoints/indexEndpoint.server.ts @@ -1,6 +1,6 @@ import type { EndpointIndexSource } from "@trigger.dev/database"; import { PrismaClient, prisma } from "~/db.server"; -import { PerformEndpointIndexService } from "./performEndpointIndexService"; +import { PerformEndpointIndexService } from "./performEndpointIndexService.server"; export class IndexEndpointService { #prismaClient: PrismaClient; diff --git a/apps/webapp/app/services/endpoints/performEndpointIndexService.ts 
b/apps/webapp/app/services/endpoints/performEndpointIndexService.server.ts similarity index 100% rename from apps/webapp/app/services/endpoints/performEndpointIndexService.ts rename to apps/webapp/app/services/endpoints/performEndpointIndexService.server.ts diff --git a/apps/webapp/app/services/events/sqsEventConsumer.ts b/apps/webapp/app/services/events/sqsEventConsumer.server.ts similarity index 100% rename from apps/webapp/app/services/events/sqsEventConsumer.ts rename to apps/webapp/app/services/events/sqsEventConsumer.server.ts diff --git a/apps/webapp/app/services/httpendpoint/HandleHttpEndpointService.ts b/apps/webapp/app/services/httpendpoint/HandleHttpEndpointService.server.ts similarity index 100% rename from apps/webapp/app/services/httpendpoint/HandleHttpEndpointService.ts rename to apps/webapp/app/services/httpendpoint/HandleHttpEndpointService.server.ts diff --git a/apps/webapp/app/services/triggers/registerWebhook.server.ts b/apps/webapp/app/services/triggers/registerWebhook.server.ts index f5e004f86b..ff8bddc778 100644 --- a/apps/webapp/app/services/triggers/registerWebhook.server.ts +++ b/apps/webapp/app/services/triggers/registerWebhook.server.ts @@ -6,7 +6,7 @@ import { Prisma, WebhookEnvironment } from "@trigger.dev/database"; import { ulid } from "../ulid.server"; import { getSecretStore } from "../secrets/secretStore.server"; import { z } from "zod"; -import { httpEndpointUrl } from "../httpendpoint/HandleHttpEndpointService"; +import { httpEndpointUrl } from "../httpendpoint/HandleHttpEndpointService.server"; import { isEqual } from "ohash"; type ExtendedWebhook = Prisma.WebhookGetPayload<{ diff --git a/apps/webapp/app/services/worker.server.ts b/apps/webapp/app/services/worker.server.ts index 7d42dd7453..9a6bfe5666 100644 --- a/apps/webapp/app/services/worker.server.ts +++ b/apps/webapp/app/services/worker.server.ts @@ -25,7 +25,7 @@ import { ExpireDispatcherService } from "./dispatchers/expireDispatcher.server"; import { 
InvokeEphemeralDispatcherService } from "./dispatchers/invokeEphemeralEventDispatcher.server"; import { sendEmail } from "./email.server"; import { IndexEndpointService } from "./endpoints/indexEndpoint.server"; -import { PerformEndpointIndexService } from "./endpoints/performEndpointIndexService"; +import { PerformEndpointIndexService } from "./endpoints/performEndpointIndexService.server"; import { ProbeEndpointService } from "./endpoints/probeEndpoint.server"; import { RecurringEndpointIndexService } from "./endpoints/recurringEndpointIndex.server"; import { DeliverEventService } from "./events/deliverEvent.server"; diff --git a/apps/webapp/app/utils/delays.ts b/apps/webapp/app/utils/delays.ts index eaa296e11b..31bdd7518f 100644 --- a/apps/webapp/app/utils/delays.ts +++ b/apps/webapp/app/utils/delays.ts @@ -1,4 +1,4 @@ -import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; +import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/isomorphic"; export const calculateDurationInMs = (options: { seconds?: number; diff --git a/apps/webapp/app/v3/friendlyIdentifiers.ts b/apps/webapp/app/v3/friendlyIdentifiers.ts index b545fb3431..c671b3361b 100644 --- a/apps/webapp/app/v3/friendlyIdentifiers.ts +++ b/apps/webapp/app/v3/friendlyIdentifiers.ts @@ -1 +1 @@ -export { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +export { generateFriendlyId } from "@trigger.dev/core/v3/isomorphic"; diff --git a/apps/webapp/app/v3/handleSocketIo.server.ts b/apps/webapp/app/v3/handleSocketIo.server.ts index 3290a67ed8..5ef5a47ce9 100644 --- a/apps/webapp/app/v3/handleSocketIo.server.ts +++ b/apps/webapp/app/v3/handleSocketIo.server.ts @@ -9,7 +9,7 @@ import { ProviderToPlatformMessages, SharedQueueToClientMessages, } from "@trigger.dev/core/v3"; -import { RunId } from "@trigger.dev/core/v3/apps"; +import { RunId } from "@trigger.dev/core/v3/isomorphic"; import type { WorkerClientToServerEvents, WorkerServerToClientEvents, diff --git 
a/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts b/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts index 8d0b4db9d5..e72ede2398 100644 --- a/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts +++ b/apps/webapp/app/v3/marqs/devQueueConsumer.server.ts @@ -19,7 +19,7 @@ import { FailedTaskRunService } from "../failedTaskRun.server"; import { CancelDevSessionRunsService } from "../services/cancelDevSessionRuns.server"; import { CompleteAttemptService } from "../services/completeAttempt.server"; import { attributesFromAuthenticatedEnv, tracer } from "../tracer.server"; -import { getMaxDuration } from "@trigger.dev/core/v3/apps"; +import { getMaxDuration } from "@trigger.dev/core/v3/isomorphic"; import { DevSubscriber, devPubSub } from "./devPubSub.server"; import { findQueueInEnvironment, sanitizeQueueName } from "~/models/taskQueue.server"; import { createRedisClient, RedisClient } from "~/redis.server"; diff --git a/apps/webapp/app/v3/models/workerDeployment.server.ts b/apps/webapp/app/v3/models/workerDeployment.server.ts index 37d5cae111..ef44f0e748 100644 --- a/apps/webapp/app/v3/models/workerDeployment.server.ts +++ b/apps/webapp/app/v3/models/workerDeployment.server.ts @@ -3,7 +3,7 @@ import { BackgroundWorker, WorkerDeployment } from "@trigger.dev/database"; import { CURRENT_DEPLOYMENT_LABEL, CURRENT_UNMANAGED_DEPLOYMENT_LABEL, -} from "@trigger.dev/core/v3/apps"; +} from "@trigger.dev/core/v3/isomorphic"; import { Prisma, prisma } from "~/db.server"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; diff --git a/apps/webapp/app/v3/runEngineHandlers.server.ts b/apps/webapp/app/v3/runEngineHandlers.server.ts index 4bd833974f..0663e76537 100644 --- a/apps/webapp/app/v3/runEngineHandlers.server.ts +++ b/apps/webapp/app/v3/runEngineHandlers.server.ts @@ -12,7 +12,7 @@ import { reportInvocationUsage } from "~/services/platform.v3.server"; import { roomFromFriendlyRunId, socketIo } from "./handleSocketIo.server"; import { engine } from 
"./runEngine.server"; import { PerformTaskRunAlertsService } from "./services/alerts/performTaskRunAlerts.server"; -import { RunId } from "@trigger.dev/core/v3/apps"; +import { RunId } from "@trigger.dev/core/v3/isomorphic"; import { updateMetadataService } from "~/services/metadata/updateMetadata.server"; import { findEnvironmentFromRun } from "~/models/runtimeEnvironment.server"; import { env } from "~/env.server"; diff --git a/apps/webapp/app/v3/services/batchTriggerV4.server.ts b/apps/webapp/app/v3/services/batchTriggerV4.server.ts index aaa945f915..1434a8f915 100644 --- a/apps/webapp/app/v3/services/batchTriggerV4.server.ts +++ b/apps/webapp/app/v3/services/batchTriggerV4.server.ts @@ -6,7 +6,7 @@ import { packetRequiresOffloading, parsePacket, } from "@trigger.dev/core/v3"; -import { BatchId, RunId } from "@trigger.dev/core/v3/apps"; +import { BatchId, RunId } from "@trigger.dev/core/v3/isomorphic"; import { BatchTaskRun, Prisma } from "@trigger.dev/database"; import { z } from "zod"; import { $transaction, prisma, PrismaClientOrTransaction } from "~/db.server"; diff --git a/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts b/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts index a5740bfe90..bf7968e91e 100644 --- a/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts +++ b/apps/webapp/app/v3/services/changeCurrentDeployment.server.ts @@ -2,7 +2,7 @@ import { WorkerDeployment } from "@trigger.dev/database"; import { BaseService, ServiceValidationError } from "./baseService.server"; import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; import { compareDeploymentVersions } from "../utils/deploymentVersions"; -import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/isomorphic"; export type ChangeCurrentDeploymentDirection = "promote" | "rollback"; diff --git a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts 
b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts index fb5e6eeef0..d66f1102de 100644 --- a/apps/webapp/app/v3/services/createBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createBackgroundWorker.server.ts @@ -21,7 +21,7 @@ import { updateEnvConcurrencyLimits, updateQueueConcurrencyLimits, } from "../runQueue.server"; -import { BackgroundWorkerId } from "@trigger.dev/core/v3/apps"; +import { BackgroundWorkerId } from "@trigger.dev/core/v3/isomorphic"; import { sanitizeQueueName } from "~/models/taskQueue.server"; export class CreateBackgroundWorkerService extends BaseService { diff --git a/apps/webapp/app/v3/services/createCheckpoint.server.ts b/apps/webapp/app/v3/services/createCheckpoint.server.ts index 85f6eb8192..54419aac54 100644 --- a/apps/webapp/app/v3/services/createCheckpoint.server.ts +++ b/apps/webapp/app/v3/services/createCheckpoint.server.ts @@ -8,7 +8,7 @@ import { BaseService } from "./baseService.server"; import { CreateCheckpointRestoreEventService } from "./createCheckpointRestoreEvent.server"; import { ResumeBatchRunService } from "./resumeBatchRun.server"; import { ResumeDependentParentsService } from "./resumeDependentParents.server"; -import { CheckpointId } from "@trigger.dev/core/v3/apps"; +import { CheckpointId } from "@trigger.dev/core/v3/isomorphic"; export class CreateCheckpointService extends BaseService { public async call( diff --git a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts index 2f378ffdc1..d3ff4e6c72 100644 --- a/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createDeployedBackgroundWorker.server.ts @@ -1,6 +1,5 @@ import { CreateBackgroundWorkerRequestBody } from "@trigger.dev/core/v3"; import type { BackgroundWorker } from "@trigger.dev/database"; -import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; import { AuthenticatedEnvironment 
} from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; import { socketIo } from "../handleSocketIo.server"; @@ -11,7 +10,7 @@ import { createBackgroundTasks, syncDeclarativeSchedules } from "./createBackgro import { ExecuteTasksWaitingForDeployService } from "./executeTasksWaitingForDeploy"; import { projectPubSub } from "./projectPubSub.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; -import { BackgroundWorkerId } from "@trigger.dev/core/v3/apps"; +import { CURRENT_DEPLOYMENT_LABEL, BackgroundWorkerId } from "@trigger.dev/core/v3/isomorphic"; export class CreateDeployedBackgroundWorkerService extends BaseService { public async call( diff --git a/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts b/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts index 53f2dedd88..e502e1cdaa 100644 --- a/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts +++ b/apps/webapp/app/v3/services/createDeploymentBackgroundWorker.server.ts @@ -9,7 +9,7 @@ import { syncDeclarativeSchedules, } from "./createBackgroundWorker.server"; import { TimeoutDeploymentService } from "./timeoutDeployment.server"; -import { BackgroundWorkerId } from "@trigger.dev/core/v3/apps"; +import { BackgroundWorkerId } from "@trigger.dev/core/v3/isomorphic"; export class CreateDeploymentBackgroundWorkerService extends BaseService { public async call( diff --git a/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts b/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts index fd0f22f3c2..be655bfdaa 100644 --- a/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts +++ b/apps/webapp/app/v3/services/enqueueDelayedRun.server.ts @@ -1,4 +1,4 @@ -import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/apps"; +import { parseNaturalLanguageDuration } from "@trigger.dev/core/v3/isomorphic"; import { $transaction } from "~/db.server"; import { logger } from 
"~/services/logger.server"; import { marqs } from "~/v3/marqs/index.server"; diff --git a/apps/webapp/app/v3/services/finalizeDeployment.server.ts b/apps/webapp/app/v3/services/finalizeDeployment.server.ts index a69b21c02e..3389c21137 100644 --- a/apps/webapp/app/v3/services/finalizeDeployment.server.ts +++ b/apps/webapp/app/v3/services/finalizeDeployment.server.ts @@ -1,5 +1,4 @@ import { FinalizeDeploymentRequestBody } from "@trigger.dev/core/v3/schemas"; -import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; import { AuthenticatedEnvironment } from "~/services/apiAuth.server"; import { logger } from "~/services/logger.server"; import { socketIo } from "../handleSocketIo.server"; diff --git a/apps/webapp/app/v3/services/triggerTaskV1.server.ts b/apps/webapp/app/v3/services/triggerTaskV1.server.ts index e8a89c00ec..c8d85465d4 100644 --- a/apps/webapp/app/v3/services/triggerTaskV1.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV1.server.ts @@ -11,7 +11,7 @@ import { parseNaturalLanguageDuration, sanitizeQueueName, stringifyDuration, -} from "@trigger.dev/core/v3/apps"; +} from "@trigger.dev/core/v3/isomorphic"; import { Prisma } from "@trigger.dev/database"; import { env } from "~/env.server"; import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; diff --git a/apps/webapp/app/v3/services/triggerTaskV2.server.ts b/apps/webapp/app/v3/services/triggerTaskV2.server.ts index 592b92b688..1d1f6cb558 100644 --- a/apps/webapp/app/v3/services/triggerTaskV2.server.ts +++ b/apps/webapp/app/v3/services/triggerTaskV2.server.ts @@ -6,7 +6,12 @@ import { SemanticInternalAttributes, TriggerTaskRequestBody, } from "@trigger.dev/core/v3"; -import { BatchId, RunId, sanitizeQueueName, stringifyDuration } from "@trigger.dev/core/v3/apps"; +import { + BatchId, + RunId, + sanitizeQueueName, + stringifyDuration, +} from "@trigger.dev/core/v3/isomorphic"; import { Prisma, TaskRun } from "@trigger.dev/database"; import { env } from "~/env.server"; 
import { createTag, MAX_TAGS_PER_RUN } from "~/models/taskRunTag.server"; diff --git a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts index 1c58efcf6a..d7a8a10d7d 100644 --- a/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts +++ b/apps/webapp/app/v3/services/worker/workerGroupTokenService.server.ts @@ -24,7 +24,10 @@ import { env } from "~/env.server"; import { $transaction } from "~/db.server"; import { resolveVariablesForEnvironment } from "~/v3/environmentVariables/environmentVariablesRepository.server"; import { generateJWTTokenForEnvironment } from "~/services/apiAuth.server"; -import { CURRENT_UNMANAGED_DEPLOYMENT_LABEL, fromFriendlyId } from "@trigger.dev/core/v3/apps"; +import { + CURRENT_UNMANAGED_DEPLOYMENT_LABEL, + fromFriendlyId, +} from "@trigger.dev/core/v3/isomorphic"; import { machinePresetFromName } from "~/v3/machinePresets.server"; import { defaultMachine } from "@trigger.dev/platform/v3"; diff --git a/apps/webapp/remix.config.js b/apps/webapp/remix.config.js index 519c134a98..eb290765e8 100644 --- a/apps/webapp/remix.config.js +++ b/apps/webapp/remix.config.js @@ -9,15 +9,12 @@ module.exports = { serverModuleFormat: "cjs", serverDependenciesToBundle: [ /^remix-utils.*/, + /^@internal\//, // Bundle all internal packages + /^@trigger\.dev\//, // Bundle all trigger packages "marked", "axios", - "@internal/redis-worker", "p-limit", "yocto-queue", - "@trigger.dev/core", - "@trigger.dev/sdk", - "@trigger.dev/platform", - "@trigger.dev/yalt", "@unkey/cache", "@unkey/cache/stores", "emails", @@ -26,12 +23,6 @@ module.exports = { "superjson", "prismjs/components/prism-json", "prismjs/components/prism-typescript", - "@internal/run-engine", - "@internal/redis", - "@internal/tracing", ], browserNodeBuiltinsPolyfill: { modules: { path: true, os: true, crypto: true } }, - watchPaths: async () => { - return ["../../packages/core/src/**/*", 
"../../packages/emails/src/**/*"]; - }, }; diff --git a/apps/webapp/tsconfig.json b/apps/webapp/tsconfig.json index c38fbe44f0..5c80d471ea 100644 --- a/apps/webapp/tsconfig.json +++ b/apps/webapp/tsconfig.json @@ -20,30 +20,9 @@ "baseUrl": ".", "paths": { "~/*": ["./app/*"], - "@/*": ["./*"], - "@trigger.dev/sdk": ["../../packages/trigger-sdk/src/index"], - "@trigger.dev/sdk/*": ["../../packages/trigger-sdk/src/*"], - "@trigger.dev/core": ["../../packages/core/src/index"], - "@trigger.dev/core/*": ["../../packages/core/src/*"], - "@trigger.dev/database": ["../../internal-packages/database/src/index"], - "@trigger.dev/database/*": ["../../internal-packages/database/src/*"], - "@trigger.dev/yalt": ["../../packages/yalt/src/index"], - "@trigger.dev/yalt/*": ["../../packages/yalt/src/*"], - "@trigger.dev/otlp-importer": ["../../internal-packages/otlp-importer/src/index"], - "@trigger.dev/otlp-importer/*": ["../../internal-packages/otlp-importer/src/*"], - "emails": ["../../internal-packages/emails/src/index"], - "emails/*": ["../../internal-packages/emails/src/*"], - "@internal/zod-worker": ["../../internal-packages/zod-worker/src/index"], - "@internal/zod-worker/*": ["../../internal-packages/zod-worker/src/*"], - "@internal/run-engine": ["../../internal-packages/run-engine/src/index"], - "@internal/run-engine/*": ["../../internal-packages/run-engine/src/*"], - "@internal/redis-worker": ["../../internal-packages/redis-worker/src/index"], - "@internal/redis-worker/*": ["../../internal-packages/redis-worker/src/*"], - "@internal/redis": ["../../internal-packages/redis/src/index"], - "@internal/redis/*": ["../../internal-packages/redis/src/*"], - "@internal/tracing": ["../../internal-packages/tracing/src/index"], - "@internal/tracing/*": ["../../internal-packages/tracing/src/*"] + "@/*": ["./*"] }, - "noEmit": true + "noEmit": true, + "customConditions": ["@triggerdotdev/source"] } } diff --git a/internal-packages/database/package.json 
b/internal-packages/database/package.json index a170b10cec..be3a505fdb 100644 --- a/internal-packages/database/package.json +++ b/internal-packages/database/package.json @@ -2,21 +2,25 @@ "name": "@trigger.dev/database", "private": true, "version": "0.0.2", - "main": "./src/index.ts", - "types": "./src/index.ts", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", "dependencies": { "@prisma/client": "5.4.1" }, "devDependencies": { - "prisma": "5.4.1" + "prisma": "5.4.1", + "rimraf": "6.0.1" }, "scripts": { + "clean": "rimraf dist", "generate": "prisma generate", "db:migrate:dev:create": "prisma migrate dev --create-only", "db:migrate:deploy": "prisma migrate deploy", "db:push": "prisma db push", "db:studio": "prisma studio", "db:reset": "prisma migrate reset", - "typecheck": "tsc --noEmit" + "typecheck": "tsc --noEmit", + "build": "pnpm run clean && tsc --noEmit false --outDir dist --declaration", + "dev": "tsc --noEmit false --outDir dist --declaration --watch" } -} +} \ No newline at end of file diff --git a/internal-packages/redis-worker/package.json b/internal-packages/redis-worker/package.json index 4d25a44d4c..19e6efc8da 100644 --- a/internal-packages/redis-worker/package.json +++ b/internal-packages/redis-worker/package.json @@ -2,9 +2,17 @@ "name": "@internal/redis-worker", "private": true, "version": "0.0.1", - "main": "./src/index.ts", - "types": "./src/index.ts", + "main": "./dist/src/index.js", + "types": "./dist/src/index.d.ts", "type": "module", + "exports": { + ".": { + "@triggerdotdev/source": "./src/index.ts", + "import": "./dist/src/index.js", + "types": "./dist/src/index.d.ts", + "default": "./dist/src/index.js" + } + }, "dependencies": { "@internal/tracing": "workspace:*", "@internal/redis": "workspace:*", @@ -17,10 +25,14 @@ "devDependencies": { "@internal/testcontainers": "workspace:*", "@types/lodash.omit": "^4.5.7", - "vitest": "^1.4.0" + "vitest": "^1.4.0", + "rimraf": "6.0.1" }, "scripts": { - "typecheck": "tsc --noEmit", - 
"test": "vitest --no-file-parallelism" + "clean": "rimraf dist", + "typecheck": "tsc --noEmit -p tsconfig.build.json", + "test": "vitest --sequence.concurrent=false --no-file-parallelism", + "build": "pnpm run clean && tsc -p tsconfig.build.json", + "dev": "tsc --watch -p tsconfig.build.json" } } \ No newline at end of file diff --git a/internal-packages/redis-worker/tsconfig.build.json b/internal-packages/redis-worker/tsconfig.build.json new file mode 100644 index 0000000000..619461da80 --- /dev/null +++ b/internal-packages/redis-worker/tsconfig.build.json @@ -0,0 +1,21 @@ +{ + "include": ["src/**/*.ts"], + "exclude": ["src/**/*.test.ts"], + "compilerOptions": { + "composite": true, + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], + "outDir": "dist", + "module": "Node16", + "moduleResolution": "Node16", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": true, + "skipLibCheck": true, + "strict": true, + "declaration": true + } +} diff --git a/internal-packages/redis-worker/tsconfig.json b/internal-packages/redis-worker/tsconfig.json index be98ae04b8..af630abe1f 100644 --- a/internal-packages/redis-worker/tsconfig.json +++ b/internal-packages/redis-worker/tsconfig.json @@ -1,27 +1,8 @@ { + "references": [{ "path": "./tsconfig.src.json" }, { "path": "./tsconfig.test.json" }], "compilerOptions": { - "target": "ES2019", - "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], - "module": "Node16", "moduleResolution": "Node16", - "moduleDetection": "force", - "verbatimModuleSyntax": false, - "types": ["vitest/globals"], - "esModuleInterop": true, - "forceConsistentCasingInFileNames": true, - "isolatedModules": true, - "preserveWatchOutput": true, - "skipLibCheck": true, - "noEmit": true, - "strict": true, - "paths": { - "@internal/testcontainers": 
["../../internal-packages/testcontainers/src/index"], - "@internal/testcontainers/*": ["../../internal-packages/testcontainers/src/*"], - "@trigger.dev/core": ["../../packages/core/src/index"], - "@trigger.dev/core/*": ["../../packages/core/src/*"], - "@internal/redis": ["../../internal-packages/redis/src/index"], - "@internal/redis/*": ["../../internal-packages/redis/src/*"] - } - }, - "exclude": ["node_modules"] + "module": "Node16", + "customConditions": ["@triggerdotdev/source"] + } } diff --git a/internal-packages/redis-worker/tsconfig.src.json b/internal-packages/redis-worker/tsconfig.src.json new file mode 100644 index 0000000000..5617aa970c --- /dev/null +++ b/internal-packages/redis-worker/tsconfig.src.json @@ -0,0 +1,19 @@ +{ + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "src/**/*.test.ts"], + "compilerOptions": { + "composite": true, + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], + "module": "Node16", + "moduleResolution": "Node16", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": true, + "skipLibCheck": true, + "strict": true + } +} diff --git a/internal-packages/redis-worker/tsconfig.test.json b/internal-packages/redis-worker/tsconfig.test.json new file mode 100644 index 0000000000..b68d234bd7 --- /dev/null +++ b/internal-packages/redis-worker/tsconfig.test.json @@ -0,0 +1,20 @@ +{ + "include": ["src/**/*.test.ts"], + "references": [{ "path": "./tsconfig.src.json" }], + "compilerOptions": { + "composite": true, + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], + "module": "Node16", + "moduleResolution": "Node16", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "types": ["vitest/globals"], + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": 
true, + "skipLibCheck": true, + "strict": true + } +} diff --git a/internal-packages/run-engine/package.json b/internal-packages/run-engine/package.json index 3c440c3857..450ea1cb06 100644 --- a/internal-packages/run-engine/package.json +++ b/internal-packages/run-engine/package.json @@ -2,9 +2,17 @@ "name": "@internal/run-engine", "private": true, "version": "0.0.1", - "main": "./src/index.ts", - "types": "./src/index.ts", + "main": "./dist/src/index.js", + "types": "./dist/src/index.d.ts", "type": "module", + "exports": { + ".": { + "@triggerdotdev/source": "./src/index.ts", + "import": "./dist/src/index.js", + "types": "./dist/src/index.d.ts", + "default": "./dist/src/index.js" + } + }, "dependencies": { "@internal/redis": "workspace:*", "@internal/redis-worker": "workspace:*", @@ -21,10 +29,14 @@ "devDependencies": { "@internal/testcontainers": "workspace:*", "vitest": "^1.4.0", - "@types/seedrandom": "^3.0.8" + "@types/seedrandom": "^3.0.8", + "rimraf": "6.0.1" }, "scripts": { - "typecheck": "tsc --noEmit", - "test": "vitest --sequence.concurrent=false" + "clean": "rimraf dist", + "typecheck": "tsc --noEmit -p tsconfig.build.json", + "test": "vitest --sequence.concurrent=false --no-file-parallelism", + "build": "pnpm run clean && tsc -p tsconfig.build.json", + "dev": "tsc --watch -p tsconfig.build.json" } } \ No newline at end of file diff --git a/internal-packages/run-engine/src/engine/db/worker.ts b/internal-packages/run-engine/src/engine/db/worker.ts index 8cccc9a4fa..2c3264615c 100644 --- a/internal-packages/run-engine/src/engine/db/worker.ts +++ b/internal-packages/run-engine/src/engine/db/worker.ts @@ -5,7 +5,7 @@ import { PrismaClientOrTransaction, WorkerDeployment, } from "@trigger.dev/database"; -import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/apps"; +import { CURRENT_DEPLOYMENT_LABEL } from "@trigger.dev/core/v3/isomorphic"; type RunWithMininimalEnvironment = Prisma.TaskRunGetPayload<{ include: { diff --git 
a/internal-packages/run-engine/src/engine/executionSnapshots.ts b/internal-packages/run-engine/src/engine/executionSnapshots.ts index 5daca4f419..3f2cec09a3 100644 --- a/internal-packages/run-engine/src/engine/executionSnapshots.ts +++ b/internal-packages/run-engine/src/engine/executionSnapshots.ts @@ -1,5 +1,5 @@ import { CompletedWaitpoint, ExecutionResult } from "@trigger.dev/core/v3"; -import { BatchId, RunId, SnapshotId } from "@trigger.dev/core/v3/apps"; +import { BatchId, RunId, SnapshotId } from "@trigger.dev/core/v3/isomorphic"; import { PrismaClientOrTransaction, TaskRunCheckpoint, diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 31670d442c..89b398ce58 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -35,7 +35,7 @@ import { sanitizeQueueName, SnapshotId, WaitpointId, -} from "@trigger.dev/core/v3/apps"; +} from "@trigger.dev/core/v3/isomorphic"; import { $transaction, Prisma, diff --git a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts index 04c38801cb..56f7a945b3 100644 --- a/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTrigger.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, } from "@internal/testcontainers"; import { trace } from "@internal/tracing"; -import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { generateFriendlyId } from "@trigger.dev/core/v3/isomorphic"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "node:timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts index 9bd99e3ac3..d5b6d4d324 100644 --- 
a/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts +++ b/internal-packages/run-engine/src/engine/tests/batchTriggerAndWait.test.ts @@ -8,7 +8,7 @@ import { trace } from "@internal/tracing"; import { expect, describe } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "node:timers/promises"; -import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { generateFriendlyId } from "@trigger.dev/core/v3/isomorphic"; vi.setConfig({ testTimeout: 60_000 }); diff --git a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts index 2b1b0da307..ae2de9d2f1 100644 --- a/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts +++ b/internal-packages/run-engine/src/engine/tests/dequeuing.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, } from "@internal/testcontainers"; import { trace } from "@internal/tracing"; -import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { generateFriendlyId } from "@trigger.dev/core/v3/isomorphic"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "node:timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/priority.test.ts b/internal-packages/run-engine/src/engine/tests/priority.test.ts index d2a9a5c809..fd1558e3aa 100644 --- a/internal-packages/run-engine/src/engine/tests/priority.test.ts +++ b/internal-packages/run-engine/src/engine/tests/priority.test.ts @@ -4,7 +4,7 @@ import { setupBackgroundWorker, } from "@internal/testcontainers"; import { trace } from "@internal/tracing"; -import { generateFriendlyId } from "@trigger.dev/core/v3/apps"; +import { generateFriendlyId } from "@trigger.dev/core/v3/isomorphic"; import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { PrismaClientOrTransaction } from "@trigger.dev/database"; diff --git 
a/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.test.ts b/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.test.ts index 0a2f053ff9..b07a47db88 100644 --- a/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.test.ts +++ b/internal-packages/run-engine/src/run-queue/fairQueueSelectionStrategy.test.ts @@ -1,10 +1,10 @@ +import { createRedisClient, RedisOptions } from "@internal/redis"; import { redisTest } from "@internal/testcontainers"; import { describe, expect, vi } from "vitest"; +import { RUN_QUEUE_RESUME_PRIORITY_TIMESTAMP_OFFSET } from "./constants.js"; import { FairQueueSelectionStrategy } from "./fairQueueSelectionStrategy.js"; import { RunQueueFullKeyProducer } from "./keyProducer.js"; -import { createRedisClient, Redis, RedisOptions } from "@internal/redis"; import { EnvQueues, RunQueueKeyProducer } from "./types.js"; -import { RUN_QUEUE_RESUME_PRIORITY_TIMESTAMP_OFFSET } from "./constants.js"; vi.setConfig({ testTimeout: 60_000 }); // 30 seconds timeout diff --git a/internal-packages/run-engine/src/run-queue/types.ts b/internal-packages/run-engine/src/run-queue/types.ts index b703d318ea..0eaa048f78 100644 --- a/internal-packages/run-engine/src/run-queue/types.ts +++ b/internal-packages/run-engine/src/run-queue/types.ts @@ -1,5 +1,5 @@ import { z } from "zod"; -import { RuntimeEnvironmentType } from "../../../database/src/index.js"; +import { RuntimeEnvironmentType } from "@trigger.dev/database"; import type { MinimalAuthenticatedEnvironment } from "../shared/index.js"; export const InputPayload = z.object({ diff --git a/internal-packages/run-engine/tsconfig.build.json b/internal-packages/run-engine/tsconfig.build.json new file mode 100644 index 0000000000..619461da80 --- /dev/null +++ b/internal-packages/run-engine/tsconfig.build.json @@ -0,0 +1,21 @@ +{ + "include": ["src/**/*.ts"], + "exclude": ["src/**/*.test.ts"], + "compilerOptions": { + "composite": true, + "target": "ES2019", + "lib": 
["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], + "outDir": "dist", + "module": "Node16", + "moduleResolution": "Node16", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": true, + "skipLibCheck": true, + "strict": true, + "declaration": true + } +} diff --git a/internal-packages/run-engine/tsconfig.json b/internal-packages/run-engine/tsconfig.json index c3ea60e197..af630abe1f 100644 --- a/internal-packages/run-engine/tsconfig.json +++ b/internal-packages/run-engine/tsconfig.json @@ -1,19 +1,8 @@ { + "references": [{ "path": "./tsconfig.src.json" }, { "path": "./tsconfig.test.json" }], "compilerOptions": { - "target": "ES2019", - "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], - "module": "Node16", "moduleResolution": "Node16", - "moduleDetection": "force", - "verbatimModuleSyntax": false, - "types": ["vitest/globals"], - "esModuleInterop": true, - "forceConsistentCasingInFileNames": true, - "isolatedModules": true, - "preserveWatchOutput": true, - "skipLibCheck": true, - "noEmit": true, - "strict": true - }, - "exclude": ["node_modules"] + "module": "Node16", + "customConditions": ["@triggerdotdev/source"] + } } diff --git a/internal-packages/run-engine/tsconfig.src.json b/internal-packages/run-engine/tsconfig.src.json new file mode 100644 index 0000000000..5617aa970c --- /dev/null +++ b/internal-packages/run-engine/tsconfig.src.json @@ -0,0 +1,19 @@ +{ + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "src/**/*.test.ts"], + "compilerOptions": { + "composite": true, + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], + "module": "Node16", + "moduleResolution": "Node16", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": true, + 
"skipLibCheck": true, + "strict": true + } +} diff --git a/internal-packages/run-engine/tsconfig.test.json b/internal-packages/run-engine/tsconfig.test.json new file mode 100644 index 0000000000..b68d234bd7 --- /dev/null +++ b/internal-packages/run-engine/tsconfig.test.json @@ -0,0 +1,20 @@ +{ + "include": ["src/**/*.test.ts"], + "references": [{ "path": "./tsconfig.src.json" }], + "compilerOptions": { + "composite": true, + "target": "ES2019", + "lib": ["ES2019", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], + "module": "Node16", + "moduleResolution": "Node16", + "moduleDetection": "force", + "verbatimModuleSyntax": false, + "types": ["vitest/globals"], + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "isolatedModules": true, + "preserveWatchOutput": true, + "skipLibCheck": true, + "strict": true + } +} diff --git a/internal-packages/testcontainers/src/setup.ts b/internal-packages/testcontainers/src/setup.ts index ffd9d86e1b..a51e24eadd 100644 --- a/internal-packages/testcontainers/src/setup.ts +++ b/internal-packages/testcontainers/src/setup.ts @@ -2,7 +2,7 @@ import { CURRENT_DEPLOYMENT_LABEL, generateFriendlyId, sanitizeQueueName, -} from "@trigger.dev/core/v3/apps"; +} from "@trigger.dev/core/v3/isomorphic"; import { MachineConfig, RetryOptions } from "@trigger.dev/core/v3/schemas"; import { BackgroundWorkerTask, diff --git a/packages/core/package.json b/packages/core/package.json index 5fe268a77d..2275141983 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -58,7 +58,8 @@ "./v3/schemas": "./src/v3/schemas/index.ts", "./v3/runEngineWorker": "./src/v3/runEngineWorker/index.ts", "./v3/machines": "./src/v3/machines/index.ts", - "./v3/serverOnly": "./src/v3/serverOnly/index.ts" + "./v3/serverOnly": "./src/v3/serverOnly/index.ts", + "./v3/isomorphic": "./src/v3/isomorphic/index.ts" }, "sourceDialects": [ "@triggerdotdev/source" @@ -176,6 +177,9 @@ ], "v3/serverOnly": [ "dist/commonjs/v3/serverOnly/index.d.ts" + ], + 
"v3/isomorphic": [ + "dist/commonjs/v3/isomorphic/index.d.ts" ] } }, @@ -658,10 +662,21 @@ "types": "./dist/commonjs/v3/serverOnly/index.d.ts", "default": "./dist/commonjs/v3/serverOnly/index.js" } + }, + "./v3/isomorphic": { + "import": { + "@triggerdotdev/source": "./src/v3/isomorphic/index.ts", + "types": "./dist/esm/v3/isomorphic/index.d.ts", + "default": "./dist/esm/v3/isomorphic/index.js" + }, + "require": { + "types": "./dist/commonjs/v3/isomorphic/index.d.ts", + "default": "./dist/commonjs/v3/isomorphic/index.js" + } } }, "type": "module", "main": "./dist/commonjs/index.js", "types": "./dist/commonjs/index.d.ts", "module": "./dist/esm/index.js" -} +} \ No newline at end of file diff --git a/packages/core/src/v3/apps/index.ts b/packages/core/src/v3/apps/index.ts index 93a1ac2876..80cc2d5a91 100644 --- a/packages/core/src/v3/apps/index.ts +++ b/packages/core/src/v3/apps/index.ts @@ -4,9 +4,4 @@ export * from "./process.js"; export * from "./http.js"; export * from "./provider.js"; export * from "./isExecaChildProcess.js"; -export * from "./friendlyId.js"; -export * from "./duration.js"; -export * from "./maxDuration.js"; -export * from "./queueName.js"; -export * from "./consts.js"; export * from "./exec.js"; diff --git a/packages/core/src/v3/apps/consts.ts b/packages/core/src/v3/isomorphic/consts.ts similarity index 100% rename from packages/core/src/v3/apps/consts.ts rename to packages/core/src/v3/isomorphic/consts.ts diff --git a/packages/core/src/v3/apps/duration.ts b/packages/core/src/v3/isomorphic/duration.ts similarity index 100% rename from packages/core/src/v3/apps/duration.ts rename to packages/core/src/v3/isomorphic/duration.ts diff --git a/packages/core/src/v3/apps/friendlyId.ts b/packages/core/src/v3/isomorphic/friendlyId.ts similarity index 100% rename from packages/core/src/v3/apps/friendlyId.ts rename to packages/core/src/v3/isomorphic/friendlyId.ts diff --git a/packages/core/src/v3/isomorphic/index.ts 
b/packages/core/src/v3/isomorphic/index.ts new file mode 100644 index 0000000000..53836fb096 --- /dev/null +++ b/packages/core/src/v3/isomorphic/index.ts @@ -0,0 +1,5 @@ +export * from "./friendlyId.js"; +export * from "./duration.js"; +export * from "./maxDuration.js"; +export * from "./queueName.js"; +export * from "./consts.js"; diff --git a/packages/core/src/v3/apps/maxDuration.ts b/packages/core/src/v3/isomorphic/maxDuration.ts similarity index 100% rename from packages/core/src/v3/apps/maxDuration.ts rename to packages/core/src/v3/isomorphic/maxDuration.ts diff --git a/packages/core/src/v3/apps/queueName.ts b/packages/core/src/v3/isomorphic/queueName.ts similarity index 100% rename from packages/core/src/v3/apps/queueName.ts rename to packages/core/src/v3/isomorphic/queueName.ts diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ac4cabe86f..d2a5bcc43a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -884,6 +884,9 @@ importers: prisma: specifier: 5.4.1 version: 5.4.1 + rimraf: + specifier: 6.0.1 + version: 6.0.1 internal-packages/emails: dependencies: @@ -984,6 +987,9 @@ importers: '@types/lodash.omit': specifier: ^4.5.7 version: 4.5.7 + rimraf: + specifier: 6.0.1 + version: 6.0.1 vitest: specifier: ^1.4.0 version: 1.6.0(@types/node@20.14.14) @@ -1030,6 +1036,9 @@ importers: '@types/seedrandom': specifier: ^3.0.8 version: 3.0.8 + rimraf: + specifier: 6.0.1 + version: 6.0.1 vitest: specifier: ^1.4.0 version: 1.6.0(@types/node@20.14.14) From 30b22778cecf2d3a1aae8a84803b7e709a88241b Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 Mar 2025 13:31:38 +0000 Subject: [PATCH 09/12] Fixed webapp typechecks --- apps/webapp/package.json | 4 ++-- apps/webapp/tsconfig.check.json | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/apps/webapp/package.json b/apps/webapp/package.json index 81e802a9a1..002080afa9 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -14,7 +14,7 @@ "lint": "eslint --cache --cache-location 
./node_modules/.cache/eslint .", "start": "cross-env NODE_ENV=production node --max-old-space-size=8192 ./build/server.js", "start:local": "cross-env node --max-old-space-size=8192 ./build/server.js", - "typecheck": "tsc -p ./tsconfig.check.json", + "typecheck": "tsc --noEmit -p ./tsconfig.check.json", "db:seed": "node prisma/seed.js", "db:seed:local": "ts-node prisma/seed.ts", "build:db:populate": "esbuild --platform=node --bundle --minify --format=cjs ./prisma/populate.ts --outdir=prisma", @@ -259,4 +259,4 @@ "engines": { "node": ">=16.0.0" } -} +} \ No newline at end of file diff --git a/apps/webapp/tsconfig.check.json b/apps/webapp/tsconfig.check.json index f1adffe51a..8839d20eb4 100644 --- a/apps/webapp/tsconfig.check.json +++ b/apps/webapp/tsconfig.check.json @@ -5,6 +5,7 @@ "paths": { "~/*": ["./app/*"], "@/*": ["./*"] - } + }, + "customConditions": [] } } From dd59b4769b959d7f6349894d5704e9544b41f435 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 Mar 2025 13:57:55 +0000 Subject: [PATCH 10/12] dev now depends on build, fixed supervisor typecheck --- apps/supervisor/tsconfig.json | 4 ---- internal-packages/run-engine/src/engine/index.ts | 2 +- internal-packages/run-engine/src/engine/retrying.ts | 12 ++++++------ package.json | 2 +- packages/core/package.json | 2 +- packages/react-hooks/src/package.json | 3 +++ turbo.json | 5 ++++- 7 files changed, 16 insertions(+), 14 deletions(-) create mode 100644 packages/react-hooks/src/package.json diff --git a/apps/supervisor/tsconfig.json b/apps/supervisor/tsconfig.json index 176a07ab7c..bd9b391e1b 100644 --- a/apps/supervisor/tsconfig.json +++ b/apps/supervisor/tsconfig.json @@ -4,9 +4,5 @@ "compilerOptions": { "rootDir": "src", "outDir": "dist" - }, - "paths": { - "@trigger.dev/core/v3": ["../../packages/core/src/v3"], - "@trigger.dev/core/v3/*": ["../../packages/core/src/v3/*"] } } diff --git a/internal-packages/run-engine/src/engine/index.ts b/internal-packages/run-engine/src/engine/index.ts index 
89b398ce58..87d3b7e84d 100644 --- a/internal-packages/run-engine/src/engine/index.ts +++ b/internal-packages/run-engine/src/engine/index.ts @@ -71,7 +71,7 @@ import { } from "./statuses.js"; import { HeartbeatTimeouts, RunEngineOptions, TriggerParams } from "./types.js"; import { RunQueueFullKeyProducer } from "../run-queue/keyProducer.js"; -import { retryOutcomeFromCompletion } from "./retrying"; +import { retryOutcomeFromCompletion } from "./retrying.js"; const workerCatalog = { finishWaitpoint: { diff --git a/internal-packages/run-engine/src/engine/retrying.ts b/internal-packages/run-engine/src/engine/retrying.ts index 974bcd1151..f214738ade 100644 --- a/internal-packages/run-engine/src/engine/retrying.ts +++ b/internal-packages/run-engine/src/engine/retrying.ts @@ -1,16 +1,16 @@ import { + calculateNextRetryDelay, isOOMRunError, RetryOptions, + sanitizeError, shouldRetryError, TaskRunError, - TaskRunExecutionRetry, taskRunErrorEnhancer, - sanitizeError, - calculateNextRetryDelay, + TaskRunExecutionRetry, } from "@trigger.dev/core/v3"; -import { PrismaClientOrTransaction, TaskRunStatus } from "@trigger.dev/database"; -import { MAX_TASK_RUN_ATTEMPTS } from "./consts"; -import { ServiceValidationError } from "."; +import { PrismaClientOrTransaction } from "@trigger.dev/database"; +import { MAX_TASK_RUN_ATTEMPTS } from "./consts.js"; +import { ServiceValidationError } from "./index.js"; type Params = { runId: string; diff --git a/package.json b/package.json index c38925e9c3..af5c17276b 100644 --- a/package.json +++ b/package.json @@ -15,7 +15,7 @@ "db:seed": "turbo run db:seed", "db:studio": "turbo run db:studio", "db:populate": "turbo run db:populate", - "dev": "turbo run dev --parallel", + "dev": "turbo run dev", "i:dev": "infisical run -- turbo run dev --parallel", "format": "prettier . 
--write --config prettier.config.js", "generate": "turbo run generate", diff --git a/packages/core/package.json b/packages/core/package.json index 2275141983..c203338781 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -679,4 +679,4 @@ "main": "./dist/commonjs/index.js", "types": "./dist/commonjs/index.d.ts", "module": "./dist/esm/index.js" -} \ No newline at end of file +} diff --git a/packages/react-hooks/src/package.json b/packages/react-hooks/src/package.json new file mode 100644 index 0000000000..5bbefffbab --- /dev/null +++ b/packages/react-hooks/src/package.json @@ -0,0 +1,3 @@ +{ + "type": "commonjs" +} diff --git a/turbo.json b/turbo.json index b397a74da9..fd81697c94 100644 --- a/turbo.json +++ b/turbo.json @@ -53,7 +53,10 @@ "cache": false }, "dev": { - "cache": false + "cache": false, + "dependsOn": [ + "^build" + ] }, "i:dev": { "cache": false From acfed1cba5a08d80b30bad08ac965a2c6efe919b Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 Mar 2025 14:18:02 +0000 Subject: [PATCH 11/12] Fixed run engine tests --- .../run-engine/src/engine/tests/attemptFailures.test.ts | 5 ++--- .../run-engine/src/engine/tests/heartbeats.test.ts | 1 - .../run-engine/src/engine/tests/notDeployed.test.ts | 1 - .../run-engine/src/engine/tests/priority.test.ts | 1 - package.json | 2 +- 5 files changed, 3 insertions(+), 7 deletions(-) diff --git a/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts b/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts index 232d090a73..e46f2ae2fb 100644 --- a/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts +++ b/internal-packages/run-engine/src/engine/tests/attemptFailures.test.ts @@ -4,11 +4,10 @@ import { setupAuthenticatedEnvironment, setupBackgroundWorker, } from "@internal/testcontainers"; -import { trace } from "@opentelemetry/api"; +import { trace } from "@internal/tracing"; +import { setTimeout } from "node:timers/promises"; import { expect } from 
"vitest"; -import { EventBusEventArgs } from "../eventBus.js"; import { RunEngine } from "../index.js"; -import { setTimeout } from "node:timers/promises"; describe("RunEngine attempt failures", () => { containerTest( diff --git a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts index 9d07e36729..518189e9ab 100644 --- a/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts +++ b/internal-packages/run-engine/src/engine/tests/heartbeats.test.ts @@ -5,7 +5,6 @@ import { assertNonNullable, } from "@internal/testcontainers"; import { trace } from "@internal/tracing"; -import { expect, describe } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts index 82211bd0bf..f27f23e95d 100644 --- a/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts +++ b/internal-packages/run-engine/src/engine/tests/notDeployed.test.ts @@ -5,7 +5,6 @@ import { assertNonNullable, } from "@internal/testcontainers"; import { trace } from "@internal/tracing"; -import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { setTimeout } from "timers/promises"; diff --git a/internal-packages/run-engine/src/engine/tests/priority.test.ts b/internal-packages/run-engine/src/engine/tests/priority.test.ts index fd1558e3aa..2b62544f3a 100644 --- a/internal-packages/run-engine/src/engine/tests/priority.test.ts +++ b/internal-packages/run-engine/src/engine/tests/priority.test.ts @@ -5,7 +5,6 @@ import { } from "@internal/testcontainers"; import { trace } from "@internal/tracing"; import { generateFriendlyId } from "@trigger.dev/core/v3/isomorphic"; -import { expect } from "vitest"; import { RunEngine } from "../index.js"; import { PrismaClientOrTransaction } from "@trigger.dev/database"; 
import { MinimalAuthenticatedEnvironment } from "../../shared/index.js"; diff --git a/package.json b/package.json index af5c17276b..82f51afb20 100644 --- a/package.json +++ b/package.json @@ -16,7 +16,7 @@ "db:studio": "turbo run db:studio", "db:populate": "turbo run db:populate", "dev": "turbo run dev", - "i:dev": "infisical run -- turbo run dev --parallel", + "i:dev": "infisical run -- turbo run dev", "format": "prettier . --write --config prettier.config.js", "generate": "turbo run generate", "lint": "turbo run lint", From 87f8eba4ded3b208d69629bb9f10d4be834373e0 Mon Sep 17 00:00:00 2001 From: Eric Allam Date: Fri, 7 Mar 2025 14:21:11 +0000 Subject: [PATCH 12/12] Fixed e2e tests --- packages/cli-v3/e2e/utils.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/cli-v3/e2e/utils.ts b/packages/cli-v3/e2e/utils.ts index 46d8697e23..9c65ed450c 100644 --- a/packages/cli-v3/e2e/utils.ts +++ b/packages/cli-v3/e2e/utils.ts @@ -363,6 +363,12 @@ export async function executeTestCaseRun({ ref: "main", name: "test", }, + machine: { + name: "small-1x", + cpu: 1, + memory: 256, + centsPerMs: 0.0000001, + }, }, }, messageId: "run_1234",