diff --git a/package-lock.json b/package-lock.json index 736abe70a4..664dbb68ce 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7319,25 +7319,9 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "packages/authx": { - "name": "@redis/authx", - "version": "5.0.0-next.5", - "extraneous": true, - "license": "MIT", - "dependencies": { - "@azure/msal-node": "^2.16.1" - }, - "devDependencies": {}, - "engines": { - "node": ">= 18" - }, - "peerDependencies": { - "@redis/client": "^5.0.0-next.5" - } - }, "packages/bloom": { "name": "@redis/bloom", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "devDependencies": { "@redis/test-utils": "*" @@ -7346,12 +7330,12 @@ "node": ">= 18" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" } }, "packages/client": { "name": "@redis/client", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "dependencies": { "cluster-key-slot": "1.1.2" @@ -7367,7 +7351,7 @@ }, "packages/entraid": { "name": "@redis/entraid", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "dependencies": { "@azure/identity": "^4.7.0", @@ -7386,7 +7370,7 @@ "node": ">= 18" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" } }, "packages/entraid/node_modules/@types/node": { @@ -7423,7 +7407,7 @@ }, "packages/json": { "name": "@redis/json", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "devDependencies": { "@redis/test-utils": "*" @@ -7432,18 +7416,18 @@ "node": ">= 18" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" } }, "packages/redis": { - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "dependencies": { - "@redis/bloom": "5.8.2", - "@redis/client": "5.8.2", - "@redis/json": "5.8.2", - "@redis/search": "5.8.2", - "@redis/time-series": "5.8.2" + "@redis/bloom": "5.9.0-beta.0", + "@redis/client": "5.9.0-beta.0", + "@redis/json": "5.9.0-beta.0", + "@redis/search": "5.9.0-beta.0", + "@redis/time-series": "5.9.0-beta.0" }, "engines": { "node": ">= 18" @@ -7451,7 +7435,7 @@ }, "packages/search": { "name": "@redis/search", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "devDependencies": { "@redis/test-utils": "*" @@ -7460,7 +7444,7 @@ "node": ">= 18" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" } }, "packages/test-utils": { @@ -7529,7 +7513,7 @@ }, "packages/time-series": { "name": "@redis/time-series", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "devDependencies": { "@redis/test-utils": "*" @@ -7538,7 +7522,7 @@ "node": ">= 18" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" } } } diff --git a/packages/bloom/package.json b/packages/bloom/package.json index e2ff5a8b42..6947934fc4 100644 --- a/packages/bloom/package.json +++ b/packages/bloom/package.json @@ -1,6 +1,6 @@ { "name": "@redis/bloom", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "main": "./dist/lib/index.js", "types": "./dist/lib/index.d.ts", @@ -13,7 +13,7 @@ "release": "release-it" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" }, "devDependencies": { "@redis/test-utils": "*" diff --git a/packages/client/lib/client/commands-queue.ts b/packages/client/lib/client/commands-queue.ts index 52a07a7e3b..ae67ca28cd 100644 --- a/packages/client/lib/client/commands-queue.ts +++ b/packages/client/lib/client/commands-queue.ts 
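The commands-queue changes that follow replace the single client-side-caching invalidate callback with a chain of push handlers. A minimal sketch of that dispatch contract, using simplified standalone names (the real queue works on the decoded RESP3 push payload and registers bound class methods via `addPushHandler`):

```typescript
// Each handler reports whether it consumed the push notification, so the
// queue can offer the notification to the next handler if it did not.
type PushHandler = (pushItems: Array<unknown>) => boolean;

const handlers: PushHandler[] = [];

function dispatchPush(pushItems: Array<unknown>): void {
  // Handlers are tried in registration order; the first one that
  // returns true stops the dispatch.
  for (const handler of handlers) {
    if (handler(pushItems)) return;
  }
}

// Example: a handler that only consumes "invalidate" pushes, mirroring the
// client-side-caching handler the client registers in index.ts further below.
handlers.push((push) => {
  if (String(push[0]) !== 'invalidate') return false;
  console.log('invalidate keys:', push[1]);
  return true;
});

dispatchPush(['invalidate', ['key1']]); // consumed by the handler above
dispatchPush(['MOVING', '17', '15']);   // not consumed; falls through to other handlers
```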
@@ -1,10 +1,11 @@ -import { SinglyLinkedList, DoublyLinkedNode, DoublyLinkedList } from './linked-list'; +import { DoublyLinkedNode, DoublyLinkedList, EmptyAwareSinglyLinkedList } from './linked-list'; import encodeCommand from '../RESP/encoder'; import { Decoder, PUSH_TYPE_MAPPING, RESP_TYPES } from '../RESP/decoder'; import { TypeMapping, ReplyUnion, RespVersions, RedisArgument } from '../RESP/types'; import { ChannelListeners, PubSub, PubSubCommand, PubSubListener, PubSubType, PubSubTypeListeners } from './pub-sub'; -import { AbortError, ErrorReply, TimeoutError } from '../errors'; +import { AbortError, ErrorReply, CommandTimeoutDuringMaintenanceError, TimeoutError } from '../errors'; import { MonitorCallback } from '.'; +import { dbgMaintenance } from './enterprise-maintenance-manager'; export interface CommandOptions { chainId?: symbol; @@ -30,6 +31,7 @@ export interface CommandToWrite extends CommandWaitingForReply { timeout: { signal: AbortSignal; listener: () => unknown; + originalTimeout: number | undefined; } | undefined; } @@ -50,22 +52,74 @@ const RESP2_PUSH_TYPE_MAPPING = { [RESP_TYPES.SIMPLE_STRING]: Buffer }; +// Try to handle a push notification. Return whether you +// successfully consumed the notification or not. This is +// important in order for the queue to be able to pass the +// notification to another handler if the current one did not +// succeed. +type PushHandler = (pushItems: Array) => boolean; + export default class RedisCommandsQueue { readonly #respVersion; readonly #maxLength; readonly #toWrite = new DoublyLinkedList(); - readonly #waitingForReply = new SinglyLinkedList(); + readonly #waitingForReply = new EmptyAwareSinglyLinkedList(); readonly #onShardedChannelMoved; #chainInExecution: symbol | undefined; readonly decoder; readonly #pubSub = new PubSub(); + #pushHandlers: PushHandler[] = [this.#onPush.bind(this)]; + + #maintenanceCommandTimeout: number | undefined + + setMaintenanceCommandTimeout(ms: number | undefined) { + // Prevent possible api misuse + if (this.#maintenanceCommandTimeout === ms) { + dbgMaintenance(`Queue already set maintenanceCommandTimeout to ${ms}, skipping`); + return; + }; + + dbgMaintenance(`Setting maintenance command timeout to ${ms}`); + this.#maintenanceCommandTimeout = ms; + + if(this.#maintenanceCommandTimeout === undefined) { + dbgMaintenance(`Queue will keep maintenanceCommandTimeout for exisitng commands, just to be on the safe side. 
New commands will receive normal timeouts`); + return; + } + + let counter = 0; + const total = this.#toWrite.length; + + // Overwrite timeouts of all eligible toWrite commands + for(const node of this.#toWrite.nodes()) { + const command = node.value; + + // Remove timeout listener if it exists + RedisCommandsQueue.#removeTimeoutListener(command) + + counter++; + const newTimeout = this.#maintenanceCommandTimeout; + + // Overwrite the command's timeout + const signal = AbortSignal.timeout(newTimeout); + command.timeout = { + signal, + listener: () => { + this.#toWrite.remove(node); + command.reject(new CommandTimeoutDuringMaintenanceError(newTimeout)); + }, + originalTimeout: command.timeout?.originalTimeout + }; + signal.addEventListener('abort', command.timeout.listener, { once: true }); + }; + dbgMaintenance(`Total of ${counter} of ${total} timeouts reset to ${ms}`); + } + get isPubSubActive() { return this.#pubSub.isActive; } - #invalidateCallback?: (key: RedisArgument | null) => unknown; - constructor( respVersion: RespVersions, maxLength: number | null | undefined, @@ -107,6 +161,7 @@ export default class RedisCommandsQueue { } return true; } + return false } #getTypeMapping() { @@ -119,30 +174,27 @@ export default class RedisCommandsQueue { onErrorReply: err => this.#onErrorReply(err), //TODO: we can shave off a few cycles by not adding onPush handler at all if CSC is not used onPush: push => { - if (!this.#onPush(push)) { - // currently only supporting "invalidate" over RESP3 push messages - switch (push[0].toString()) { - case "invalidate": { - if (this.#invalidateCallback) { - if (push[1] !== null) { - for (const key of push[1]) { - this.#invalidateCallback(key); - } - } else { - this.#invalidateCallback(null); - } - } - break; - } - } + for(const pushHandler of this.#pushHandlers) { + if(pushHandler(push)) return } }, getTypeMapping: () => this.#getTypeMapping() }); } - setInvalidateCallback(callback?: (key: RedisArgument | null) => unknown) { - this.#invalidateCallback = callback; + addPushHandler(handler: PushHandler): void { + this.#pushHandlers.push(handler); + } + + async waitForInflightCommandsToComplete(): Promise { + // In-flight commands already completed + if(this.#waitingForReply.length === 0) { + return + }; + // Otherwise wait for in-flight commands to fire `empty` event + return new Promise(resolve => { + this.#waitingForReply.events.on('empty', resolve) + }); } addCommand( @@ -168,15 +220,20 @@ export default class RedisCommandsQueue { typeMapping: options?.typeMapping }; - const timeout = options?.timeout; + // If #maintenanceCommandTimeout was explicitly set, we should + // use it instead of the timeout provided by the command + const timeout = this.#maintenanceCommandTimeout ?? options?.timeout; + const wasInMaintenance = this.#maintenanceCommandTimeout !== undefined; if (timeout) { + const signal = AbortSignal.timeout(timeout); value.timeout = { signal, listener: () => { this.#toWrite.remove(node); - value.reject(new TimeoutError()); - } + value.reject(wasInMaintenance ? 
new CommandTimeoutDuringMaintenanceError(timeout) : new TimeoutError()); + }, + originalTimeout: options?.timeout }; signal.addEventListener('abort', value.timeout.listener, { once: true }); } @@ -432,7 +489,7 @@ export default class RedisCommandsQueue { } static #removeTimeoutListener(command: CommandToWrite) { - command.timeout!.signal.removeEventListener('abort', command.timeout!.listener); + command.timeout?.signal.removeEventListener('abort', command.timeout!.listener); } static #flushToWrite(toBeSent: CommandToWrite, err: Error) { diff --git a/packages/client/lib/client/enterprise-maintenance-manager.ts b/packages/client/lib/client/enterprise-maintenance-manager.ts new file mode 100644 index 0000000000..631fb1f711 --- /dev/null +++ b/packages/client/lib/client/enterprise-maintenance-manager.ts @@ -0,0 +1,353 @@ +import { RedisClientOptions } from "."; +import RedisCommandsQueue from "./commands-queue"; +import { RedisArgument } from "../.."; +import { isIP } from "net"; +import { lookup } from "dns/promises"; +import assert from "node:assert"; +import { setTimeout } from "node:timers/promises"; +import RedisSocket from "./socket"; +import diagnostics_channel from "node:diagnostics_channel"; + +export const MAINTENANCE_EVENTS = { + PAUSE_WRITING: "pause-writing", + RESUME_WRITING: "resume-writing", + TIMEOUTS_UPDATE: "timeouts-update", +} as const; + +const PN = { + MOVING: "MOVING", + MIGRATING: "MIGRATING", + MIGRATED: "MIGRATED", + FAILING_OVER: "FAILING_OVER", + FAILED_OVER: "FAILED_OVER", +}; + +export type DiagnosticsEvent = { + type: string; + timestamp: number; + data?: Object; +}; + +export const dbgMaintenance = (...args: any[]) => { + if (!process.env.REDIS_DEBUG_MAINTENANCE) return; + return console.log("[MNT]", ...args); +}; + +export const emitDiagnostics = (event: DiagnosticsEvent) => { + if (!process.env.REDIS_EMIT_DIAGNOSTICS) return; + + const channel = diagnostics_channel.channel("redis.maintenance"); + channel.publish(event); +}; + +export interface MaintenanceUpdate { + relaxedCommandTimeout?: number; + relaxedSocketTimeout?: number; +} + +interface Client { + _ejectSocket: () => RedisSocket; + _insertSocket: (socket: RedisSocket) => void; + _pause: () => void; + _unpause: () => void; + _maintenanceUpdate: (update: MaintenanceUpdate) => void; + duplicate: () => Client; + connect: () => Promise; + destroy: () => void; + on: (event: string, callback: (value: unknown) => void) => void; +} + +export default class EnterpriseMaintenanceManager { + #commandsQueue: RedisCommandsQueue; + #options: RedisClientOptions; + #isMaintenance = 0; + #client: Client; + + static setupDefaultMaintOptions(options: RedisClientOptions) { + if (options.maintPushNotifications === undefined) { + options.maintPushNotifications = + options?.RESP === 3 ? 
"auto" : "disabled"; + } + if (options.maintMovingEndpointType === undefined) { + options.maintMovingEndpointType = "auto"; + } + if (options.maintRelaxedSocketTimeout === undefined) { + options.maintRelaxedSocketTimeout = 10000; + } + if (options.maintRelaxedCommandTimeout === undefined) { + options.maintRelaxedCommandTimeout = 10000; + } + } + + static async getHandshakeCommand( + tls: boolean, + host: string, + options: RedisClientOptions, + ): Promise< + | { cmd: Array; errorHandler: (error: Error) => void } + | undefined + > { + if (options.maintPushNotifications === "disabled") return; + + const movingEndpointType = await determineEndpoint(tls, host, options); + return { + cmd: [ + "CLIENT", + "MAINT_NOTIFICATIONS", + "ON", + "moving-endpoint-type", + movingEndpointType, + ], + errorHandler: (error: Error) => { + dbgMaintenance("handshake failed:", error); + if (options.maintPushNotifications === "enabled") { + throw error; + } + }, + }; + } + + constructor( + commandsQueue: RedisCommandsQueue, + client: Client, + options: RedisClientOptions, + ) { + this.#commandsQueue = commandsQueue; + this.#options = options; + this.#client = client; + + this.#commandsQueue.addPushHandler(this.#onPush); + } + + #onPush = (push: Array): boolean => { + dbgMaintenance("ONPUSH:", push.map(String)); + + if (!Array.isArray(push) || !["MOVING", "MIGRATING", "MIGRATED", "FAILING_OVER", "FAILED_OVER"].includes(String(push[0]))) { + return false; + } + + const type = String(push[0]); + + emitDiagnostics({ + type, + timestamp: Date.now(), + data: { + push: push.map(String), + }, + }); + switch (type) { + case PN.MOVING: { + // [ 'MOVING', '17', '15', '54.78.247.156:12075' ] + // ^seq ^after ^new ip + const afterSeconds = push[2]; + const url: string | null = push[3] ? String(push[3]) : null; + dbgMaintenance("Received MOVING:", afterSeconds, url); + this.#onMoving(afterSeconds, url); + return true; + } + case PN.MIGRATING: + case PN.FAILING_OVER: { + dbgMaintenance("Received MIGRATING|FAILING_OVER"); + this.#onMigrating(); + return true; + } + case PN.MIGRATED: + case PN.FAILED_OVER: { + dbgMaintenance("Received MIGRATED|FAILED_OVER"); + this.#onMigrated(); + return true; + } + } + return false; + }; + + // Queue: + // toWrite [ C D E ] + // waitingForReply [ A B ] - aka In-flight commands + // + // time: ---1-2---3-4-5-6--------------------------- + // + // 1. [EVENT] MOVING PN received + // 2. [ACTION] Pause writing ( we need to wait for new socket to connect and for all in-flight commands to complete ) + // 3. [EVENT] New socket connected + // 4. [EVENT] In-flight commands completed + // 5. [ACTION] Destroy old socket + // 6. [ACTION] Resume writing -> we are going to write to the new socket from now on + #onMoving = async ( + afterSeconds: number, + url: string | null, + ): Promise => { + // 1 [EVENT] MOVING PN received + this.#onMigrating(); + + let host: string; + let port: number; + + // The special value `none` indicates that the `MOVING` message doesn’t need + // to contain an endpoint. Instead it contains the value `null` then. In + // such a corner case, the client is expected to schedule a graceful + // reconnect to its currently configured endpoint after half of the grace + // period that was communicated by the server is over. 
+ if (url === null) { + assert(this.#options.maintMovingEndpointType === "none"); + assert(this.#options.socket !== undefined); + assert("host" in this.#options.socket); + assert(typeof this.#options.socket.host === "string"); + host = this.#options.socket.host; + assert(typeof this.#options.socket.port === "number"); + port = this.#options.socket.port; + const waitTime = (afterSeconds * 1000) / 2; + dbgMaintenance(`Wait for ${waitTime}ms`); + await setTimeout(waitTime); + } else { + const split = url.split(":"); + host = split[0]; + port = Number(split[1]); + } + + // 2 [ACTION] Pause writing + dbgMaintenance("Pausing writing of new commands to old socket"); + this.#client._pause(); + + dbgMaintenance("Creating new tmp client"); + let start = performance.now(); + + // If the URL is provided, it takes precedense + // the options object could just be mutated + if(this.#options.url) { + const u = new URL(this.#options.url); + u.hostname = host; + u.port = String(port); + this.#options.url = u.toString(); + } else { + this.#options.socket = { + ...this.#options.socket, + host, + port + } + } + const tmpClient = this.#client.duplicate(); + tmpClient.on('error', (error: unknown) => { + //We dont know how to handle tmp client errors + dbgMaintenance(`[ERR]`, error) + }); + dbgMaintenance(`Tmp client created in ${( performance.now() - start ).toFixed(2)}ms`); + dbgMaintenance( + `Set timeout for tmp client to ${this.#options.maintRelaxedSocketTimeout}`, + ); + tmpClient._maintenanceUpdate({ + relaxedCommandTimeout: this.#options.maintRelaxedCommandTimeout, + relaxedSocketTimeout: this.#options.maintRelaxedSocketTimeout, + }); + dbgMaintenance(`Connecting tmp client: ${host}:${port}`); + start = performance.now(); + await tmpClient.connect(); + dbgMaintenance(`Connected to tmp client in ${(performance.now() - start).toFixed(2)}ms`); + // 3 [EVENT] New socket connected + + dbgMaintenance(`Wait for all in-flight commands to complete`); + await this.#commandsQueue.waitForInflightCommandsToComplete(); + dbgMaintenance(`In-flight commands completed`); + // 4 [EVENT] In-flight commands completed + + dbgMaintenance("Swap client sockets..."); + const oldSocket = this.#client._ejectSocket(); + const newSocket = tmpClient._ejectSocket(); + this.#client._insertSocket(newSocket); + tmpClient._insertSocket(oldSocket); + tmpClient.destroy(); + dbgMaintenance("Swap client sockets done."); + // 5 + 6 + dbgMaintenance("Resume writing"); + this.#client._unpause(); + this.#onMigrated(); + }; + + #onMigrating = () => { + this.#isMaintenance++; + if (this.#isMaintenance > 1) { + dbgMaintenance(`Timeout relaxation already done`); + return; + } + + const update: MaintenanceUpdate = { + relaxedCommandTimeout: this.#options.maintRelaxedCommandTimeout, + relaxedSocketTimeout: this.#options.maintRelaxedSocketTimeout, + }; + + this.#client._maintenanceUpdate(update); + }; + + #onMigrated = () => { + //ensure that #isMaintenance doesnt go under 0 + this.#isMaintenance = Math.max(this.#isMaintenance - 1, 0); + if (this.#isMaintenance > 0) { + dbgMaintenance(`Not ready to unrelax timeouts yet`); + return; + } + + const update: MaintenanceUpdate = { + relaxedCommandTimeout: undefined, + relaxedSocketTimeout: undefined + }; + + this.#client._maintenanceUpdate(update); + }; +} + +export type MovingEndpointType = + | "auto" + | "internal-ip" + | "internal-fqdn" + | "external-ip" + | "external-fqdn" + | "none"; + +function isPrivateIP(ip: string): boolean { + const version = isIP(ip); + if (version === 4) { + const octets = 
ip.split(".").map(Number); + return ( + octets[0] === 10 || + (octets[0] === 172 && octets[1] >= 16 && octets[1] <= 31) || + (octets[0] === 192 && octets[1] === 168) + ); + } + if (version === 6) { + return ( + ip.startsWith("fc") || // Unique local + ip.startsWith("fd") || // Unique local + ip === "::1" || // Loopback + ip.startsWith("fe80") // Link-local unicast + ); + } + return false; +} + +async function determineEndpoint( + tlsEnabled: boolean, + host: string, + options: RedisClientOptions, +): Promise { + assert(options.maintMovingEndpointType !== undefined); + if (options.maintMovingEndpointType !== "auto") { + dbgMaintenance( + `Determine endpoint type: ${options.maintMovingEndpointType}`, + ); + return options.maintMovingEndpointType; + } + + const ip = isIP(host) ? host : (await lookup(host, { family: 0 })).address; + + const isPrivate = isPrivateIP(ip); + + let result: MovingEndpointType; + if (tlsEnabled) { + result = isPrivate ? "internal-fqdn" : "external-fqdn"; + } else { + result = isPrivate ? "internal-ip" : "external-ip"; + } + + dbgMaintenance(`Determine endpoint type: ${result}`); + return result; +} diff --git a/packages/client/lib/client/index.ts b/packages/client/lib/client/index.ts index 57b1231670..cf5763357a 100644 --- a/packages/client/lib/client/index.ts +++ b/packages/client/lib/client/index.ts @@ -1,5 +1,5 @@ import COMMANDS from '../commands'; -import RedisSocket, { RedisSocketOptions } from './socket'; +import RedisSocket, { RedisSocketOptions, RedisTcpSocketOptions } from './socket'; import { BasicAuth, CredentialsError, CredentialsProvider, StreamingCredentialsProvider, UnableToObtainNewCredentialsError, Disposable } from '../authx'; import RedisCommandsQueue, { CommandOptions } from './commands-queue'; import { EventEmitter } from 'node:events'; @@ -20,6 +20,7 @@ import { BasicClientSideCache, ClientSideCacheConfig, ClientSideCacheProvider } import { BasicCommandParser, CommandParser } from './parser'; import SingleEntryCache from '../single-entry-cache'; import { version } from '../../package.json' +import EnterpriseMaintenanceManager, { MaintenanceUpdate, MovingEndpointType } from './enterprise-maintenance-manager'; export interface RedisClientOptions< M extends RedisModules = RedisModules, @@ -144,7 +145,46 @@ export interface RedisClientOptions< * Tag to append to library name that is sent to the Redis server */ clientInfoTag?: string; -} + /** + * Controls how the client handles Redis Enterprise maintenance push notifications. + * + * - `disabled`: The feature is not used by the client. + * - `enabled`: The client attempts to enable the feature on the server. If the server responds with an error, the connection is interrupted. + * - `auto`: The client attempts to enable the feature on the server. If the server returns an error, the client disables the feature and continues. + * + * The default is `auto`. + */ + maintPushNotifications?: 'disabled' | 'enabled' | 'auto'; + /** + * Controls how the client requests the endpoint to reconnect to during a MOVING notification in Redis Enterprise maintenance. + * + * - `auto`: If the connection is opened to a name or IP address that is from/resolves to a reserved private IP range, request an internal endpoint (e.g., internal-ip), otherwise an external one. If TLS is enabled, then request a FQDN. + * - `internal-ip`: Enforce requesting the internal IP. + * - `internal-fqdn`: Enforce requesting the internal FQDN. + * - `external-ip`: Enforce requesting the external IP address. 
+ * - `external-fqdn`: Enforce requesting the external FQDN. + * - `none`: Used to request a null endpoint, which tells the client to reconnect based on its current config + + * The default is `auto`. + */ + maintMovingEndpointType?: MovingEndpointType; + /** + * Specifies a more relaxed timeout (in milliseconds) for commands during a maintenance window. + * This helps minimize command timeouts during maintenance. If not provided, the `commandOptions.timeout` + * will be used instead. Timeouts during maintenance period result in a `CommandTimeoutDuringMaintenance` error. + * + * The default is 10000 + */ + maintRelaxedCommandTimeout?: number; + /** + * Specifies a more relaxed timeout (in milliseconds) for the socket during a maintenance window. + * This helps minimize socket timeouts during maintenance. If not provided, the `socket.timeout` + * will be used instead. Timeouts during maintenance period result in a `SocketTimeoutDuringMaintenance` error. + * + * The default is 10000 + */ + maintRelaxedSocketTimeout?: number; +}; export type WithCommands< RESP extends RespVersions, @@ -390,7 +430,7 @@ export default class RedisClient< } readonly #options?: RedisClientOptions; - readonly #socket: RedisSocket; + #socket: RedisSocket; readonly #queue: RedisCommandsQueue; #selectedDB = 0; #monitorCallback?: MonitorCallback; @@ -403,11 +443,16 @@ export default class RedisClient< #watchEpoch?: number; #clientSideCache?: ClientSideCacheProvider; #credentialsSubscription: Disposable | null = null; + // Flag used to pause writing to the socket during maintenance windows. + // When true, prevents new commands from being written while waiting for: + // 1. New socket to be ready after maintenance redirect + // 2. In-flight commands on the old socket to complete + #paused = false; + get clientSideCache() { return this._self.#clientSideCache; } - get options(): RedisClientOptions | undefined { return this._self.#options; } @@ -457,6 +502,11 @@ export default class RedisClient< this.#queue = this.#initiateQueue(); this.#socket = this.#initiateSocket(); + + if(options?.maintPushNotifications !== 'disabled') { + new EnterpriseMaintenanceManager(this.#queue, this, this.#options!); + }; + if (options?.clientSideCache) { if (options.clientSideCache instanceof ClientSideCacheProvider) { this.#clientSideCache = options.clientSideCache; @@ -464,7 +514,19 @@ export default class RedisClient< const cscConfig = options.clientSideCache; this.#clientSideCache = new BasicClientSideCache(cscConfig); } - this.#queue.setInvalidateCallback(this.#clientSideCache.invalidate.bind(this.#clientSideCache)); + this.#queue.addPushHandler((push: Array): boolean => { + if (push[0].toString() !== 'invalidate') return false; + + if (push[1] !== null) { + for (const key of push[1]) { + this.#clientSideCache?.invalidate(key) + } + } else { + this.#clientSideCache?.invalidate(null) + } + + return true + }); } } @@ -473,7 +535,12 @@ export default class RedisClient< throw new Error('Client Side Caching is only supported with RESP3'); } + if (options?.maintPushNotifications && options?.maintPushNotifications !== 'disabled' && options?.RESP !== 3) { + throw new Error('Graceful Maintenance is only supported with RESP3'); + } + } + #initiateOptions(options?: RedisClientOptions): RedisClientOptions | undefined { // Convert username/password to credentialsProvider if no credentialsProvider is already in place @@ -496,13 +563,15 @@ export default class RedisClient< this._commandOptions = options.commandOptions; } + 
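As a usage illustration for the `maint*` options documented above — a sketch only: the URL is a placeholder, the values shown are the defaults applied by `setupDefaultMaintOptions`, and RESP 3 is required for maintenance push notifications:

```typescript
import { createClient } from 'redis';

const client = createClient({
  url: 'redis://my-enterprise-db.example.com:12000', // placeholder endpoint
  RESP: 3,                            // maintenance notifications are RESP3-only
  maintPushNotifications: 'auto',     // try to enable; continue without the feature if the server refuses
  maintMovingEndpointType: 'auto',    // pick internal/external ip/fqdn from the resolved address and TLS
  maintRelaxedCommandTimeout: 10000,  // ms applied to commands while maintenance is in progress
  maintRelaxedSocketTimeout: 10000,   // ms applied to the socket while maintenance is in progress
});

await client.connect();
```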
if(options?.maintPushNotifications !== 'disabled') { + EnterpriseMaintenanceManager.setupDefaultMaintOptions(options!); + } + if (options?.url) { const parsedOptions = RedisClient.parseOptions(options); - if (parsedOptions?.database) { this._self.#selectedDB = parsedOptions.database; } - return parsedOptions; } @@ -679,9 +748,44 @@ export default class RedisClient< commands.push({cmd: this.#clientSideCache.trackingOn()}); } + const { tls, host } = this.#options!.socket as RedisTcpSocketOptions; + const maintenanceHandshakeCmd = await EnterpriseMaintenanceManager.getHandshakeCommand(!!tls, host!, this.#options!); + if(maintenanceHandshakeCmd) { + commands.push(maintenanceHandshakeCmd); + }; + return commands; } + #attachListeners(socket: RedisSocket) { + socket.on('data', chunk => { + try { + this.#queue.decoder.write(chunk); + } catch (err) { + this.#queue.resetDecoder(); + this.emit('error', err); + } + }) + .on('error', err => { + this.emit('error', err); + this.#clientSideCache?.onError(); + if (this.#socket.isOpen && !this.#options?.disableOfflineQueue) { + this.#queue.flushWaitingForReply(err); + } else { + this.#queue.flushAll(err); + } + }) + .on('connect', () => this.emit('connect')) + .on('ready', () => { + this.emit('ready'); + this.#setPingTimer(); + this.#maybeScheduleWrite(); + }) + .on('reconnecting', () => this.emit('reconnecting')) + .on('drain', () => this.#maybeScheduleWrite()) + .on('end', () => this.emit('end')); + } + #initiateSocket(): RedisSocket { const socketInitiator = async () => { const promises = [], @@ -713,33 +817,9 @@ export default class RedisClient< } }; - return new RedisSocket(socketInitiator, this.#options?.socket) - .on('data', chunk => { - try { - this.#queue.decoder.write(chunk); - } catch (err) { - this.#queue.resetDecoder(); - this.emit('error', err); - } - }) - .on('error', err => { - this.emit('error', err); - this.#clientSideCache?.onError(); - if (this.#socket.isOpen && !this.#options?.disableOfflineQueue) { - this.#queue.flushWaitingForReply(err); - } else { - this.#queue.flushAll(err); - } - }) - .on('connect', () => this.emit('connect')) - .on('ready', () => { - this.emit('ready'); - this.#setPingTimer(); - this.#maybeScheduleWrite(); - }) - .on('reconnecting', () => this.emit('reconnecting')) - .on('drain', () => this.#maybeScheduleWrite()) - .on('end', () => this.emit('end')); + const socket = new RedisSocket(socketInitiator, this.#options?.socket); + this.#attachListeners(socket); + return socket; } #pingTimer?: NodeJS.Timeout; @@ -851,6 +931,51 @@ export default class RedisClient< return this as unknown as RedisClientType; } + /** + * @internal + */ + _ejectSocket(): RedisSocket { + const socket = this._self.#socket; + // @ts-ignore + this._self.#socket = null; + socket.removeAllListeners(); + return socket; + } + + /** + * @internal + */ + _insertSocket(socket: RedisSocket) { + if(this._self.#socket) { + this._self._ejectSocket().destroy(); + } + this._self.#socket = socket; + this._self.#attachListeners(this._self.#socket); + } + + /** + * @internal + */ + _maintenanceUpdate(update: MaintenanceUpdate) { + this._self.#socket.setMaintenanceTimeout(update.relaxedSocketTimeout); + this._self.#queue.setMaintenanceCommandTimeout(update.relaxedCommandTimeout); + } + + /** + * @internal + */ + _pause() { + this._self.#paused = true; + } + + /** + * @internal + */ + _unpause() { + this._self.#paused = false; + this._self.#maybeScheduleWrite(); + } + /** * @internal */ @@ -1080,6 +1205,9 @@ export default class RedisClient< } #write() { + 
if(this.#paused) { + return + } this.#socket.write(this.#queue.commandsToWrite()); } diff --git a/packages/client/lib/client/linked-list.spec.ts b/packages/client/lib/client/linked-list.spec.ts index 9547fb81c7..c791d21900 100644 --- a/packages/client/lib/client/linked-list.spec.ts +++ b/packages/client/lib/client/linked-list.spec.ts @@ -1,138 +1,197 @@ -import { SinglyLinkedList, DoublyLinkedList } from './linked-list'; -import { equal, deepEqual } from 'assert/strict'; - -describe('DoublyLinkedList', () => { +import { + SinglyLinkedList, + DoublyLinkedList, + EmptyAwareSinglyLinkedList, +} from "./linked-list"; +import { equal, deepEqual } from "assert/strict"; + +describe("DoublyLinkedList", () => { const list = new DoublyLinkedList(); - it('should start empty', () => { + it("should start empty", () => { equal(list.length, 0); equal(list.head, undefined); equal(list.tail, undefined); deepEqual(Array.from(list), []); }); - it('shift empty', () => { + it("shift empty", () => { equal(list.shift(), undefined); equal(list.length, 0); deepEqual(Array.from(list), []); }); - it('push 1', () => { + it("push 1", () => { list.push(1); equal(list.length, 1); deepEqual(Array.from(list), [1]); }); - it('push 2', () => { + it("push 2", () => { list.push(2); equal(list.length, 2); deepEqual(Array.from(list), [1, 2]); }); - it('unshift 0', () => { + it("unshift 0", () => { list.unshift(0); equal(list.length, 3); deepEqual(Array.from(list), [0, 1, 2]); }); - it('remove middle node', () => { + it("remove middle node", () => { list.remove(list.head!.next!); equal(list.length, 2); deepEqual(Array.from(list), [0, 2]); }); - it('remove head', () => { + it("remove head", () => { list.remove(list.head!); equal(list.length, 1); deepEqual(Array.from(list), [2]); }); - it('remove tail', () => { + it("remove tail", () => { list.remove(list.tail!); equal(list.length, 0); deepEqual(Array.from(list), []); }); - it('unshift empty queue', () => { + it("unshift empty queue", () => { list.unshift(0); equal(list.length, 1); deepEqual(Array.from(list), [0]); }); - it('push 1', () => { + it("push 1", () => { list.push(1); equal(list.length, 2); deepEqual(Array.from(list), [0, 1]); }); - it('shift', () => { + it("shift", () => { equal(list.shift(), 0); equal(list.length, 1); deepEqual(Array.from(list), [1]); }); - it('shift last element', () => { + it("shift last element", () => { equal(list.shift(), 1); equal(list.length, 0); deepEqual(Array.from(list), []); }); + + it("provide forEach for nodes", () => { + list.reset(); + list.push(1); + list.push(2); + list.push(3); + let count = 0; + for(const _ of list.nodes()) { + count++; + } + equal(count, 3); + for(const _ of list.nodes()) { + count++; + } + equal(count, 6); + }); }); -describe('SinglyLinkedList', () => { +describe("SinglyLinkedList", () => { const list = new SinglyLinkedList(); - it('should start empty', () => { + it("should start empty", () => { equal(list.length, 0); equal(list.head, undefined); equal(list.tail, undefined); deepEqual(Array.from(list), []); }); - it('shift empty', () => { + it("shift empty", () => { equal(list.shift(), undefined); equal(list.length, 0); deepEqual(Array.from(list), []); }); - it('push 1', () => { + it("push 1", () => { list.push(1); equal(list.length, 1); deepEqual(Array.from(list), [1]); }); - it('push 2', () => { + it("push 2", () => { list.push(2); equal(list.length, 2); deepEqual(Array.from(list), [1, 2]); }); - it('push 3', () => { + it("push 3", () => { list.push(3); equal(list.length, 3); deepEqual(Array.from(list), [1, 2, 
3]); }); - it('shift 1', () => { + it("shift 1", () => { equal(list.shift(), 1); equal(list.length, 2); deepEqual(Array.from(list), [2, 3]); }); - it('shift 2', () => { + it("shift 2", () => { equal(list.shift(), 2); equal(list.length, 1); deepEqual(Array.from(list), [3]); }); - it('shift 3', () => { + it("shift 3", () => { equal(list.shift(), 3); equal(list.length, 0); deepEqual(Array.from(list), []); }); - it('should be empty', () => { + it("should be empty", () => { equal(list.length, 0); equal(list.head, undefined); equal(list.tail, undefined); }); }); + +describe("EmptyAwareSinglyLinkedList", () => { + it("should emit 'empty' event when reset", () => { + const list = new EmptyAwareSinglyLinkedList(); + let count = 0; + list.events.on("empty", () => count++); + list.push(1); + list.reset(); + equal(count, 1); + list.reset(); + equal(count, 1); + }); + + it("should emit 'empty' event when shift makes the list empty", () => { + const list = new EmptyAwareSinglyLinkedList(); + let count = 0; + list.events.on("empty", () => count++); + list.push(1); + list.push(2); + list.shift(); + equal(count, 0); + list.shift(); + equal(count, 1); + list.shift(); + equal(count, 1); + }); + + it("should emit 'empty' event when remove makes the list empty", () => { + const list = new EmptyAwareSinglyLinkedList(); + let count = 0; + list.events.on("empty", () => count++); + const node1 = list.push(1); + const node2 = list.push(2); + list.remove(node1, undefined); + equal(count, 0); + list.remove(node2, undefined); + equal(count, 1); + }); +}); diff --git a/packages/client/lib/client/linked-list.ts b/packages/client/lib/client/linked-list.ts index 29678f027b..461f1d4082 100644 --- a/packages/client/lib/client/linked-list.ts +++ b/packages/client/lib/client/linked-list.ts @@ -1,3 +1,5 @@ +import EventEmitter from "events"; + export interface DoublyLinkedNode { value: T; previous: DoublyLinkedNode | undefined; @@ -32,7 +34,7 @@ export class DoublyLinkedList { next: undefined, value }; - } + } return this.#tail = this.#tail.next = { previous: this.#tail, @@ -93,7 +95,7 @@ export class DoublyLinkedList { node.previous!.next = node.next; node.previous = undefined; } - + node.next = undefined; } @@ -109,6 +111,14 @@ export class DoublyLinkedList { node = node.next; } } + + *nodes() { + let node = this.#head; + while(node) { + yield node; + node = node.next; + } + } } export interface SinglyLinkedNode { @@ -201,3 +211,30 @@ export class SinglyLinkedList { } } } + +export class EmptyAwareSinglyLinkedList extends SinglyLinkedList { + readonly events = new EventEmitter(); + reset() { + const old = this.length; + super.reset(); + if(old !== this.length && this.length === 0) { + this.events.emit('empty'); + } + } + shift(): T | undefined { + const old = this.length; + const ret = super.shift(); + if(old !== this.length && this.length === 0) { + this.events.emit('empty'); + } + return ret; + } + remove(node: SinglyLinkedNode, parent: SinglyLinkedNode | undefined) { + const old = this.length; + super.remove(node, parent); + if(old !== this.length && this.length === 0) { + this.events.emit('empty'); + } + } + +} diff --git a/packages/client/lib/client/socket.ts b/packages/client/lib/client/socket.ts index 5f0bcc4492..c5569e8654 100644 --- a/packages/client/lib/client/socket.ts +++ b/packages/client/lib/client/socket.ts @@ -1,9 +1,10 @@ import { EventEmitter, once } from 'node:events'; import net from 'node:net'; import tls from 'node:tls'; -import { ConnectionTimeoutError, ClientClosedError, SocketClosedUnexpectedlyError, 
ReconnectStrategyError, SocketTimeoutError } from '../errors'; +import { ConnectionTimeoutError, ClientClosedError, SocketClosedUnexpectedlyError, ReconnectStrategyError, SocketTimeoutError, SocketTimeoutDuringMaintenanceError } from '../errors'; import { setTimeout } from 'node:timers/promises'; import { RedisArgument } from '../RESP/types'; +import { dbgMaintenance } from './enterprise-maintenance-manager'; type NetOptions = { tls?: false; @@ -60,6 +61,8 @@ export default class RedisSocket extends EventEmitter { readonly #socketFactory; readonly #socketTimeout; + #maintenanceTimeout: number | undefined; + #socket?: net.Socket | tls.TLSSocket; #isOpen = false; @@ -238,6 +241,22 @@ export default class RedisSocket extends EventEmitter { } while (this.#isOpen && !this.#isReady); } + setMaintenanceTimeout(ms?: number) { + dbgMaintenance(`Set socket timeout to ${ms}`); + if (this.#maintenanceTimeout === ms) { + dbgMaintenance(`Socket already set maintenanceTimeout to ${ms}, skipping`); + return; + }; + + this.#maintenanceTimeout = ms; + + if(ms !== undefined) { + this.#socket?.setTimeout(ms); + } else { + this.#socket?.setTimeout(this.#socketTimeout ?? 0); + } + } + async #createSocket(): Promise<net.Socket | tls.TLSSocket> { const socket = this.#socketFactory.create(); @@ -260,7 +279,10 @@ export default class RedisSocket extends EventEmitter { if (this.#socketTimeout) { socket.once('timeout', () => { - socket.destroy(new SocketTimeoutError(this.#socketTimeout!)); + const error = this.#maintenanceTimeout + ? new SocketTimeoutDuringMaintenanceError(this.#maintenanceTimeout) + : new SocketTimeoutError(this.#socketTimeout!) + socket.destroy(error); }); socket.setTimeout(this.#socketTimeout); } diff --git a/packages/client/lib/errors.ts b/packages/client/lib/errors.ts index 5cb9166df0..4d9ddf7f2b 100644 --- a/packages/client/lib/errors.ts +++ b/packages/client/lib/errors.ts @@ -71,6 +71,18 @@ export class BlobError extends ErrorReply {} export class TimeoutError extends Error {} +export class SocketTimeoutDuringMaintenanceError extends TimeoutError { + constructor(timeout: number) { + super(`Socket timeout during maintenance. Expecting data, but didn't receive any in ${timeout}ms.`); + } +} + +export class CommandTimeoutDuringMaintenanceError extends TimeoutError { + constructor(timeout: number) { + super(`Command timeout during maintenance. 
Waited to write command for more than ${timeout}ms.`); + } +} + export class MultiErrorReply extends ErrorReply { replies: Array; errorIndexes: Array; diff --git a/packages/client/lib/tests/test-scenario/configuration.e2e.ts b/packages/client/lib/tests/test-scenario/configuration.e2e.ts new file mode 100644 index 0000000000..a648375f6e --- /dev/null +++ b/packages/client/lib/tests/test-scenario/configuration.e2e.ts @@ -0,0 +1,201 @@ +import assert from "node:assert"; +import diagnostics_channel from "node:diagnostics_channel"; +import { DiagnosticsEvent } from "../../client/enterprise-maintenance-manager"; + +import { + RedisConnectionConfig, + createTestClient, + getDatabaseConfig, + getDatabaseConfigFromEnv, + getEnvConfig, +} from "./test-scenario.util"; +import { createClient } from "../../.."; +import { FaultInjectorClient } from "./fault-injector-client"; +import { MovingEndpointType } from "../../../dist/lib/client/enterprise-maintenance-manager"; +import { RedisTcpSocketOptions } from "../../client/socket"; + +describe("Client Configuration and Handshake", () => { + let clientConfig: RedisConnectionConfig; + let client: ReturnType>; + let faultInjectorClient: FaultInjectorClient; + let log: DiagnosticsEvent[] = []; + + before(() => { + const envConfig = getEnvConfig(); + const redisConfig = getDatabaseConfigFromEnv( + envConfig.redisEndpointsConfigPath, + ); + + faultInjectorClient = new FaultInjectorClient(envConfig.faultInjectorUrl); + clientConfig = getDatabaseConfig(redisConfig); + + diagnostics_channel.subscribe("redis.maintenance", (event) => { + log.push(event as DiagnosticsEvent); + }); + }); + + beforeEach(() => { + log.length = 0; + }); + + afterEach(async () => { + if (client && client.isOpen) { + await client.flushAll(); + client.destroy(); + } + }); + + describe("Parameter Configuration", () => { + const endpoints: MovingEndpointType[] = [ + "auto", + // "internal-ip", + // "internal-fqdn", + "external-ip", + "external-fqdn", + "none", + ]; + + for (const endpointType of endpoints) { + it(`clientHandshakeWithEndpointType '${endpointType}'`, async () => { + try { + client = await createTestClient(clientConfig, { + maintMovingEndpointType: endpointType, + }); + client.on("error", () => {}); + + //need to copy those because they will be mutated later + const oldOptions = JSON.parse(JSON.stringify(client.options)); + assert.ok(oldOptions); + + const { action_id } = await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction(action_id); + + const movingEvent = log.find((event) => event.type === "MOVING"); + assert(!!movingEvent, "Didnt receive moving PN"); + + let endpoint: string | undefined; + try { + //@ts-ignore + endpoint = movingEvent.data.push[3]; + } catch (err) { + assert( + false, + `couldnt get endpoint from event ${JSON.stringify(movingEvent)}`, + ); + } + + assert(endpoint !== undefined, "no endpoint"); + + const newOptions = client.options; + assert.ok(newOptions); + + if (oldOptions?.url) { + if (endpointType === "none") { + assert.equal( + newOptions!.url, + oldOptions.url, + "For movingEndpointTpe 'none', we expect old and new url to be the same", + ); + } else { + assert.equal( + newOptions.url, + endpoint, + "Expected what came through the wire to be set in the new client", + ); + assert.notEqual( + newOptions!.url, + oldOptions.url, + `For movingEndpointTpe ${endpointType}, we expect old and new url to be different`, + ); + } + } else { + const oldSocket = 
oldOptions.socket as RedisTcpSocketOptions; + const newSocket = newOptions.socket as RedisTcpSocketOptions; + assert.ok(oldSocket); + assert.ok(newSocket); + + if (endpointType === "none") { + assert.equal( + newSocket.host, + oldSocket.host, + "For movingEndpointTpe 'none', we expect old and new host to be the same", + ); + } else { + assert.equal( + newSocket.host + ":" + newSocket.port, + endpoint, + "Expected what came through the wire to be set in the new client", + ); + assert.notEqual( + newSocket.host, + oldSocket.host, + `For movingEndpointTpe ${endpointType}, we expect old and new host to be different`, + ); + } + } + } catch (error: any) { + if ( + endpointType === "internal-fqdn" || + endpointType === "internal-ip" + ) { + // errors are expected here, because we cannot connect to internal endpoints unless we are deployed in the same place as the server + } else { + assert(false, error); + } + } + }); + } + }); + + describe("Feature Enablement", () => { + it("connectionHandshakeIncludesEnablingNotifications", async () => { + client = await createTestClient(clientConfig, { + maintPushNotifications: "enabled", + }); + + const { action_id } = await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction(action_id); + + let movingEvent = false; + let migratingEvent = false; + let migratedEvent = false; + for (const event of log) { + if (event.type === "MOVING") movingEvent = true; + if (event.type === "MIGRATING") migratingEvent = true; + if (event.type === "MIGRATED") migratedEvent = true; + } + assert.ok(movingEvent, "didnt receive MOVING PN"); + assert.ok(migratingEvent, "didnt receive MIGRATING PN"); + assert.ok(migratedEvent, "didnt receive MIGRATED PN"); + }); + + it("disabledDontReceiveNotifications", async () => { + try { + client = await createTestClient(clientConfig, { + maintPushNotifications: "disabled", + socket: { + reconnectStrategy: false + } + }); + client.on('error', console.log.bind(console)) + + const { action_id } = await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction(action_id); + + assert.equal(log.length, 0, "received a PN while feature is disabled"); + } catch (error: any) { } + }); + }); +}); diff --git a/packages/client/lib/tests/test-scenario/connection-handoff.e2e.ts b/packages/client/lib/tests/test-scenario/connection-handoff.e2e.ts new file mode 100644 index 0000000000..3fbf5e38d4 --- /dev/null +++ b/packages/client/lib/tests/test-scenario/connection-handoff.e2e.ts @@ -0,0 +1,176 @@ +import { FaultInjectorClient } from "./fault-injector-client"; +import { + createTestClient, + getDatabaseConfig, + getDatabaseConfigFromEnv, + getEnvConfig, + RedisConnectionConfig, +} from "./test-scenario.util"; +import { createClient, RedisClientOptions } from "../../.."; +import { before } from "mocha"; +import Sinon, { SinonSpy, spy, stub } from "sinon"; +import assert from "node:assert"; + +/** + * Creates a spy on a duplicated client method + * @param client - The Redis client instance + * @param funcName - The name of the method to spy on + * @returns Object containing the promise that resolves with the spy and restore function + */ +const spyOnTemporaryClientInstanceMethod = ( + client: ReturnType>, + methodName: string +) => { + const { promise, resolve } = ( + Promise as typeof Promise & { + withResolvers: () => { + promise: Promise<{ spy: SinonSpy; restore: () => void }>; + resolve: 
(value: any) => void; + }; + } + ).withResolvers(); + + const originalDuplicate = client.duplicate.bind(client); + + const duplicateStub: Sinon.SinonStub = stub( + // Temporary clients (in the context of hitless upgrade) + // are created by calling the duplicate method on the client. + Object.getPrototypeOf(client), + "duplicate" + ).callsFake((opts) => { + const tmpClient = originalDuplicate(opts); + resolve({ + spy: spy(tmpClient, methodName), + restore: duplicateStub.restore, + }); + + return tmpClient; + }); + + return { + getSpy: () => promise, + }; +}; + +describe("Connection Handoff", () => { + let clientConfig: RedisConnectionConfig; + let client: ReturnType>; + let faultInjectorClient: FaultInjectorClient; + + before(() => { + const envConfig = getEnvConfig(); + const redisConfig = getDatabaseConfigFromEnv( + envConfig.redisEndpointsConfigPath + ); + + faultInjectorClient = new FaultInjectorClient(envConfig.faultInjectorUrl); + clientConfig = getDatabaseConfig(redisConfig); + }); + + afterEach(async () => { + if (client && client.isOpen) { + await client.flushAll(); + client.destroy(); + } + }); + + describe("New Connection Establishment & Traffic Resumption", () => { + const cases: Array<{ + name: string; + clientOptions: Partial; + }> = [ + { + name: "default options", + clientOptions: {}, + }, + { + name: "external-ip", + clientOptions: { + maintMovingEndpointType: "external-ip", + }, + }, + { + name: "external-fqdn", + clientOptions: { + maintMovingEndpointType: "external-fqdn", + }, + }, + { + name: "auto", + clientOptions: { + maintMovingEndpointType: "auto", + }, + }, + { + name: "none", + clientOptions: { + maintMovingEndpointType: "none", + }, + }, + ]; + + for (const { name, clientOptions } of cases) { + it(`should establish new connection and resume traffic afterwards - ${name}`, async () => { + client = await createTestClient(clientConfig, clientOptions); + + const spyObject = spyOnTemporaryClientInstanceMethod(client, "connect"); + + // PART 1 Establish initial connection + const { action_id: lowTimeoutBindAndMigrateActionId } = + await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction( + lowTimeoutBindAndMigrateActionId + ); + + const spyResult = await spyObject.getSpy(); + + assert.strictEqual(spyResult.spy.callCount, 1); + + // PART 2 Verify traffic resumption + const currentTime = Date.now().toString(); + await client.set("key", currentTime); + const result = await client.get("key"); + + assert.strictEqual(result, currentTime); + + spyResult.restore(); + }); + } + }); + + describe("TLS Connection Handoff", () => { + it.skip("TODO receiveMessagesWithTLSEnabledTest", async () => { + // + }); + it.skip("TODO connectionHandoffWithStaticInternalNameTest", async () => { + // + }); + it.skip("TODO connectionHandoffWithStaticExternalNameTest", async () => { + // + }); + }); + + describe("Connection Cleanup", () => { + it("should shut down old connection", async () => { + const spyObject = spyOnTemporaryClientInstanceMethod(client, "destroy"); + + const { action_id: lowTimeoutBindAndMigrateActionId } = + await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction(lowTimeoutBindAndMigrateActionId); + + const spyResult = await spyObject.getSpy(); + + assert.equal(spyResult.spy.callCount, 1); + + spyResult.restore(); + }); + }); +}); diff --git 
a/packages/client/lib/tests/test-scenario/fault-injector-client.ts b/packages/client/lib/tests/test-scenario/fault-injector-client.ts new file mode 100644 index 0000000000..13c81412b1 --- /dev/null +++ b/packages/client/lib/tests/test-scenario/fault-injector-client.ts @@ -0,0 +1,176 @@ +import { setTimeout } from "node:timers/promises"; + +export type ActionType = + | "dmc_restart" + | "failover" + | "reshard" + | "sequence_of_actions" + | "network_failure" + | "execute_rlutil_command" + | "execute_rladmin_command" + | "migrate" + | "bind" + | "update_cluster_config"; + +export interface ActionRequest { + type: ActionType; + parameters?: { + bdb_id?: string; + [key: string]: unknown; + }; +} + +export interface ActionStatus { + status: string; + error: unknown; + output: string; +} + +export class FaultInjectorClient { + private baseUrl: string; + #fetch: typeof fetch; + + constructor(baseUrl: string, fetchImpl: typeof fetch = fetch) { + this.baseUrl = baseUrl.replace(/\/+$/, ""); // trim trailing slash + this.#fetch = fetchImpl; + } + + /** + * Lists all available actions. + * @throws {Error} When the HTTP request fails or response cannot be parsed as JSON + */ + public listActions(): Promise { + return this.#request("GET", "/action"); + } + + /** + * Triggers a specific action. + * @param action The action request to trigger + * @throws {Error} When the HTTP request fails or response cannot be parsed as JSON + */ + public triggerAction( + action: ActionRequest + ): Promise { + return this.#request("POST", "/action", action); + } + + /** + * Gets the status of a specific action. + * @param actionId The ID of the action to check + * @throws {Error} When the HTTP request fails or response cannot be parsed as JSON + */ + public getActionStatus(actionId: string): Promise { + return this.#request("GET", `/action/${actionId}`); + } + + /** + * Waits for an action to complete. 
+ * @param actionId The ID of the action to wait for + * @param options Optional timeout and max wait time + * @throws {Error} When the action does not complete within the max wait time + */ + public async waitForAction( + actionId: string, + { + timeoutMs, + maxWaitTimeMs, + }: { + timeoutMs?: number; + maxWaitTimeMs?: number; + } = {} + ): Promise { + const timeout = timeoutMs || 1000; + const maxWaitTime = maxWaitTimeMs || 60000; + + const startTime = Date.now(); + + while (Date.now() - startTime < maxWaitTime) { + const action = await this.getActionStatus(actionId); + + if (["finished", "failed", "success"].includes(action.status)) { + return action; + } + + await setTimeout(timeout); + } + + throw new Error(`Timeout waiting for action ${actionId}`); + } + + async migrateAndBindAction({ + bdbId, + clusterIndex, + }: { + bdbId: string | number; + clusterIndex: string | number; + }) { + const bdbIdStr = bdbId.toString(); + const clusterIndexStr = clusterIndex.toString(); + + return this.triggerAction<{ + action_id: string; + }>({ + type: "sequence_of_actions", + parameters: { + bdbId: bdbIdStr, + actions: [ + { + type: "migrate", + params: { + cluster_index: clusterIndexStr, + }, + }, + { + type: "bind", + params: { + cluster_index: clusterIndexStr, + bdb_id: bdbIdStr, + }, + }, + ], + }, + }); + } + + async #request( + method: string, + path: string, + body?: Object | string + ): Promise { + const url = `${this.baseUrl}${path}`; + const headers: Record = { + "Content-Type": "application/json", + }; + + let payload: string | undefined; + + if (body) { + if (typeof body === "string") { + headers["Content-Type"] = "text/plain"; + payload = body; + } else { + headers["Content-Type"] = "application/json"; + payload = JSON.stringify(body); + } + } + + const response = await this.#fetch(url, { method, headers, body: payload }); + + if (!response.ok) { + try { + const text = await response.text(); + throw new Error(`HTTP ${response.status} - ${text}`); + } catch { + throw new Error(`HTTP ${response.status}`); + } + } + + try { + return (await response.json()) as T; + } catch { + throw new Error( + `HTTP ${response.status} - Unable to parse response as JSON` + ); + } + } +} diff --git a/packages/client/lib/tests/test-scenario/negative-tests.e2e.ts b/packages/client/lib/tests/test-scenario/negative-tests.e2e.ts new file mode 100644 index 0000000000..9e90b80c50 --- /dev/null +++ b/packages/client/lib/tests/test-scenario/negative-tests.e2e.ts @@ -0,0 +1,15 @@ +import assert from "assert"; +import { createClient } from "../../.."; + +describe("Negative tests", () => { + it("should only be enabled with RESP3", () => { + assert.throws( + () => + createClient({ + RESP: 2, + maintPushNotifications: "enabled", + }), + "Error: Graceful Maintenance is only supported with RESP3", + ); + }); +}); diff --git a/packages/client/lib/tests/test-scenario/push-notification.e2e.ts b/packages/client/lib/tests/test-scenario/push-notification.e2e.ts new file mode 100644 index 0000000000..9962d0a02d --- /dev/null +++ b/packages/client/lib/tests/test-scenario/push-notification.e2e.ts @@ -0,0 +1,349 @@ +import assert from "node:assert"; +import diagnostics_channel from "node:diagnostics_channel"; +import { FaultInjectorClient } from "./fault-injector-client"; +import { + createTestClient, + getDatabaseConfig, + getDatabaseConfigFromEnv, + getEnvConfig, + RedisConnectionConfig, +} from "./test-scenario.util"; +import { createClient } from "../../.."; +import { DiagnosticsEvent } from 
"../../client/enterprise-maintenance-manager"; +import { before } from "mocha"; + +describe("Push Notifications", () => { + const createNotificationMessageHandler = ( + result: Record, + notifications: Array + ) => { + return (message: unknown) => { + if (notifications.includes((message as DiagnosticsEvent).type)) { + const event = message as DiagnosticsEvent; + result[event.type] = (result[event.type] ?? 0) + 1; + } + }; + }; + + let onMessageHandler: ReturnType; + let clientConfig: RedisConnectionConfig; + let client: ReturnType>; + let faultInjectorClient: FaultInjectorClient; + + before(() => { + const envConfig = getEnvConfig(); + const redisConfig = getDatabaseConfigFromEnv( + envConfig.redisEndpointsConfigPath + ); + + faultInjectorClient = new FaultInjectorClient(envConfig.faultInjectorUrl); + clientConfig = getDatabaseConfig(redisConfig); + }); + + afterEach(() => { + if (onMessageHandler!) { + diagnostics_channel.unsubscribe("redis.maintenance", onMessageHandler); + } + + if (client && client.isOpen) { + client.destroy(); + } + }); + + describe("Push Notifications Enabled", () => { + beforeEach(async () => { + client = await createTestClient(clientConfig); + + await client.flushAll(); + }); + + it("should receive MOVING, MIGRATING, and MIGRATED push notifications", async () => { + const notifications: Array = [ + "MOVING", + "MIGRATING", + "MIGRATED", + ]; + + const diagnosticsMap: Record = {}; + + onMessageHandler = createNotificationMessageHandler( + diagnosticsMap, + notifications + ); + + diagnostics_channel.subscribe("redis.maintenance", onMessageHandler); + + const { action_id: bindAndMigrateActionId } = + await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction(bindAndMigrateActionId); + + assert.strictEqual( + diagnosticsMap.MOVING, + 1, + "Should have received exactly one MOVING notification" + ); + assert.strictEqual( + diagnosticsMap.MIGRATING, + 1, + "Should have received exactly one MIGRATING notification" + ); + assert.strictEqual( + diagnosticsMap.MIGRATED, + 1, + "Should have received exactly one MIGRATED notification" + ); + }); + + it("should receive FAILING_OVER and FAILED_OVER push notifications", async () => { + const notifications: Array = [ + "FAILING_OVER", + "FAILED_OVER", + ]; + + const diagnosticsMap: Record = {}; + + onMessageHandler = createNotificationMessageHandler( + diagnosticsMap, + notifications + ); + + diagnostics_channel.subscribe("redis.maintenance", onMessageHandler); + + const { action_id: failoverActionId } = + await faultInjectorClient.triggerAction({ + type: "failover", + parameters: { + bdb_id: clientConfig.bdbId.toString(), + cluster_index: 0, + }, + }); + + await faultInjectorClient.waitForAction(failoverActionId); + + assert.strictEqual( + diagnosticsMap.FAILING_OVER, + 1, + "Should have received exactly one FAILING_OVER notification" + ); + assert.strictEqual( + diagnosticsMap.FAILED_OVER, + 1, + "Should have received exactly one FAILED_OVER notification" + ); + }); + }); + + describe("Push Notifications Disabled - Client", () => { + beforeEach(async () => { + client = await createTestClient(clientConfig, { + maintPushNotifications: "disabled", + }); + + client.on("error", (_err) => { + // Expect the socket to be closed + // Ignore errors + }); + + await client.flushAll(); + }); + + it("should NOT receive MOVING, MIGRATING, and MIGRATED push notifications", async () => { + const notifications: Array = [ + "MOVING", + "MIGRATING", + "MIGRATED", 
+ ]; + + const diagnosticsMap: Record = {}; + + onMessageHandler = createNotificationMessageHandler( + diagnosticsMap, + notifications + ); + + diagnostics_channel.subscribe("redis.maintenance", onMessageHandler); + + const { action_id: bindAndMigrateActionId } = + await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction(bindAndMigrateActionId); + + assert.strictEqual( + diagnosticsMap.MOVING, + undefined, + "Should NOT have received exactly one MOVING notification" + ); + assert.strictEqual( + diagnosticsMap.MIGRATING, + undefined, + "Should NOT have received exactly one MIGRATING notification" + ); + assert.strictEqual( + diagnosticsMap.MIGRATED, + undefined, + "Should NOT have received exactly one MIGRATED notification" + ); + }); + + it("should NOT receive FAILING_OVER and FAILED_OVER push notifications", async () => { + const notifications: Array = [ + "FAILING_OVER", + "FAILED_OVER", + ]; + + const diagnosticsMap: Record = {}; + + onMessageHandler = createNotificationMessageHandler( + diagnosticsMap, + notifications + ); + + diagnostics_channel.subscribe("redis.maintenance", onMessageHandler); + + const { action_id: failoverActionId } = + await faultInjectorClient.triggerAction({ + type: "failover", + parameters: { + bdb_id: clientConfig.bdbId.toString(), + cluster_index: 0, + }, + }); + + await faultInjectorClient.waitForAction(failoverActionId); + + assert.strictEqual( + diagnosticsMap.FAILING_OVER, + undefined, + "Should have received exactly one FAILING_OVER notification" + ); + assert.strictEqual( + diagnosticsMap.FAILED_OVER, + undefined, + "Should have received exactly one FAILED_OVER notification" + ); + }); + }); + + describe("Push Notifications Disabled - Server", () => { + beforeEach(async () => { + client = await createTestClient(clientConfig); + + client.on("error", (_err) => { + // Expect the socket to be closed + // Ignore errors + }); + + await client.flushAll(); + }); + + before(async () => { + const { action_id: disablePushNotificationsActionId } = + await faultInjectorClient.triggerAction({ + type: "update_cluster_config", + parameters: { + config: { client_maint_notifications: false }, + }, + }); + + await faultInjectorClient.waitForAction(disablePushNotificationsActionId); + }); + + after(async () => { + const { action_id: enablePushNotificationsActionId } = + await faultInjectorClient.triggerAction({ + type: "update_cluster_config", + parameters: { + config: { client_maint_notifications: true }, + }, + }); + + await faultInjectorClient.waitForAction(enablePushNotificationsActionId); + }); + + it("should NOT receive MOVING, MIGRATING, and MIGRATED push notifications", async () => { + const notifications: Array = [ + "MOVING", + "MIGRATING", + "MIGRATED", + ]; + + const diagnosticsMap: Record = {}; + + onMessageHandler = createNotificationMessageHandler( + diagnosticsMap, + notifications + ); + + diagnostics_channel.subscribe("redis.maintenance", onMessageHandler); + + const { action_id: bindAndMigrateActionId } = + await faultInjectorClient.migrateAndBindAction({ + bdbId: clientConfig.bdbId, + clusterIndex: 0, + }); + + await faultInjectorClient.waitForAction(bindAndMigrateActionId); + + assert.strictEqual( + diagnosticsMap.MOVING, + undefined, + "Should NOT have received exactly one MOVING notification" + ); + assert.strictEqual( + diagnosticsMap.MIGRATING, + undefined, + "Should NOT have received exactly one MIGRATING notification" + ); + assert.strictEqual( + 
diagnosticsMap.MIGRATED, + undefined, + "Should NOT have received exactly one MIGRATED notification" + ); + }); + + it("should NOT receive FAILING_OVER and FAILED_OVER push notifications", async () => { + const notifications: Array = [ + "FAILING_OVER", + "FAILED_OVER", + ]; + + const diagnosticsMap: Record = {}; + + onMessageHandler = createNotificationMessageHandler( + diagnosticsMap, + notifications + ); + + diagnostics_channel.subscribe("redis.maintenance", onMessageHandler); + + const { action_id: failoverActionId } = + await faultInjectorClient.triggerAction({ + type: "failover", + parameters: { + bdb_id: clientConfig.bdbId.toString(), + cluster_index: 0, + }, + }); + + await faultInjectorClient.waitForAction(failoverActionId); + + assert.strictEqual( + diagnosticsMap.FAILING_OVER, + undefined, + "Should have received exactly one FAILING_OVER notification" + ); + assert.strictEqual( + diagnosticsMap.FAILED_OVER, + undefined, + "Should have received exactly one FAILED_OVER notification" + ); + }); + }); +}); diff --git a/packages/client/lib/tests/test-scenario/test-scenario.util.ts b/packages/client/lib/tests/test-scenario/test-scenario.util.ts new file mode 100644 index 0000000000..c98ba90fe1 --- /dev/null +++ b/packages/client/lib/tests/test-scenario/test-scenario.util.ts @@ -0,0 +1,174 @@ +import { readFileSync } from "fs"; +import { createClient, RedisClientOptions } from "../../.."; +import { stub } from "sinon"; + +type DatabaseEndpoint = { + addr: string[]; + addr_type: string; + dns_name: string; + oss_cluster_api_preferred_endpoint_type: string; + oss_cluster_api_preferred_ip_type: string; + port: number; + proxy_policy: string; + uid: string; +}; + +type DatabaseConfig = { + bdb_id: number; + username: string; + password: string; + tls: boolean; + raw_endpoints: DatabaseEndpoint[]; + endpoints: string[]; +}; + +type DatabasesConfig = { + [databaseName: string]: DatabaseConfig; +}; + +type EnvConfig = { + redisEndpointsConfigPath: string; + faultInjectorUrl: string; +}; + +/** + * Reads environment variables required for the test scenario + * @returns Environment configuration object + * @throws Error if required environment variables are not set + */ +export function getEnvConfig(): EnvConfig { + if (!process.env.REDIS_ENDPOINTS_CONFIG_PATH) { + throw new Error( + "REDIS_ENDPOINTS_CONFIG_PATH environment variable must be set" + ); + } + + if (!process.env.FAULT_INJECTION_API_URL) { + throw new Error("FAULT_INJECTION_API_URL environment variable must be set"); + } + + return { + redisEndpointsConfigPath: process.env.REDIS_ENDPOINTS_CONFIG_PATH, + faultInjectorUrl: process.env.FAULT_INJECTION_API_URL, + }; +} + +/** + * Reads database configuration from a file + * @param filePath - The path to the database configuration file + * @returns Parsed database configuration object + * @throws Error if file doesn't exist or JSON is invalid + */ +export function getDatabaseConfigFromEnv(filePath: string): DatabasesConfig { + try { + const fileContent = readFileSync(filePath, "utf8"); + return JSON.parse(fileContent) as DatabasesConfig; + } catch (error) { + throw new Error(`Failed to read or parse database config from ${filePath}`); + } +} + +export interface RedisConnectionConfig { + host: string; + port: number; + username: string; + password: string; + tls: boolean; + bdbId: number; +} + +/** + * Gets Redis connection parameters for a specific database + * @param databasesConfig - The parsed database configuration object + * @param databaseName - Optional name of the database to 
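
The scenarios above observe maintenance events purely through Node's built-in diagnostics_channel API: the client publishes an event object on the "redis.maintenance" channel for every MOVING, MIGRATING, MIGRATED, FAILING_OVER, and FAILED_OVER notification it handles. A minimal stand-alone consumer, a sketch that assumes only the channel name and the string `type` field these tests rely on, could look like this:

// Sketch: counting maintenance push notifications outside the test suite.
// Assumes only what the tests above rely on: event objects published on the
// "redis.maintenance" diagnostics channel that carry a string `type` field.
import diagnostics_channel from "node:diagnostics_channel";

// Counts per event type, e.g. { MOVING: 1, MIGRATING: 1, MIGRATED: 1 }
const seen: Record<string, number> = {};

diagnostics_channel.subscribe("redis.maintenance", (message) => {
  const { type } = message as { type: string };
  seen[type] = (seen[type] ?? 0) + 1;
  console.log(`redis.maintenance event: ${type}`, seen);
});
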
diff --git a/packages/client/lib/tests/test-scenario/test-scenario.util.ts b/packages/client/lib/tests/test-scenario/test-scenario.util.ts
new file mode 100644
index 0000000000..c98ba90fe1
--- /dev/null
+++ b/packages/client/lib/tests/test-scenario/test-scenario.util.ts
@@ -0,0 +1,174 @@
+import { readFileSync } from "fs";
+import { createClient, RedisClientOptions } from "../../..";
+import { stub } from "sinon";
+
+type DatabaseEndpoint = {
+  addr: string[];
+  addr_type: string;
+  dns_name: string;
+  oss_cluster_api_preferred_endpoint_type: string;
+  oss_cluster_api_preferred_ip_type: string;
+  port: number;
+  proxy_policy: string;
+  uid: string;
+};
+
+type DatabaseConfig = {
+  bdb_id: number;
+  username: string;
+  password: string;
+  tls: boolean;
+  raw_endpoints: DatabaseEndpoint[];
+  endpoints: string[];
+};
+
+type DatabasesConfig = {
+  [databaseName: string]: DatabaseConfig;
+};
+
+type EnvConfig = {
+  redisEndpointsConfigPath: string;
+  faultInjectorUrl: string;
+};
+
+/**
+ * Reads environment variables required for the test scenario
+ * @returns Environment configuration object
+ * @throws Error if required environment variables are not set
+ */
+export function getEnvConfig(): EnvConfig {
+  if (!process.env.REDIS_ENDPOINTS_CONFIG_PATH) {
+    throw new Error(
+      "REDIS_ENDPOINTS_CONFIG_PATH environment variable must be set"
+    );
+  }
+
+  if (!process.env.FAULT_INJECTION_API_URL) {
+    throw new Error("FAULT_INJECTION_API_URL environment variable must be set");
+  }
+
+  return {
+    redisEndpointsConfigPath: process.env.REDIS_ENDPOINTS_CONFIG_PATH,
+    faultInjectorUrl: process.env.FAULT_INJECTION_API_URL,
+  };
+}
+
+/**
+ * Reads database configuration from a file
+ * @param filePath - The path to the database configuration file
+ * @returns Parsed database configuration object
+ * @throws Error if file doesn't exist or JSON is invalid
+ */
+export function getDatabaseConfigFromEnv(filePath: string): DatabasesConfig {
+  try {
+    const fileContent = readFileSync(filePath, "utf8");
+    return JSON.parse(fileContent) as DatabasesConfig;
+  } catch (error) {
+    throw new Error(`Failed to read or parse database config from ${filePath}`);
+  }
+}
+
+export interface RedisConnectionConfig {
+  host: string;
+  port: number;
+  username: string;
+  password: string;
+  tls: boolean;
+  bdbId: number;
+}
+
+/**
+ * Gets Redis connection parameters for a specific database
+ * @param databasesConfig - The parsed database configuration object
+ * @param databaseName - Optional name of the database to retrieve (defaults to the first one)
+ * @returns Redis connection configuration with host, port, username, password, and tls
+ * @throws Error if the specified database is not found in the configuration
+ */
+export function getDatabaseConfig(
+  databasesConfig: DatabasesConfig,
+  databaseName?: string
+): RedisConnectionConfig {
+  const dbConfig = databaseName
+    ? databasesConfig[databaseName]
+    : Object.values(databasesConfig)[0];
+
+  if (!dbConfig) {
+    throw new Error(
+      `Database ${databaseName ? databaseName : ""} not found in configuration`
+    );
+  }
+
+  const endpoint = dbConfig.raw_endpoints[0]; // Use the first endpoint
+
+  return {
+    host: endpoint.dns_name,
+    port: endpoint.port,
+    username: dbConfig.username,
+    password: dbConfig.password,
+    tls: dbConfig.tls,
+    bdbId: dbConfig.bdb_id,
+  };
+}
+
+/**
+ * Executes the provided function while `setImmediate` is stubbed out, so any
+ * callbacks scheduled with `setImmediate` never run and the command is
+ * effectively blocked until it times out.
+ *
+ * @param command - The command to execute
+ * @returns The error and duration of the command execution
+ */
+export async function blockCommand(command: () => Promise<unknown>) {
+  let error: any;
+
+  const start = performance.now();
+
+  let setImmediateStub: any;
+
+  try {
+    setImmediateStub = stub(global, "setImmediate");
+    setImmediateStub.callsFake(() => {
+      // Don't call the callback, effectively blocking execution
+    });
+    await command();
+  } catch (err: any) {
+    error = err;
+  } finally {
+    if (setImmediateStub) {
+      setImmediateStub.restore();
+    }
+  }
+
+  return {
+    error,
+    duration: performance.now() - start,
+  };
+}
+
+/**
+ * Creates a test client with the provided configuration and connects it
+ * @param clientConfig - The Redis connection configuration
+ * @param options - Optional client options
+ * @returns The created Redis client
+ */
+export async function createTestClient(
+  clientConfig: RedisConnectionConfig,
+  options: Partial<RedisClientOptions> = {}
+) {
+  const client = createClient({
+    socket: {
+      host: clientConfig.host,
+      port: clientConfig.port,
+      ...(clientConfig.tls === true ? { tls: true } : {}),
+    },
+    password: clientConfig.password,
+    username: clientConfig.username,
+    RESP: 3,
+    maintPushNotifications: "auto",
+    maintMovingEndpointType: "auto",
+    ...options,
+  });
+
+  await client.connect();
+
+  return client;
+}
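
The helpers above are meant to be chained: resolve the environment, pick a database, connect a client, and (for the timeout scenarios) deliberately block a command. A usage sketch follows; the 50 ms timeout mirrors NORMAL_COMMAND_TIMEOUT from the e2e tests, and everything else is taken from the utility file itself.

// Sketch: combining the test-scenario helpers (values are illustrative).
import {
  blockCommand,
  createTestClient,
  getDatabaseConfig,
  getDatabaseConfigFromEnv,
  getEnvConfig,
} from "./test-scenario.util";

async function main() {
  // Requires REDIS_ENDPOINTS_CONFIG_PATH and FAULT_INJECTION_API_URL to be set.
  const envConfig = getEnvConfig();
  const databases = getDatabaseConfigFromEnv(envConfig.redisEndpointsConfigPath);
  const clientConfig = getDatabaseConfig(databases); // defaults to the first database

  const client = await createTestClient(clientConfig, {
    commandOptions: { timeout: 50 },
  });

  // With setImmediate stubbed out, the SET cannot complete and times out instead.
  const { error, duration } = await blockCommand(() => client.set("key", "value"));
  console.log(error?.constructor?.name, `${duration.toFixed(0)}ms`);

  client.destroy();
}

main().catch(console.error);
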
diff --git a/packages/client/lib/tests/test-scenario/timeout-during-notifications.e2e.ts b/packages/client/lib/tests/test-scenario/timeout-during-notifications.e2e.ts
new file mode 100644
index 0000000000..a60aacb703
--- /dev/null
+++ b/packages/client/lib/tests/test-scenario/timeout-during-notifications.e2e.ts
@@ -0,0 +1,290 @@
+import assert from "node:assert";
+
+import { FaultInjectorClient } from "./fault-injector-client";
+import {
+  getDatabaseConfig,
+  getDatabaseConfigFromEnv,
+  getEnvConfig,
+  RedisConnectionConfig,
+  blockCommand,
+  createTestClient,
+} from "./test-scenario.util";
+import { createClient } from "../../..";
+import { before } from "mocha";
+import diagnostics_channel from "node:diagnostics_channel";
+import { DiagnosticsEvent } from "../../client/enterprise-maintenance-manager";
+
+describe("Timeout Handling During Notifications", () => {
+  let clientConfig: RedisConnectionConfig;
+  let faultInjectorClient: FaultInjectorClient;
+  let client: Awaited<ReturnType<typeof createTestClient>>;
+
+  const NORMAL_COMMAND_TIMEOUT = 50;
+  const RELAXED_COMMAND_TIMEOUT = 2000;
+
+  /**
+   * Creates a handler for the `redis.maintenance` channel that, when one of the
+   * given notifications is received, executes a blocked command on the client
+   * and saves the outcome in the `result` object.
+   * This is used to test that the command timeout is relaxed during notifications.
+   */
+  const createNotificationMessageHandler = (
+    client: Awaited<ReturnType<typeof createTestClient>>,
+    result: Record<string, { error: any; duration: number }>,
+    notifications: Array<DiagnosticsEvent["type"]>
+  ) => {
+    return (message: unknown) => {
+      if (notifications.includes((message as DiagnosticsEvent).type)) {
+        setImmediate(async () => {
+          result[(message as DiagnosticsEvent).type] = await blockCommand(
+            async () => {
+              await client.set("key", "value");
+            }
+          );
+        });
+      }
+    };
+  };
+
+  before(() => {
+    const envConfig = getEnvConfig();
+    const redisConfig = getDatabaseConfigFromEnv(
+      envConfig.redisEndpointsConfigPath
+    );
+
+    clientConfig = getDatabaseConfig(redisConfig);
+    faultInjectorClient = new FaultInjectorClient(envConfig.faultInjectorUrl);
+  });
+
+  beforeEach(async () => {
+    client = await createTestClient(clientConfig, {
+      commandOptions: { timeout: NORMAL_COMMAND_TIMEOUT },
+      maintRelaxedCommandTimeout: RELAXED_COMMAND_TIMEOUT,
+    });
+
+    await client.flushAll();
+  });
+
+  afterEach(() => {
+    if (client && client.isOpen) {
+      client.destroy();
+    }
+  });
+
+  it("should relax command timeout on MOVING, MIGRATING", async () => {
+    // PART 1
+    // Normal command timeout
+    const { error, duration } = await blockCommand(async () => {
+      await client.set("key", "value");
+    });
+
+    assert.ok(
+      error instanceof Error,
+      "Command Timeout error should be instanceof Error"
+    );
+    assert.ok(
+      duration > NORMAL_COMMAND_TIMEOUT &&
+        duration < NORMAL_COMMAND_TIMEOUT * 1.1,
+      `Normal command should time out at roughly the normal timeout (within 10%)`
+    );
+    assert.strictEqual(
+      error?.constructor?.name,
+      "TimeoutError",
+      "Command Timeout error should be TimeoutError"
+    );
+
+    // PART 2
+    // Command timeout during maintenance
+    const notifications: Array<DiagnosticsEvent["type"]> = [
+      "MOVING",
+      "MIGRATING",
+    ];
+
+    const result: Record<
+      DiagnosticsEvent["type"],
+      { error: any; duration: number }
+    > = {};
+
+    const onMessageHandler = createNotificationMessageHandler(
+      client,
+      result,
+      notifications
+    );
+
+    diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
+
+    const { action_id: bindAndMigrateActionId } =
+      await faultInjectorClient.migrateAndBindAction({
+        bdbId: clientConfig.bdbId,
+        clusterIndex: 0,
+      });
+
+    await faultInjectorClient.waitForAction(bindAndMigrateActionId);
+
+    diagnostics_channel.unsubscribe("redis.maintenance", onMessageHandler);
+
+    notifications.forEach((notification) => {
+      assert.ok(
+        result[notification]?.error instanceof Error,
+        `${notification} notification error should be instanceof Error`
+      );
+      assert.ok(
+        result[notification]?.duration > RELAXED_COMMAND_TIMEOUT &&
+          result[notification]?.duration < RELAXED_COMMAND_TIMEOUT * 1.1,
+        `${notification} notification should time out at roughly the relaxed timeout (within 10%)`
+      );
+      assert.strictEqual(
+        result[notification]?.error?.constructor?.name,
+        "CommandTimeoutDuringMaintenanceError",
+        `${notification} notification error should be CommandTimeoutDuringMaintenanceError`
+      );
+    });
+  });
+
+  it("should unrelax command timeout after MIGRATED and MOVING", async () => {
+    const { action_id: migrateActionId } =
+      await faultInjectorClient.triggerAction({
+        type: "migrate",
+        parameters: {
+          cluster_index: 0,
+        },
+      });
+
+    await faultInjectorClient.waitForAction(migrateActionId);
+
+    // PART 1
+    // After migration
+    const { error: errorMigrate, duration: durationMigrate } =
+      await blockCommand(async () => {
+        await client.set("key", "value");
+      });
+
+    assert.ok(
+      errorMigrate instanceof Error,
+      "Command Timeout error should be instanceof Error"
+    );
+    assert.ok(
+      durationMigrate > NORMAL_COMMAND_TIMEOUT &&
+        durationMigrate < NORMAL_COMMAND_TIMEOUT * 1.1,
+      `Normal command should time out at roughly the normal timeout (within 10%)`
+    );
+    assert.strictEqual(
+      errorMigrate?.constructor?.name,
+      "TimeoutError",
+      "Command Timeout error should be TimeoutError"
+    );
+
+    const { action_id: bindActionId } = await faultInjectorClient.triggerAction(
+      {
+        type: "bind",
+        parameters: {
+          bdb_id: clientConfig.bdbId.toString(),
+          cluster_index: 0,
+        },
+      }
+    );
+
+    await faultInjectorClient.waitForAction(bindActionId);
+
+    // PART 2
+    // After bind
+    const { error: errorBind, duration: durationBind } = await blockCommand(
+      async () => {
+        await client.set("key", "value");
+      }
+    );
+
+    assert.ok(
+      errorBind instanceof Error,
+      "Command Timeout error should be instanceof Error"
+    );
+    assert.ok(
+      durationBind > NORMAL_COMMAND_TIMEOUT &&
+        durationBind < NORMAL_COMMAND_TIMEOUT * 1.1,
+      `Normal command should time out at roughly the normal timeout (within 10%)`
+    );
+    assert.strictEqual(
+      errorBind?.constructor?.name,
+      "TimeoutError",
+      "Command Timeout error should be TimeoutError"
+    );
+  });
+
+  it("should relax command timeout on FAILING_OVER", async () => {
+    const notifications: Array<DiagnosticsEvent["type"]> = ["FAILING_OVER"];
+
+    const result: Record<
+      DiagnosticsEvent["type"],
+      { error: any; duration: number }
+    > = {};
+
+    const onMessageHandler = createNotificationMessageHandler(
+      client,
+      result,
+      notifications
+    );
+
+    diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
+
+    const { action_id: failoverActionId } =
+      await faultInjectorClient.triggerAction({
+        type: "failover",
+        parameters: {
+          bdb_id: clientConfig.bdbId.toString(),
+          cluster_index: 0,
+        },
+      });
+
+    await faultInjectorClient.waitForAction(failoverActionId);
+
+    diagnostics_channel.unsubscribe("redis.maintenance", onMessageHandler);
+
+    notifications.forEach((notification) => {
+      assert.ok(
+        result[notification]?.error instanceof Error,
+        `${notification} notification error should be instanceof Error`
+      );
+      assert.ok(
+        result[notification]?.duration > RELAXED_COMMAND_TIMEOUT &&
+          result[notification]?.duration < RELAXED_COMMAND_TIMEOUT * 1.1,
+        `${notification} notification should time out at roughly the relaxed timeout (within 10%)`
+      );
+      assert.strictEqual(
+        result[notification]?.error?.constructor?.name,
+        "CommandTimeoutDuringMaintenanceError",
+        `${notification} notification error should be CommandTimeoutDuringMaintenanceError`
+      );
+    });
+  });
+
+  it("should unrelax command timeout after FAILED_OVER", async () => {
+    const { action_id: failoverActionId } =
+      await faultInjectorClient.triggerAction({
+        type: "failover",
+        parameters: {
+          bdb_id: clientConfig.bdbId.toString(),
+          cluster_index: 0,
+        },
+      });
+
+    await faultInjectorClient.waitForAction(failoverActionId);
+
+    const { error, duration } = await blockCommand(async () => {
+      await client.set("key", "value");
+    });
+
+    assert.ok(
+      error instanceof Error,
+      "Command Timeout error should be instanceof Error"
+    );
+    assert.ok(
+      duration > NORMAL_COMMAND_TIMEOUT &&
+        duration < NORMAL_COMMAND_TIMEOUT * 1.1,
+      `Normal command should time out at roughly the normal timeout (within 10%)`
+    );
+    assert.strictEqual(
+      error?.constructor?.name,
+      "TimeoutError",
+      "Command Timeout error should be TimeoutError"
+    );
+  });
+});
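
Taken together, the timeout scenarios pin down the client-side configuration surface they exercise: a RESP3 connection, the maintenance notification options, a normal per-command timeout, and the relaxed timeout that replaces it while a maintenance notification is in effect. A configuration sketch follows; the host, port, and numeric values are placeholders, while the option names and the RESP: 3 requirement are the ones used by createTestClient and the tests above.

// Sketch: the client options exercised by the maintenance scenarios (illustrative values).
import { createClient } from "redis";

async function connectMaintenanceAwareClient() {
  const client = createClient({
    socket: { host: "localhost", port: 6379 },
    RESP: 3, // push notifications require RESP3
    maintPushNotifications: "auto", // "disabled" opts the client out entirely
    maintMovingEndpointType: "auto",
    maintRelaxedCommandTimeout: 2000, // used instead of the normal timeout during maintenance
    commandOptions: { timeout: 50 }, // normal per-command timeout
  });

  await client.connect();

  return client;
}
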
"dependencies": { - "@redis/bloom": "5.8.2", - "@redis/client": "5.8.2", - "@redis/json": "5.8.2", - "@redis/search": "5.8.2", - "@redis/time-series": "5.8.2" + "@redis/bloom": "5.9.0-beta.0", + "@redis/client": "5.9.0-beta.0", + "@redis/json": "5.9.0-beta.0", + "@redis/search": "5.9.0-beta.0", + "@redis/time-series": "5.9.0-beta.0" }, "engines": { "node": ">= 18" diff --git a/packages/search/package.json b/packages/search/package.json index 40238080e8..0ca44b2273 100644 --- a/packages/search/package.json +++ b/packages/search/package.json @@ -1,6 +1,6 @@ { "name": "@redis/search", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "main": "./dist/lib/index.js", "types": "./dist/lib/index.d.ts", @@ -14,7 +14,7 @@ "release": "release-it" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" }, "devDependencies": { "@redis/test-utils": "*" diff --git a/packages/time-series/package.json b/packages/time-series/package.json index 46ea5b16fe..81e170b467 100644 --- a/packages/time-series/package.json +++ b/packages/time-series/package.json @@ -1,6 +1,6 @@ { "name": "@redis/time-series", - "version": "5.8.2", + "version": "5.9.0-beta.0", "license": "MIT", "main": "./dist/lib/index.js", "types": "./dist/lib/index.d.ts", @@ -13,7 +13,7 @@ "release": "release-it" }, "peerDependencies": { - "@redis/client": "^5.8.2" + "@redis/client": "^5.9.0-beta.0" }, "devDependencies": { "@redis/test-utils": "*"