diff --git a/test/scenario/sharded-pub-sub.test.ts b/test/scenario/sharded-pub-sub.test.ts
new file mode 100644
index 00000000..b9db49aa
--- /dev/null
+++ b/test/scenario/sharded-pub-sub.test.ts
@@ -0,0 +1,408 @@
+import type { TestConfig } from "./utils/test.util";
+import {
+  CHANNELS,
+  CHANNELS_BY_SLOT,
+  createClusterTestClient,
+  getConfig,
+  wait,
+  waitClientReady,
+} from "./utils/test.util";
+
+import { FaultInjectorClient } from "./utils/fault-injector";
+import { TestCommandRunner } from "./utils/command-runner";
+import { MessageTracker } from "./utils/message-tracker";
+import { Cluster } from "../../lib";
+import { assert } from "chai";
+
+describe("Sharded Pub/Sub E2E", () => {
+  let faultInjectorClient: FaultInjectorClient;
+  let config: TestConfig;
+
+  before(() => {
+    config = getConfig();
+
+    faultInjectorClient = new FaultInjectorClient(config.faultInjectorUrl);
+  });
+
+  describe("Single Subscriber", () => {
+    let subscriber: Cluster;
+    let publisher: Cluster;
+    let messageTracker: MessageTracker;
+
+    beforeEach(async () => {
+      messageTracker = new MessageTracker(CHANNELS);
+      subscriber = createClusterTestClient(config.clientConfig, {
+        shardedSubscribers: true,
+      });
+      publisher = createClusterTestClient(config.clientConfig, {
+        shardedSubscribers: true,
+      });
+      await Promise.all([
+        waitClientReady(subscriber),
+        waitClientReady(publisher),
+      ]);
+    });
+
+    afterEach(async () => {
+      await Promise.all([subscriber.quit(), publisher.quit()]);
+    });
+
+    it("should receive messages published to multiple channels", async () => {
+      for (const channel of CHANNELS) {
+        await subscriber.ssubscribe(channel);
+      }
+
+      subscriber.on("smessage", (channelName, _) => {
+        messageTracker.incrementReceived(channelName);
+      });
+
+      const { controller, result } =
+        TestCommandRunner.publishMessagesUntilAbortSignal(
+          publisher,
+          CHANNELS,
+          messageTracker
+        );
+
+      // Wait for 10 seconds, while publishing messages
+      await wait(10_000);
+      controller.abort();
+      await result;
+
+      for (const channel of CHANNELS) {
+        assert.strictEqual(
+          messageTracker.getChannelStats(channel)?.received,
+          messageTracker.getChannelStats(channel)?.sent
+        );
+      }
+    });
+
+    it("should resume publishing and receiving after failover", async () => {
+      for (const channel of CHANNELS) {
+        await subscriber.ssubscribe(channel);
+      }
+
+      subscriber.on("smessage", (channelName, _) => {
+        messageTracker.incrementReceived(channelName);
+      });
+
+      // Trigger failover twice
+      for (let i = 0; i < 2; i++) {
+        // Start publishing messages
+        const { controller: publishAbort, result: publishResult } =
+          TestCommandRunner.publishMessagesUntilAbortSignal(
+            publisher,
+            CHANNELS,
+            messageTracker
+          );
+
+        // Trigger failover during publishing
+        const { action_id: failoverActionId } =
+          await faultInjectorClient.triggerAction({
+            type: "failover",
+            parameters: {
+              bdb_id: config.clientConfig.bdbId.toString(),
+              cluster_index: 0,
+            },
+          });
+
+        // Wait for failover to complete
+        await faultInjectorClient.waitForAction(failoverActionId);
+
+        publishAbort.abort();
+        await publishResult;
+
+        for (const channel of CHANNELS) {
+          const sent = messageTracker.getChannelStats(channel)!.sent;
+          const received = messageTracker.getChannelStats(channel)!.received;
+
+          assert.ok(
+            received <= sent,
+            `Channel ${channel}: received (${received}) should be <= sent (${sent})`
+          );
+        }
+
+        // Wait for 2 seconds before resuming publishing
+        await wait(2_000);
+
+        messageTracker.reset();
+
+        const {
+          controller: afterFailoverController,
+          result: afterFailoverResult,
+        } = TestCommandRunner.publishMessagesUntilAbortSignal(
+          publisher,
+          CHANNELS,
+          messageTracker
+        );
+
+        await wait(10_000);
+        afterFailoverController.abort();
+        await afterFailoverResult;
+
+        for (const channel of CHANNELS) {
+          const sent = messageTracker.getChannelStats(channel)!.sent;
+          const received = messageTracker.getChannelStats(channel)!.received;
+          assert.ok(sent > 0, `Channel ${channel} should have sent messages`);
+          assert.ok(
+            received > 0,
+            `Channel ${channel} should have received messages`
+          );
+          assert.strictEqual(
+            received,
+            sent,
+            `Channel ${channel} received (${received}) should equal sent (${sent}) once resumed after failover`
+          );
+        }
+      }
+    });
+
+    it("should NOT receive messages after sunsubscribe", async () => {
+      for (const channel of CHANNELS) {
+        await subscriber.ssubscribe(channel);
+      }
+
+      subscriber.on("smessage", (channelName, _) => {
+        messageTracker.incrementReceived(channelName);
+      });
+
+      const { controller, result } =
+        TestCommandRunner.publishMessagesUntilAbortSignal(
+          publisher,
+          CHANNELS,
+          messageTracker
+        );
+
+      // Wait for 5 seconds, while publishing messages
+      await wait(5_000);
+      controller.abort();
+      await result;
+
+      for (const channel of CHANNELS) {
+        assert.strictEqual(
+          messageTracker.getChannelStats(channel)?.received,
+          messageTracker.getChannelStats(channel)?.sent
+        );
+      }
+
+      // Reset message tracker
+      messageTracker.reset();
+
+      const unsubscribeChannels: string[] = [
+        CHANNELS_BY_SLOT["1000"],
+        CHANNELS_BY_SLOT["8000"],
+        CHANNELS_BY_SLOT["16000"],
+      ];
+
+      for (const channel of unsubscribeChannels) {
+        await subscriber.sunsubscribe(channel);
+      }
+
+      const {
+        controller: afterUnsubscribeController,
+        result: afterUnsubscribeResult,
+      } = TestCommandRunner.publishMessagesUntilAbortSignal(
+        publisher,
+        CHANNELS,
+        messageTracker
+      );
+
+      // Wait for 5 seconds, while publishing messages
+      await wait(5_000);
+      afterUnsubscribeController.abort();
+      await afterUnsubscribeResult;
+
+      for (const channel of unsubscribeChannels) {
+        assert.strictEqual(
+          messageTracker.getChannelStats(channel)?.received,
+          0,
+          `Channel ${channel} should not have received messages after unsubscribe`
+        );
+      }
+
+      // All other channels should have received messages
+      const stillSubscribedChannels = CHANNELS.filter(
+        (channel) => !unsubscribeChannels.includes(channel)
+      );
+
+      for (const channel of stillSubscribedChannels) {
+        assert.ok(
+          messageTracker.getChannelStats(channel)!.received > 0,
+          `Channel ${channel} should have received messages`
+        );
+      }
+    });
+  });
+
+  describe("Multiple Subscribers", () => {
+    let subscriber1: Cluster;
+    let subscriber2: Cluster;
+
+    let publisher: Cluster;
+
+    let messageTracker1: MessageTracker;
+    let messageTracker2: MessageTracker;
+
+    beforeEach(async () => {
+      messageTracker1 = new MessageTracker(CHANNELS);
+      messageTracker2 = new MessageTracker(CHANNELS);
+      subscriber1 = createClusterTestClient(config.clientConfig, {
+        shardedSubscribers: true,
+      });
+      subscriber2 = createClusterTestClient(config.clientConfig, {
+        shardedSubscribers: true,
+      });
+      publisher = createClusterTestClient(config.clientConfig, {
+        shardedSubscribers: true,
+      });
+      await Promise.all([
+        waitClientReady(subscriber1),
+        waitClientReady(subscriber2),
+        waitClientReady(publisher),
+      ]);
+    });
+
+    afterEach(async () => {
+      await Promise.all([
+        subscriber1.quit(),
+        subscriber2.quit(),
+        publisher.quit(),
+      ]);
+    });
+
+    it("should receive messages published to multiple channels", async () => {
+      for (const channel of CHANNELS) {
+        await subscriber1.ssubscribe(channel);
+        await subscriber2.ssubscribe(channel);
+      }
+
+      subscriber1.on("smessage", (channelName, _) => {
+        messageTracker1.incrementReceived(channelName);
+      });
+
+      subscriber2.on("smessage", (channelName, _) => {
+        messageTracker2.incrementReceived(channelName);
+      });
+
+      const { controller, result } =
+        TestCommandRunner.publishMessagesUntilAbortSignal(
+          publisher,
+          CHANNELS,
+          messageTracker1 // Use messageTracker1 for all publishing
+        );
+
+      // Wait for 10 seconds, while publishing messages
+      await wait(10_000);
+      controller.abort();
+      await result;
+
+      for (const channel of CHANNELS) {
+        assert.strictEqual(
+          messageTracker1.getChannelStats(channel)?.received,
+          messageTracker1.getChannelStats(channel)?.sent
+        );
+        assert.strictEqual(
+          messageTracker2.getChannelStats(channel)?.received,
+          messageTracker1.getChannelStats(channel)?.sent
+        );
+      }
+    });
+
+    it("should resume publishing and receiving after failover", async () => {
+      for (const channel of CHANNELS) {
+        await subscriber1.ssubscribe(channel);
+        await subscriber2.ssubscribe(channel);
+      }
+
+      subscriber1.on("smessage", (channelName, _) => {
+        messageTracker1.incrementReceived(channelName);
+      });
+
+      subscriber2.on("smessage", (channelName, _) => {
+        messageTracker2.incrementReceived(channelName);
+      });
+
+      // Start publishing messages
+      const { controller: publishAbort, result: publishResult } =
+        TestCommandRunner.publishMessagesUntilAbortSignal(
+          publisher,
+          CHANNELS,
+          messageTracker1 // Use messageTracker1 for all publishing
+        );
+
+      // Trigger failover during publishing
+      const { action_id: failoverActionId } =
+        await faultInjectorClient.triggerAction({
+          type: "failover",
+          parameters: {
+            bdb_id: config.clientConfig.bdbId.toString(),
+            cluster_index: 0,
+          },
+        });
+
+      // Wait for failover to complete
+      await faultInjectorClient.waitForAction(failoverActionId);
+
+      publishAbort.abort();
+      await publishResult;
+
+      for (const channel of CHANNELS) {
+        const sent = messageTracker1.getChannelStats(channel)!.sent;
+        const received1 = messageTracker1.getChannelStats(channel)!.received;
+        const received2 = messageTracker2.getChannelStats(channel)!.received;
+
+        assert.ok(
+          received1 <= sent,
+          `Channel ${channel}: received (${received1}) should be <= sent (${sent})`
+        );
+        assert.ok(
+          received2 <= sent,
+          `Channel ${channel}: received2 (${received2}) should be <= sent (${sent})`
+        );
+      }
+
+      // Wait for 2 seconds before resuming publishing
+      await wait(2_000);
+
+      messageTracker1.reset();
+      messageTracker2.reset();
+
+      const {
+        controller: afterFailoverController,
+        result: afterFailoverResult,
+      } = TestCommandRunner.publishMessagesUntilAbortSignal(
+        publisher,
+        CHANNELS,
+        messageTracker1
+      );
+
+      await wait(10_000);
+      afterFailoverController.abort();
+      await afterFailoverResult;
+
+      for (const channel of CHANNELS) {
+        const sent = messageTracker1.getChannelStats(channel)!.sent;
+        const received1 = messageTracker1.getChannelStats(channel)!.received;
+        const received2 = messageTracker2.getChannelStats(channel)!.received;
+        assert.ok(sent > 0, `Channel ${channel} should have sent messages`);
+        assert.ok(
+          received1 > 0,
+          `Channel ${channel} should have received messages by subscriber 1`
+        );
+        assert.ok(
+          received2 > 0,
+          `Channel ${channel} should have received messages by subscriber 2`
+        );
+        assert.strictEqual(
+          received1,
+          sent,
+          `Channel ${channel} received (${received1}) should equal sent (${sent}) once resumed after failover by subscriber 1`
+        );
+        assert.strictEqual(
+          received2,
+          sent,
+          `Channel ${channel} received (${received2}) should equal sent (${sent}) once resumed after failover by subscriber 2`
+        );
+      }
+    });
+  });
+});
diff --git a/test/scenario/utils/command-runner.ts b/test/scenario/utils/command-runner.ts
new file mode 100644
index 00000000..2d4a3889
--- /dev/null
+++ b/test/scenario/utils/command-runner.ts
@@ -0,0 +1,91 @@
+import type { Cluster } from "../../../lib";
+
+import type { MessageTracker } from "./message-tracker";
+import { wait } from "./test.util";
+
+/**
+ * Options for the `publishMessagesUntilAbortSignal` method
+ */
+interface PublishMessagesUntilAbortSignalOptions {
+  /**
+   * Number of messages to publish in each batch
+   */
+  batchSize: number;
+  /**
+   * Timeout between batches in milliseconds
+   */
+  timeoutMs: number;
+  /**
+   * Function that generates the message content to be published
+   */
+  createMessage: () => string;
+}
+
+/**
+ * Utility class for running test commands until a stop signal is received
+ */
+export class TestCommandRunner {
+  private static readonly defaultPublishOptions: PublishMessagesUntilAbortSignalOptions =
+    {
+      batchSize: 10,
+      timeoutMs: 10,
+      createMessage: () => Date.now().toString(),
+    };
+
+  /**
+   * Continuously publishes messages to the given Redis channels until aborted.
+   *
+   * @param {Cluster} client - Redis Cluster instance used to publish messages.
+   * @param {string[]} channels - List of channel names to publish messages to.
+   * @param {MessageTracker} messageTracker - Tracks sent and failed message counts per channel.
+   * @param {Partial<PublishMessagesUntilAbortSignalOptions>} [options] - Optional overrides for batch size, timeout, and message factory.
+   * @param {AbortController} [externalAbortController] - Optional external abort controller to control publishing lifecycle.
+   * @returns {{ controller: AbortController, result: Promise<void> }}
+   *   An object containing the abort controller and a promise that resolves when publishing stops.
+   */
+  static publishMessagesUntilAbortSignal(
+    client: Cluster,
+    channels: string[],
+    messageTracker: MessageTracker,
+    options?: Partial<PublishMessagesUntilAbortSignalOptions>,
+    externalAbortController?: AbortController
+  ) {
+    const publishOptions = {
+      ...TestCommandRunner.defaultPublishOptions,
+      ...options,
+    };
+
+    const abortController = externalAbortController ?? new AbortController();
+
+    const result = async () => {
+      while (!abortController.signal.aborted) {
+        const batchPromises: Promise<void>[] = [];
+
+        for (let i = 0; i < publishOptions.batchSize; i++) {
+          for (const channel of channels) {
+            const message = publishOptions.createMessage();
+
+            const publishPromise = client
+              .spublish(channel, message)
+              .then(() => {
+                messageTracker.incrementSent(channel);
+              })
+              .catch(() => {
+                messageTracker.incrementFailed(channel);
+              });
+
+            batchPromises.push(publishPromise);
+          }
+        }
+
+        await Promise.all(batchPromises);
+        await wait(publishOptions.timeoutMs);
+      }
+    };
+
+    return {
+      controller: abortController,
+      result: result(),
+    };
+  }
+}
diff --git a/test/scenario/utils/fault-injector.ts b/test/scenario/utils/fault-injector.ts
new file mode 100644
index 00000000..c4f5c1f5
--- /dev/null
+++ b/test/scenario/utils/fault-injector.ts
@@ -0,0 +1,145 @@
+import { wait } from "./test.util";
+
+export type ActionType =
+  | "dmc_restart"
+  | "failover"
+  | "reshard"
+  | "sequence_of_actions"
+  | "network_failure"
+  | "execute_rlutil_command"
+  | "execute_rladmin_command"
+  | "migrate"
+  | "bind"
+  | "update_cluster_config";
+
+export interface ActionRequest {
+  type: ActionType;
+  parameters?: {
+    bdb_id?: string;
+    [key: string]: unknown;
+  };
+}
+
+export interface ActionStatus {
+  status: string;
+  error: unknown;
+  output: string;
+}
+
+export class FaultInjectorClient {
+  readonly baseUrl: string;
+  readonly fetch: typeof fetch;
+
+  constructor(baseUrl: string, fetchImpl: typeof fetch = fetch) {
+    this.baseUrl = baseUrl.replace(/\/+$/, ""); // trim trailing slash
+    this.fetch = fetchImpl;
+  }
+
+  /**
+   * Lists all available actions.
+   * @throws {Error} When the HTTP request fails or response cannot be parsed as JSON
+   */
+  listActions<T = unknown>(): Promise<T> {
+    return this.request<T>("GET", "/action");
+  }
+
+  /**
+   * Triggers a specific action.
+   * @param action The action request to trigger
+   * @throws {Error} When the HTTP request fails or response cannot be parsed as JSON
+   */
+  triggerAction<T = { action_id: string }>(
+    action: ActionRequest
+  ): Promise<T> {
+    return this.request<T>("POST", "/action", action);
+  }
+
+  /**
+   * Gets the status of a specific action.
+   * @param actionId The ID of the action to check
+   * @throws {Error} When the HTTP request fails or response cannot be parsed as JSON
+   */
+  getActionStatus(actionId: string): Promise<ActionStatus> {
+    return this.request<ActionStatus>("GET", `/action/${actionId}`);
+  }
+
+  /**
+   * Waits for an action to complete.
+   * @param actionId The ID of the action to wait for
+   * @param options Optional timeout and max wait time
+   * @throws {Error} When the action does not complete within the max wait time
+   */
+  async waitForAction(
+    actionId: string,
+    {
+      timeoutMs,
+      maxWaitTimeMs,
+    }: {
+      timeoutMs?: number;
+      maxWaitTimeMs?: number;
+    } = {}
+  ): Promise<ActionStatus> {
+    const timeout = timeoutMs || 1000;
+    const maxWaitTime = maxWaitTimeMs || 60000;
+
+    const startTime = Date.now();
+
+    while (Date.now() - startTime < maxWaitTime) {
+      const action = await this.getActionStatus(actionId);
+
+      if (action.status === "failed") {
+        throw new Error(
+          `Action id: ${actionId} failed! Error: ${action.error}`
+        );
+      } else if (["finished", "success"].includes(action.status)) {
+        return action;
+      }
+
+      await wait(timeout);
+    }
+
+    throw new Error(`Timeout waiting for action ${actionId}`);
+  }
+
+  async request<T>(
+    method: string,
+    path: string,
+    body?: object | string
+  ): Promise<T> {
+    const url = `${this.baseUrl}${path}`;
+    const headers: Record<string, string> = {
+      "Content-Type": "application/json",
+    };
+
+    let payload: string | undefined;
+
+    if (body) {
+      if (typeof body === "string") {
+        headers["Content-Type"] = "text/plain";
+        payload = body;
+      } else {
+        headers["Content-Type"] = "application/json";
+        payload = JSON.stringify(body);
+      }
+    }
+
+    const response = await this.fetch(url, { method, headers, body: payload });
+
+    if (!response.ok) {
+      // Include the response body in the error when it can be read
+      const text = await response.text().catch(() => "");
+      throw new Error(
+        text ? `HTTP ${response.status} - ${text}` : `HTTP ${response.status}`
+      );
+    }
+
+    try {
+      return (await response.json()) as T;
+    } catch {
+      throw new Error(
+        `HTTP ${response.status} - Unable to parse response as JSON`
+      );
+    }
+  }
+}
diff --git a/test/scenario/utils/message-tracker.ts b/test/scenario/utils/message-tracker.ts
new file mode 100644
index 00000000..6393356c
--- /dev/null
+++ b/test/scenario/utils/message-tracker.ts
@@ -0,0 +1,52 @@
+export interface MessageStats {
+  sent: number;
+  received: number;
+  failed: number;
+}
+
+export class MessageTracker {
+  private stats: Record<string, MessageStats> = {};
+
+  constructor(channels: string[]) {
+    this.initializeChannels(channels);
+  }
+
+  private initializeChannels(channels: string[]): void {
+    this.stats = channels.reduce((acc, channel) => {
+      acc[channel] = { sent: 0, received: 0, failed: 0 };
+      return acc;
+    }, {} as Record<string, MessageStats>);
+  }
+
+  reset(): void {
+    Object.keys(this.stats).forEach((channel) => {
+      this.stats[channel] = { sent: 0, received: 0, failed: 0 };
+    });
+  }
+
+  incrementSent(channel: string): void {
+    if (this.stats[channel]) {
+      this.stats[channel].sent++;
+    }
+  }
+
+  incrementReceived(channel: string): void {
+    if (this.stats[channel]) {
+      this.stats[channel].received++;
+    }
+  }
+
+  incrementFailed(channel: string): void {
+    if (this.stats[channel]) {
+      this.stats[channel].failed++;
+    }
+  }
+
+  getChannelStats(channel: string): MessageStats | undefined {
+    return this.stats[channel];
+  }
+
+  getAllStats(): Record<string, MessageStats> {
+    return this.stats;
+  }
+}
diff --git a/test/scenario/utils/test.util.ts b/test/scenario/utils/test.util.ts
new file mode 100644
index 00000000..7227a271
--- /dev/null
+++ b/test/scenario/utils/test.util.ts
@@ -0,0 +1,251 @@
+import { readFileSync } from "fs";
+import { Cluster, ClusterOptions } from "../../../lib";
+
+interface DatabaseEndpoint {
+  addr: string[];
+  addr_type: string;
+  dns_name: string;
+  oss_cluster_api_preferred_endpoint_type: string;
+  oss_cluster_api_preferred_ip_type: string;
+  port: number;
+  proxy_policy: string;
+  uid: string;
+}
+
+interface DatabaseConfig {
+  bdb_id: number;
+  username: string;
+  password: string;
+  tls: boolean;
+  raw_endpoints: DatabaseEndpoint[];
+  endpoints: string[];
+}
+
+type DatabasesConfig = Record<string, DatabaseConfig>;
+
+interface EnvConfig {
+  redisEndpointsConfigPath: string;
+  faultInjectorUrl: string;
+}
+
+export interface RedisConnectionConfig {
+  host: string;
+  port: number;
+  username: string;
+  password: string;
+  tls: boolean;
+  bdbId: number;
+}
+
+export interface TestConfig {
+  clientConfig: RedisConnectionConfig;
+  faultInjectorUrl: string;
+}
+
+/**
+ * Reads environment variables required for the test scenario
+ * @returns Environment configuration object
+ * @throws Error if required environment variables are not set
+ */
+const getEnvConfig = (): EnvConfig => {
+  if (!process.env["REDIS_ENDPOINTS_CONFIG_PATH"]) {
+    throw new Error(
+      "REDIS_ENDPOINTS_CONFIG_PATH environment variable must be set"
+    );
+  }
+
+  if (!process.env["RE_FAULT_INJECTOR_URL"]) {
+    throw new Error("RE_FAULT_INJECTOR_URL environment variable must be set");
+  }
+
+  return {
+    redisEndpointsConfigPath: process.env["REDIS_ENDPOINTS_CONFIG_PATH"],
+    faultInjectorUrl: process.env["RE_FAULT_INJECTOR_URL"],
+  };
+};
+
+/**
+ * Reads database configuration from a file
+ * @param filePath - The path to the database configuration file
+ * @returns Parsed database configuration object
+ * @throws Error if file doesn't exist or JSON is invalid
+ */
+const getDatabaseConfigFromEnv = (filePath: string): DatabasesConfig => {
+  try {
+    const fileContent = readFileSync(filePath, "utf8");
+    return JSON.parse(fileContent) as DatabasesConfig;
+  } catch (_error) {
+    throw new Error(`Failed to read or parse database config from ${filePath}`);
+  }
+};
+
+/**
+ * Gets Redis connection parameters for a specific database
+ * @param databasesConfig - The parsed database configuration object
+ * @param databaseName - Optional name of the database to retrieve (defaults to the first one)
+ * @returns Redis connection configuration with host, port, username, password, and tls
+ * @throws Error if the specified database is not found in the configuration
+ */
+const getDatabaseConfig = (
+  databasesConfig: DatabasesConfig,
+  databaseName?: string
+): RedisConnectionConfig => {
+  const dbConfig = databaseName
+    ? databasesConfig[databaseName]
+    : Object.values(databasesConfig)[0];
+
+  if (!dbConfig) {
+    throw new Error(
+      `Database ${databaseName || ""} not found in configuration`
+    );
+  }
+
+  const endpoint = dbConfig.raw_endpoints[0]; // Use the first endpoint
+
+  if (!endpoint) {
+    throw new Error(`No endpoints found for database ${databaseName}`);
+  }
+
+  return {
+    host: endpoint.dns_name,
+    port: endpoint.port,
+    username: dbConfig.username,
+    password: dbConfig.password,
+    tls: dbConfig.tls,
+    bdbId: dbConfig.bdb_id,
+  };
+};
+
+/**
+ * Builds the test configuration from environment variables
+ * @returns Redis client config and fault injector URL
+ * @throws Error if required environment variables are not set or if database config is invalid
+ */
+export const getConfig = (): TestConfig => {
+  const envConfig = getEnvConfig();
+  const redisConfig = getDatabaseConfigFromEnv(
+    envConfig.redisEndpointsConfigPath
+  );
+
+  return {
+    clientConfig: getDatabaseConfig(redisConfig),
+    faultInjectorUrl: envConfig.faultInjectorUrl,
+  };
+};
+
+/**
+ * Creates a test Cluster client with the provided configuration
+ * @param clientConfig - The Redis connection configuration
+ * @param options - Optional cluster options
+ * @returns The created Redis Cluster client
+ */
+export const createClusterTestClient = (
+  clientConfig: RedisConnectionConfig,
+  options: Partial<ClusterOptions> = {}
+) => {
+  return new Cluster(
+    [
+      {
+        host: clientConfig.host,
+        port: clientConfig.port,
+      },
+    ],
+    {
+      // Spread caller options first so the merged redisOptions below are not overridden
+      ...options,
+      redisOptions: {
+        ...options.redisOptions,
+        ...(clientConfig.password && { password: clientConfig.password }),
+        ...(clientConfig.username && { username: clientConfig.username }),
+      },
+    }
+  );
+};
+
+/**
+ * Waits for an ioredis `Cluster` client to reach the `"ready"` state.
+ *
+ * @param client - An `ioredis` `Cluster` instance.
+ * @param timeoutMs - Timeout in ms (default: 5000).
+ * @returns Promise that resolves when the client is ready.
+ * @throws {Error} If the client errors or does not become ready before the timeout.
+ */
+export const waitClientReady = async (client: Cluster, timeoutMs = 5_000) => {
+  if (client["status"] === "ready") {
+    return;
+  }
+
+  return new Promise<void>((resolve, reject) => {
+    const timeout = setTimeout(() => {
+      client.off("ready", onReady);
+      client.off("error", onError);
+      reject(
+        new Error(
+          `Client ready timeout after ${timeoutMs}ms. Current status: ${client["status"]}`
+        )
+      );
+    }, timeoutMs);
+
+    const onReady = () => {
+      clearTimeout(timeout);
+      client.off("error", onError);
+      client
+        .ping()
+        .then(() => resolve())
+        .catch(reject);
+    };
+
+    const onError = (error: Error) => {
+      clearTimeout(timeout);
+      client.off("ready", onReady);
+      reject(error);
+    };
+
+    client.once("ready", onReady);
+    client.once("error", onError);
+  });
+};
+
+export const wait = (ms: number) => {
+  return new Promise((resolve) => setTimeout(resolve, ms));
+};
+
+/**
+ * A list of example Redis Cluster channel keys covering all slot ranges.
+ */
+export const CHANNELS = [
+  "channel:11kv:1000",
+  "channel:osy:2000",
+  "channel:jn6:3000",
+  "channel:l00:4000",
+  "channel:4ez:5000",
+  "channel:4ek:6000",
+  "channel:9vn:7000",
+  "channel:dw1:8000",
+  "channel:9zi:9000",
+  "channel:4vl:10000",
+  "channel:utl:11000",
+  "channel:lyo:12000",
+  "channel:jzn:13000",
+  "channel:14uc:14000",
+  "channel:mz:15000",
+  "channel:d0v:16000",
];

export const CHANNELS_BY_SLOT = {
  1000: "channel:11kv:1000",
  2000: "channel:osy:2000",
  3000: "channel:jn6:3000",
  4000: "channel:l00:4000",
  5000: "channel:4ez:5000",
  6000: "channel:4ek:6000",
  7000: "channel:9vn:7000",
  8000: "channel:dw1:8000",
  9000: "channel:9zi:9000",
  10000: "channel:4vl:10000",
  11000: "channel:utl:11000",
  12000: "channel:lyo:12000",
  13000: "channel:jzn:13000",
  14000: "channel:14uc:14000",
  15000: "channel:mz:15000",
  16000: "channel:d0v:16000",
} as const;
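For reviewers, a minimal sketch of how the utilities introduced above compose, assuming a file placed alongside the suite in test/scenario, the same REDIS_ENDPOINTS_CONFIG_PATH and RE_FAULT_INJECTOR_URL environment variables, and a reachable fault injector; the standalone run() wrapper, the two-second settle delay, and the console output are illustrative only:

import { FaultInjectorClient } from "./utils/fault-injector";
import { MessageTracker } from "./utils/message-tracker";
import { TestCommandRunner } from "./utils/command-runner";
import {
  CHANNELS,
  createClusterTestClient,
  getConfig,
  wait,
  waitClientReady,
} from "./utils/test.util";

async function run() {
  // Read endpoints and fault-injector URL from the environment
  const config = getConfig();
  const faultInjector = new FaultInjectorClient(config.faultInjectorUrl);

  // Create a sharded-pub/sub-capable cluster client and wait until it is ready
  const publisher = createClusterTestClient(config.clientConfig, {
    shardedSubscribers: true,
  });
  await waitClientReady(publisher);

  // Publish to every test channel until aborted, tracking sent/failed counts
  const tracker = new MessageTracker(CHANNELS);
  const { controller, result } =
    TestCommandRunner.publishMessagesUntilAbortSignal(
      publisher,
      CHANNELS,
      tracker
    );

  // Trigger a failover and wait for the fault injector to report completion
  const { action_id } = await faultInjector.triggerAction({
    type: "failover",
    parameters: {
      bdb_id: config.clientConfig.bdbId.toString(),
      cluster_index: 0,
    },
  });
  await faultInjector.waitForAction(action_id);

  // Give publishing a moment to resume, then stop and inspect the counters
  await wait(2_000);
  controller.abort();
  await result;

  console.log(tracker.getAllStats());
  await publisher.quit();
}

run().catch(console.error);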