From 6055de8734c45ae38d5688d3111afe881e8301f9 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Mon, 7 Aug 2023 11:05:24 -0500 Subject: [PATCH 01/36] v10 wip --- src/attorney.js | 22 +++++++++++++------- src/index.js | 4 +++- src/plans.js | 55 ++++++++++++++++++++----------------------------- 3 files changed, 40 insertions(+), 41 deletions(-) diff --git a/src/attorney.js b/src/attorney.js index 2a6fae4d..26c71c50 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -22,6 +22,10 @@ const WARNINGS = { CRON_DISABLED: { message: 'Archive interval is set less than 60s. Cron processing is disabled.', code: 'pg-boss-w03' + }, + ON_COMPLETE_REMOVED: { + message: '\'onComplete\' option detected. This option has been removed. Use deadLetter if needed.', + code: 'pg-boss-w04' } } @@ -255,6 +259,8 @@ function applyExpirationConfig (config, defaults) { emitWarning(WARNINGS.EXPIRE_IN_REMOVED) } + const MAX_EXPIRATION_HOURS = 24 + assert(!('expireInSeconds' in config) || config.expireInSeconds >= 1, 'configuration assert: expireInSeconds must be at least every second') @@ -265,21 +271,23 @@ function applyExpirationConfig (config, defaults) { 'configuration assert: expireInHours must be at least every hour') const expireIn = ('expireInHours' in config) - ? `${config.expireInHours} hours` + ? config.expireInHours * 60 * 60 : ('expireInMinutes' in config) - ? `${config.expireInMinutes} minutes` + ? config.expireInMinutes * 60 : ('expireInSeconds' in config) - ? `${config.expireInSeconds} seconds` - : defaults + ? config.expireInSeconds + : defaults && defaults.expireIn ? 
defaults.expireIn - : '15 minutes' + : 15 * 60 + + assert(expireIn / 60 / 60 < MAX_EXPIRATION_HOURS, `configuration assert: expiration cannot exceed ${MAX_EXPIRATION_HOURS} hours`) config.expireIn = expireIn } function applyRetryConfig (config, defaults) { assert(!('retryDelay' in config) || (Number.isInteger(config.retryDelay) && config.retryDelay >= 0), 'retryDelay must be an integer >= 0') - assert(!('retryLimit' in config) || (Number.isInteger(config.retryLimit) && config.retryLimit >= 0), 'retryLimit must be an integer >= 0') + assert(!('retryLimit' in config) || (Number.isInteger(config.retryLimit) && config.retryLimit >= 1), 'retryLimit must be an integer >= 1') assert(!('retryBackoff' in config) || (config.retryBackoff === true || config.retryBackoff === false), 'retryBackoff must be either true or false') if (defaults) { @@ -289,7 +297,7 @@ function applyRetryConfig (config, defaults) { } config.retryDelay = config.retryDelay || 0 - config.retryLimit = config.retryLimit || 0 + config.retryLimit = config.retryLimit || 2 config.retryBackoff = !!config.retryBackoff config.retryDelay = (config.retryBackoff && !config.retryDelay) ? 1 : config.retryDelay config.retryLimit = (config.retryDelay && !config.retryLimit) ? 
1 : config.retryLimit diff --git a/src/index.js b/src/index.js index 775bc345..806e4fb8 100644 --- a/src/index.js +++ b/src/index.js @@ -98,7 +98,9 @@ class PgBoss extends EventEmitter { await this.db.open() } - await this.contractor.start() + if (!this.config.noContractor) { + await this.contractor.start() + } this.stopped = false this.started = true diff --git a/src/plans.js b/src/plans.js index 73dc0014..24f927f3 100644 --- a/src/plans.js +++ b/src/plans.js @@ -82,6 +82,7 @@ function create (schema, version) { createVersionTable(schema), createJobStateEnum(schema), createJobTable(schema), cloneJobTableForArchive(schema), + addPrimaryKeyToArchive(schema), createScheduleTable(schema), createSubscriptionTable(schema), @@ -152,8 +153,8 @@ function createJobTable (schema) { createdOn timestamp with time zone not null default now(), completedOn timestamp with time zone, keepUntil timestamp with time zone NOT NULL default now() + interval '14 days', - on_complete boolean not null default false, - output jsonb + output jsonb, + dead_letter text ) ` } @@ -162,6 +163,10 @@ function cloneJobTableForArchive (schema) { return `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)` } +function addPrimaryKeyToArchive (schema) { + return `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (id)` +} + function addArchivedOnToArchive (schema) { return `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()` } @@ -434,15 +439,6 @@ function completeJobs (schema) { WHERE id IN (SELECT UNNEST($1::uuid[])) AND state = '${states.active}' RETURNING * - ), completion_jobs as ( - INSERT INTO ${schema}.job (name, data, keepUntil) - SELECT - '${COMPLETION_JOB_PREFIX}' || name, - ${buildJsonCompletionObject(true)}, - ${keepUntilInheritance} - FROM results - WHERE NOT name LIKE '${COMPLETION_JOB_PREFIX}%' - AND on_complete ) SELECT COUNT(*) FROM results ` @@ -466,13 +462,13 @@ function failJobs (schema) { ), completion_jobs as ( INSERT INTO 
${schema}.job (name, data, keepUntil) SELECT - '${COMPLETION_JOB_PREFIX}' || name, + dead_letter, ${buildJsonCompletionObject(true)}, ${keepUntilInheritance} FROM results WHERE state = '${states.failed}' - AND NOT name LIKE '${COMPLETION_JOB_PREFIX}%' - AND on_complete + AND NOT name = dead_letter + AND dead_letter IS NOT NULL ) SELECT COUNT(*) FROM results ` @@ -484,7 +480,7 @@ function expire (schema) { UPDATE ${schema}.job SET state = CASE WHEN retryCount < retryLimit THEN '${states.retry}'::${schema}.job_state - ELSE '${states.expired}'::${schema}.job_state + ELSE '${states.failed}'::${schema}.job_state END, completedOn = ${retryCompletedOnCase}, startAfter = ${retryStartAfterCase} @@ -500,7 +496,6 @@ function expire (schema) { FROM results WHERE state = '${states.expired}' AND NOT name LIKE '${COMPLETION_JOB_PREFIX}%' - AND on_complete ` } @@ -547,7 +542,7 @@ function insertJob (schema) { retryDelay, retryBackoff, keepUntil, - on_complete + dead_letter ) SELECT id, @@ -563,7 +558,7 @@ function insertJob (schema) { retryDelay, retryBackoff, keepUntil, - on_complete + dead_letter FROM ( SELECT *, CASE @@ -594,7 +589,7 @@ function insertJob (schema) { $11::int as retryDelay, $12::bool as retryBackoff, $13::text as keepUntilValue, - $14::boolean as on_complete + $14::text as dead_letter ) j1 ) j2 ) j3 @@ -617,7 +612,7 @@ function insertJobs (schema) { retryBackoff, singletonKey, keepUntil, - on_complete + dead_letter ) SELECT COALESCE(id, gen_random_uuid()) as id, @@ -631,7 +626,7 @@ function insertJobs (schema) { COALESCE("retryBackoff", false) as retryBackoff, "singletonKey", COALESCE("keepUntil", now() + interval '14 days') as keepUntil, - COALESCE("onComplete", false) as onComplete + "deadLetter" FROM json_to_recordset($1) as x( id uuid, name text, @@ -644,7 +639,7 @@ function insertJobs (schema) { "singletonKey" text, "expireInSeconds" integer, "keepUntil" timestamp with time zone, - "onComplete" boolean + "deadLetter" text ) ON CONFLICT DO NOTHING ` @@ 
-661,22 +656,16 @@ function archive (schema, completedInterval, failedInterval = completedInterval) return ` WITH archived_rows AS ( DELETE FROM ${schema}.job - WHERE ( - state <> '${states.failed}' AND completedOn < (now() - interval '${completedInterval}') - ) - OR ( - state = '${states.failed}' AND completedOn < (now() - interval '${failedInterval}') - ) - OR ( - state < '${states.active}' AND keepUntil < now() - ) + WHERE (state <> '${states.failed}' AND completedOn < (now() - interval '${completedInterval}')) + OR (state = '${states.failed}' AND completedOn < (now() - interval '${failedInterval}')) + OR (state < '${states.active}' AND keepUntil < now()) RETURNING * ) INSERT INTO ${schema}.archive ( - id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, on_complete, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, dead_letter, output ) SELECT - id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, on_complete, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, dead_letter, output FROM archived_rows ` } From d4b8ce5683faff7b8b43e387157edadcd1d2825b Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sat, 26 Aug 2023 15:32:36 -0500 Subject: [PATCH 02/36] wip --- docs/readme.md | 92 ++-------------- releasenotesv10.md | 48 +++++++++ src/attorney.js | 24 ++--- src/boss.js | 8 +- src/index.js | 2 +- src/manager.js | 71 +++++------- src/migrationStore.js | 108 +++++++------------ src/plans.js | 185 +++++++++++++------------------- src/timekeeper.js | 6 +- 
test/cancelTest.js | 15 ++- test/completeTest.js | 244 ++++++------------------------------------ test/expireTest.js | 68 +++++------- test/failureTest.js | 84 +++++---------- test/insertTest.js | 8 +- test/moduleTest.js | 1 - test/workTest.js | 33 +++--- types.d.ts | 26 +---- 17 files changed, 325 insertions(+), 698 deletions(-) create mode 100644 releasenotesv10.md diff --git a/docs/readme.md b/docs/readme.md index b7ceb607..a584237f 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -34,12 +34,9 @@ - [`fetch()`](#fetch) - [`fetch(name)`](#fetchname) - [`fetch(name, batchSize, [, options])`](#fetchname-batchsize--options) - - [`fetchCompleted(name [, batchSize] [, options])`](#fetchcompletedname--batchsize--options) - [`work()`](#work) - [`work(name [, options], handler)`](#workname--options-handler) - - [`onComplete(name [, options], handler)`](#oncompletename--options-handler) - [`offWork(value)`](#offworkvalue) - - [`offComplete(value)`](#offcompletevalue) - [`publish(event, data, options)`](#publishevent-data-options) - [`subscribe(event, name)`](#subscribeevent-name) - [`unsubscribe(event, name)`](#unsubscribeevent-name) @@ -71,18 +68,14 @@ You may use as many instances as needed to connect to the same Postgres database If you require multiple installations in the same database, such as for large volume queues, you may wish to specify a separate schema per install to achieve partitioning. -Architecturally, pg-boss is somewhat similar to queue products such as AWS SQS, which primarily acts as a store of jobs that are "pulled", not "pushed" from the server. If at least one pg-boss instance is running, internal maintenance jobs will be periodically run to make sure fetched jobs that are never completed are marked as expired or retried (if configured). If and when this happens, think of a job with a retry configuration to act just like the SQS message visibility timeout. 
In regards to job delivery, Postgres [SKIP LOCKED](http://blog.2ndquadrant.com/what-is-select-skip-locked-for-in-postgresql-9-5) will guarantee exactly-once, which is only available in SQS via FIFO queues (and its throughput limitations). However, even if you have exactly-once delivery, this is not a guarantee that a job will never be processed more than once if you opt into retries, so keep the general recommendation for idempotency with queueing systems in mind. +Architecturally, pg-boss is somewhat similar to queue products such as AWS SQS, which primarily acts as a store of jobs that are "pulled", not "pushed" from the server. If at least one pg-boss instance is running, internal maintenance jobs will be periodically run to make sure fetched jobs that are never completed are moved into the retry or failed state (this is somewhat similar to the SQS message visibility timeout). [SKIP LOCKED](https://www.2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5) guarantees exactly-once delivery, which is only available in SQS via FIFO queues (with the caveat of their throughput limitations). Keep in mind that exactly-once delivery is not a guarantee that a job will never be processed more than once because of retries, so keep the general recommendation for idempotency with queueing systems in mind. ## Job states -All jobs start out in the `created` state and become `active` when picked up for work. If job processing completes successfully, jobs will go to `completed`. If a job fails, it will typcially enter the `failed` state. However, if a job has retry options configured, it will enter the `retry` state on failure instead and have a chance to re-enter `active` state. It's also possible for `active` jobs to become `expired`, which happens when job processing takes too long. Jobs can also enter `cancelled` state via [`cancel(id)`](#cancelid) or [`cancel([ids])`](#cancelids). 
+All jobs start out in the `created` state and become `active` when picked up for work. If job processing completes successfully, jobs will go to `completed`. If a job fails, it will typically enter the `failed` state. However, if a job has retry options configured, it will enter the `retry` state on failure instead and have a chance to re-enter `active` state. Jobs can also enter `cancelled` state via [`cancel(id)`](#cancelid) or [`cancel([ids])`](#cancelids). All jobs that are `completed`, `cancelled` or `failed` become eligible for archiving (i.e. they will be moved into the archive table) after the configured `archiveCompletedAfterSeconds` time. Once archived, jobs will be automatically deleted by pg-boss after the configured deletion period. -Here's a state diagram that shows the possible states and their transitions: - -![job state diagram](./images/job-states.png) - # Database install pg-boss can be installed into any database. When started, it will detect if it is installed and automatically create the required schema for all queue operations if needed. If the database doesn't already have the pgcrypto extension installed, you will need to have a superuser add it before pg-boss can create its schema.
@@ -189,7 +182,6 @@ The payload of the event is an object with a key per queue and state, such as th "retry": 40, "active": 26, "completed": 3400, - "expired": 4, "cancelled": 0, "failed": 49, "all": 4049 @@ -199,7 +191,6 @@ The payload of the event is an object with a key per queue and state, such as th "retry": 0, "active": 0, "completed": 645, - "expired": 0, "cancelled": 0, "failed": 0, "all": 645 @@ -209,7 +200,6 @@ The payload of the event is an object with a key per queue and state, such as th "retry": 40, "active": 26, "completed": 4045, - "expired": 4, "cancelled": 0, "failed": 4, "all": 4694 @@ -594,12 +584,11 @@ For example, if you set the `singletonMinutes` to 1, then submit 2 jobs within a Setting `singletonNextSlot` to true will cause the job to be scheduled to run after the current time slot if and when a job is throttled. This option is set to true, for example, when calling the convenience function `sendDebounced()`. -**Completion jobs** +**Dead Letter Queues** -* **onComplete**, bool (Default: false) - -When a job completes, a completion job will be created in the queue, copying the same retention policy as the job, for the purpose of `onComplete()` or `fetchCompleted()`. If completion jobs are not used, they will be archived according to the retention policy. If the queue in question has a very high volume, this can be set to `false` to bypass creating the completion job. This can also be set in the constructor as a default for all calls to `send()`. +* **deadLetter**, string +When a job fails after all retries, if a `deadLetter` property exists, the job's payload will be copied into that queue, copying the same retention and retry configuration as the original job. 
```js @@ -694,7 +683,7 @@ interface JobInsert { singletonKey?: string; expireInSeconds?: number; keepUntil?: Date | string; - onComplete?: boolean + deadLetter?: string; } ``` @@ -743,14 +732,9 @@ Typically one would use `work()` for automated polling for new jobs based upon a | createdon | string, timestamp | | completedon | string, timestamp | | keepuntil | string, timestamp | - | oncomplete | bool | + | deadletter | string | | output | object | - * `enforceSingletonQueueActiveLimit`, bool - - If `true`, modifies the behavior of the `useSingletonQueue` flag to allow a max of 1 job to be queued plus a max of 1 job to be active. - >Note that use of this option can impact performance on instances with large numbers of jobs. - **Resolves** - `[job]`: array of job objects, `null` if none found @@ -784,9 +768,6 @@ for (let i = 0; i < jobs.length; i++) { } ``` -### `fetchCompleted(name [, batchSize] [, options])` - -Same as `fetch()`, but retrieves any completed jobs. See [`onComplete()`](#oncompletename--options-handler) for more information. ## `work()` Adds a new polling worker for a queue and executes the provided callback function when jobs are found. Multiple workers can be added if needed. @@ -832,7 +813,7 @@ The default concurrency for `work()` is 1 job every 2 seconds. Both the interval **Polling options** -How often workers will poll the queue table for jobs. Available in the constructor as a default or per worker in `work()` and `onComplete()`. +How often workers will poll the queue table for jobs. Available in the constructor as a default or per worker in `work()`. * **newJobCheckInterval**, int @@ -878,57 +859,6 @@ await boss.work('email-welcome', { batchSize: 5 }, ) ``` -### `onComplete(name [, options], handler)` - -Sometimes when a job completes, expires or fails, it's important enough to trigger other things that should react to it. 
`onComplete` works identically to `work()` and was created to facilitate the creation of orchestrations or sagas between jobs that may or may not know about each other. This common messaging pattern allows you to keep multi-job flow logic out of the individual job handlers so you can manage things in a more centralized fashion while not losing your mind. As you most likely already know, asynchronous jobs are complicated enough already. Internally, these jobs have a special prefix of `__state__completed__`. - -The callback for `onComplete()` returns a job containing the original job and completion details. `request` will be the original job as submitted with `id`, `name` and `data`. `response` may or may not have a value based on arguments in [complete()](#completeid--data) or [fail()](#failid--data). - -Here's an example from the test suite showing this in action. - -```js -const jobName = 'onCompleteFtw' -const requestPayload = { token:'trivial' } -const responsePayload = { message: 'so verbose', code: '1234' } - -boss.onComplete(jobName, job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual(job.data.request.data.token, requestPayload.token) - assert.strictEqual(job.data.response.message, responsePayload.message) - assert.strictEqual(job.data.response.code, responsePayload.code) - - finished() // test suite completion callback -}) - -const jobId = await boss.send(jobName, requestPayload) -const job = await boss.fetch(jobName) -await boss.complete(job.id, responsePayload) -``` - -The following is an example data object from the job retrieved in onComplete() above. 
- -```js -{ - "request": { - "id": "26a608d0-79bf-11e8-8391-653981c16efd", - "name": "onCompleteFtw", - "data": { - "token": "trivial" - } - }, - "response": { - "message": "so verbose", - "code": "1234" - }, - "failed": false, - "state": "completed", - "createdOn": "2018-06-26T23:04:12.9392-05:00", - "startedOn": "2018-06-26T23:04:12.945533-05:00", - "completedOn": "2018-06-26T23:04:12.949092-05:00", - "retryCount": 0 -} -``` - ## `offWork(value)` Removes a worker by name or id and stops polling. @@ -938,10 +868,6 @@ Removes a worker by name or id and stops polling. If a string, removes all workers found matching the name. If an object, only the worker with a matching `id` will be removed. -### `offComplete(value)` - -Similar to `offWork()`, but removes an `onComplete()` worker. - ## `publish(event, data, options)` Publish an event with optional data and options (Same as `send()` args). Looks up all subscriptions for the event and sends jobs to all those queues. Returns an array of job ids. @@ -1033,7 +959,7 @@ Resumes a set of cancelled jobs. ## `complete(id [, data, options])` -Completes an active job. This would likely only be used with `fetch()`. Accepts an optional `data` argument for usage with [`onComplete()`](#oncompletename--options-handler) state-based workers or `fetchCompleted()`. +Completes an active job. This would likely only be used with `fetch()`. Accepts an optional `data` argument. The promise will resolve on a successful completion, or reject if the job could not be completed. diff --git a/releasenotesv10.md b/releasenotesv10.md new file mode 100644 index 00000000..e9dd1d15 --- /dev/null +++ b/releasenotesv10.md @@ -0,0 +1,48 @@ +1. 
Replace index semantics for throttling and singleton + +```sql + -- anything with singletonKey means "only 1 job can be queued or active at a time" + -- this doesn't seem very useful, since you lose the ability to queue a job that needs to be run later NUKE + CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) + WHERE state < '${states.completed}' + AND singletonOn IS NULL + AND NOT singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' + + -- "singleton queue" means "only 1 job can be queued at a time" + -- this seems more like what people want when they think "one job at a time" + CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) + WHERE state < '${states.active}' + AND singletonOn IS NULL + AND singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' + + -- anything with singletonOn means "only 1 job within this time period, queued, active or completed" + -- Keeping completed jobs and preventing queueing a new one until after the maintenance runs? Doesn't seem very helpful + -- this is only for job creation throttling, so we probably need to keep it + CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) + WHERE state < '${states.expired}' + AND singletonKey IS NULL + + -- anything with both singletonOn and singletonKey means "only 1 job within this time period with this key, queued, active or completed" + -- Same as previous, but scoped to a filter key + CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) + WHERE state < '${states.expired}' + +``` + +2. Should we implement message group ids like SQS? This would require a new tracking table for in-flight groups and opt-in filtering + +3. consolidate failed states: expired => failed + +4. Introduce dead letter queue config + * Removes completion jobs and onComplete config + * Allows retries in dlq, since they become just like any other queue + +5. 
Add primary key to archive + * allows replication of database for read-replica and/or HA use cases + * Existing archive table will be renamed to archive_backup and kept until the next release of pgboss + +6. Allow instances to connect without trying to migrate to latest version (instances that should be able to process jobs, but not have access to schema changes or upgrades) + +7. Add peek API for running TOP N queries against job tables + +8. Add manual maintenance API for one-off upgrade API without processing queues diff --git a/src/attorney.js b/src/attorney.js index 26c71c50..7b605a17 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -7,14 +7,11 @@ module.exports = { checkInsertArgs, checkWorkArgs, checkFetchArgs, + queueNameHasPatternMatch, warnClockSkew } const WARNINGS = { - EXPIRE_IN_REMOVED: { - message: '\'expireIn\' option detected. This option has been removed. Use expireInSeconds, expireInMinutes or expireInHours.', - code: 'pg-boss-w01' - }, CLOCK_SKEW: { message: 'Timekeeper detected clock skew between this instance and the database server. This will not affect scheduling operations, but this warning is shown any time the skew exceeds 60 seconds.', code: 'pg-boss-w02' @@ -24,7 +21,7 @@ const WARNINGS = { code: 'pg-boss-w03' }, ON_COMPLETE_REMOVED: { - message: '\'onComplete\' option detected. This option has been removed. Use deadLetter if needed.', + message: '\'onComplete\' option detected. This option has been removed. Consider deadLetter if needed.', code: 'pg-boss-w04' } } @@ -64,7 +61,6 @@ function checkSendArgs (args, defaults) { applyRetryConfig(options, defaults) applyExpirationConfig(options, defaults) applyRetentionConfig(options, defaults) - applyCompletionConfig(options, defaults) applySingletonKeyConfig(options) const { startAfter, singletonSeconds, singletonMinutes, singletonHours } = options @@ -154,6 +150,10 @@ function sanitizeQueueNameForFetch (name) { return name.replace(/[%_*]/g, match => match === '*' ? 
'%' : '\\' + match) } +function queueNameHasPatternMatch (name) { + return name.includes('*') +} + function getConfig (value) { assert(value && (typeof value === 'object' || typeof value === 'string'), 'configuration assert: string or config object is required to connect to postgres') @@ -173,7 +173,6 @@ function getConfig (value) { applyNewJobCheckInterval(config) applyExpirationConfig(config) applyRetentionConfig(config) - applyCompletionConfig(config) return config } @@ -215,17 +214,6 @@ function applyArchiveFailedConfig (config) { } } -function applyCompletionConfig (config, defaults) { - assert(!('onComplete' in config) || config.onComplete === true || config.onComplete === false, - 'configuration assert: onComplete must be either true or false') - - if (!('onComplete' in config)) { - config.onComplete = defaults - ? defaults.onComplete - : false - } -} - function applyRetentionConfig (config, defaults) { assert(!('retentionSeconds' in config) || config.retentionSeconds >= 1, 'configuration assert: retentionSeconds must be at least every second') diff --git a/src/boss.js b/src/boss.js index 01a47c91..4d58d2f3 100644 --- a/src/boss.js +++ b/src/boss.js @@ -101,8 +101,7 @@ class Boss extends EventEmitter { options = { startAfter, retentionSeconds: this.maintenanceIntervalSeconds * 4, - singletonKey: queues.MAINTENANCE, - onComplete: false + singletonKey: queues.MAINTENANCE } await this.manager.send(queues.MAINTENANCE, null, options) @@ -114,8 +113,7 @@ class Boss extends EventEmitter { options = { startAfter, retentionSeconds: this.monitorIntervalSeconds * 4, - singletonKey: queues.MONITOR_STATES, - onComplete: false + singletonKey: queues.MONITOR_STATES } await this.manager.send(queues.MONITOR_STATES, null, options) @@ -137,7 +135,7 @@ class Boss extends EventEmitter { await this.setMaintenanceTime() - this.emit('maintenance', { ms: ended - started }) + this.emit(events.maintenance, { ms: ended - started }) if (!this.stopped) { await 
this.manager.complete(job.id) // pre-complete to bypass throttling diff --git a/src/index.js b/src/index.js index 806e4fb8..ee8d77e4 100644 --- a/src/index.js +++ b/src/index.js @@ -73,7 +73,7 @@ class PgBoss extends EventEmitter { function promoteFunction (obj, func) { this[func.name] = (...args) => { - const shouldRun = !this.started || !((func.name === 'work' || func.name === 'onComplete') && (this.stopped || this.stoppingOn)) + const shouldRun = !this.started || !(func.name === 'work' && (this.stopped || this.stoppingOn)) if (shouldRun) { return func.apply(obj, args) diff --git a/src/manager.js b/src/manager.js index 29727f03..9addcdb4 100644 --- a/src/manager.js +++ b/src/manager.js @@ -15,7 +15,7 @@ const { QUEUES: TIMEKEEPER_QUEUES } = require('./timekeeper') const INTERNAL_QUEUES = Object.values(BOSS_QUEUES).concat(Object.values(TIMEKEEPER_QUEUES)).reduce((acc, i) => ({ ...acc, [i]: i }), {}) const plans = require('./plans') -const { COMPLETION_JOB_PREFIX, SINGLETON_QUEUE_KEY } = plans +const { SINGLETON_QUEUE_KEY } = plans const WIP_EVENT_INTERVAL = 2000 const WIP_DEBOUNCE_OPTIONS = { leading: true, trailing: true, maxWait: WIP_EVENT_INTERVAL } @@ -72,12 +72,9 @@ class Manager extends EventEmitter { this.resume, this.fail, this.fetch, - this.fetchCompleted, this.work, this.offWork, this.notifyWorker, - this.onComplete, - this.offComplete, this.publish, this.subscribe, this.unsubscribe, @@ -117,11 +114,6 @@ class Manager extends EventEmitter { return await this.watch(name, options, callback) } - async onComplete (name, ...args) { - const { options, callback } = Attorney.checkWorkArgs(name, args, this.config) - return await this.watch(COMPLETION_JOB_PREFIX + name, options, callback) - } - addWorker (worker) { this.workers.set(worker.id, worker) } @@ -185,8 +177,7 @@ class Manager extends EventEmitter { teamSize = 1, teamConcurrency = 1, teamRefill: refill = false, - includeMetadata = false, - enforceSingletonQueueActiveLimit = false + includeMetadata = false 
} = options const id = uuid.v4() @@ -210,7 +201,9 @@ class Manager extends EventEmitter { createTeamRefillPromise() } - const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata, enforceSingletonQueueActiveLimit }) + const patternMatch = Attorney.queueNameHasPatternMatch(name) + + const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata, patternMatch }) const onFetch = async (jobs) => { if (this.config.__test__throw_worker) { @@ -329,14 +322,6 @@ class Manager extends EventEmitter { return await Promise.all(result.rows.map(({ name }) => this.send(name, ...args))) } - async offComplete (value) { - if (typeof value === 'string') { - value = COMPLETION_JOB_PREFIX + value - } - - return await this.offWork(value) - } - async send (...args) { const { name, data, options } = Attorney.checkSendArgs(args, this.config) return await this.createJob(name, data, options) @@ -405,7 +390,7 @@ class Manager extends EventEmitter { retryBackoff, retryLimit, retryDelay, - onComplete + deadLetter } = options const id = uuid[this.config.uuid]() @@ -424,7 +409,7 @@ class Manager extends EventEmitter { retryDelay, // 11 retryBackoff, // 12 keepUntil, // 13 - onComplete // 14 + deadLetter // 14 ] const db = wrapper || this.db const result = await db.executeSql(this.insertJobCommand, values) @@ -476,25 +461,31 @@ class Manager extends EventEmitter { async fetch (name, batchSize, options = {}) { const values = Attorney.checkFetchArgs(name, batchSize, options) const db = options.db || this.db - const preparedStatement = this.nextJobCommand(options.includeMetadata || false, options.enforceSingletonQueueActiveLimit || false) + const nextJobSql = this.nextJobCommand(options.includeMetadata || false) const statementValues = [values.name, batchSize || 1] let result - if (options.enforceSingletonQueueActiveLimit && !options.db) { - // Prepare/format now and send multi-statement transaction - const fetchQuery = preparedStatement - 
.replace('$1', Db.quotePostgresStr(statementValues[0])) - .replace('$2', statementValues[1].toString()) - // eslint-disable-next-line no-unused-vars - const [_begin, _setLocal, fetchResult, _commit] = await db.executeSql([ - 'BEGIN', - 'SET LOCAL jit = OFF', // JIT can slow things down significantly - fetchQuery, - 'COMMIT' - ].join(';\n')) - result = fetchResult - } else { - result = await db.executeSql(preparedStatement, statementValues) + + try { + if (!options.db) { + // Prepare/format now and send multi-statement transaction + const fetchQuery = nextJobSql + .replace('$1', Db.quotePostgresStr(statementValues[0])) + .replace('$2', statementValues[1].toString()) + + // eslint-disable-next-line no-unused-vars + const [_begin, _setLocal, fetchResult, _commit] = await db.executeSql([ + 'BEGIN', + 'SET LOCAL jit = OFF', // JIT can slow things down significantly + fetchQuery, + 'COMMIT' + ].join(';\n')) + result = fetchResult + } else { + result = await db.executeSql(nextJobSql, statementValues) + } + } catch (err) { + // errors from fetchquery should only be unique constraint violations } if (!result || result.rows.length === 0) { @@ -504,10 +495,6 @@ class Manager extends EventEmitter { return result.rows.length === 1 && !batchSize ? 
result.rows[0] : result.rows } - async fetchCompleted (name, batchSize, options = {}) { - return await this.fetch(COMPLETION_JOB_PREFIX + name, batchSize, options) - } - mapCompletionIdArg (id, funcName) { const errorMessage = `${funcName}() requires an id` diff --git a/src/migrationStore.js b/src/migrationStore.js index 08fa1b44..c3c6de72 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -64,6 +64,43 @@ function migrate (value, version, migrations) { function getAll (schema) { return [ + { + release: '10.0.0', + version: 21, + previous: 20, + install: [ + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, + `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, + `DROP TYPE ${schema}.job_state`, + `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','cancelled','failed')`, + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.archive RENAME to archive_backup`, + `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)`, + `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (id)`, + `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()`, + `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)`, + `CREATE INDEX archive_name_idx ON ${schema}.archive(name)`, + `DROP INDEX ${schema}.job_singletonKey`, + `DROP INDEX ${schema}.job_singleton_queue`, + `DROP INDEX ${schema}.job_singletonOn`, + `DROP INDEX ${schema}.job_singletonKeyOn`, + `CREATE UNIQUE INDEX job_singleton ON ${schema}.job (name, state) WHERE state <= 'active' AND singletonKey = '__pgboss__singleton_queue'`, + `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn) WHERE state <= 'completed' AND singletonOn IS NOT NULL AND singletonKey = '__pgboss__singleton_queue'`, + `CREATE UNIQUE INDEX job_throttle_key_on ON ${schema}.job (name, singletonOn, singletonKey) WHERE state <= 'completed' AND singletonOn IS NOT NULL AND singletonKey IS NOT 
NULL` + ], + uninstall: [ + `ALTER TYPE ${schema}.job_state ADD VALUE 'expired' AFTER 'completed'`, + `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, + `DROP TABLE IF EXISTS ${schema}.archive_backup`, + `DROP INDEX ${schema}.job_singleton`, + `DROP INDEX ${schema}.job_throttle_on`, + `DROP INDEX ${schema}.job_throttle_key_on`, + `CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) WHERE state < 'expired' AND singletonKey IS NULL`, + `CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < 'expired'`, + `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, + `CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < 'active' AND singletonOn IS NULL AND singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'` + ] + }, { release: '7.4.0', version: 20, @@ -97,77 +134,6 @@ function getAll (schema) { uninstall: [ `DROP TABLE ${schema}.subscription` ] - }, - { - release: '6.1.1', - version: 18, - previous: 17, - install: [ - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET DEFAULT false` - ] - }, - { - release: '6.0.0', - version: 17, - previous: 16, - install: [ - `DROP INDEX ${schema}.job_singletonKey`, - `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey = '__pgboss__singleton_queue'`, - `CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < 'active' AND singletonOn IS NULL AND singletonKey = '__pgboss__singleton_queue'`, - `CREATE INDEX IF NOT EXISTS job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, - `ALTER TABLE ${schema}.job ADD output jsonb`, - `ALTER TABLE ${schema}.archive ADD output jsonb`, - `ALTER TABLE ${schema}.job ALTER COLUMN 
on_complete SET DEFAULT false`, - `ALTER TABLE ${schema}.job ALTER COLUMN keepuntil SET DEFAULT now() + interval '14 days'` - ], - uninstall: [ - `DROP INDEX ${schema}.job_fetch`, - `DROP INDEX ${schema}.job_singleton_queue`, - `DROP INDEX ${schema}.job_singletonKey`, - `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL`, - `ALTER TABLE ${schema}.job DROP COLUMN output`, - `ALTER TABLE ${schema}.archive DROP COLUMN output`, - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET DEFAULT true`, - `ALTER TABLE ${schema}.job ALTER COLUMN keepuntil SET DEFAULT now() + interval '30 days'` - ] - }, - { - release: '5.2.0', - version: 16, - previous: 15, - install: [ - `ALTER TABLE ${schema}.job ADD on_complete boolean`, - `UPDATE ${schema}.job SET on_complete = true`, - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET DEFAULT true`, - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET NOT NULL`, - `ALTER TABLE ${schema}.archive ADD on_complete boolean` - ], - uninstall: [ - `ALTER TABLE ${schema}.job DROP COLUMN on_complete`, - `ALTER TABLE ${schema}.archive DROP COLUMN on_complete` - ] - }, - { - release: '5.0.6', - version: 15, - previous: 14, - install: [ - `ALTER TABLE ${schema}.version ADD cron_on timestamp with time zone` - ], - uninstall: [ - `ALTER TABLE ${schema}.version DROP COLUMN cron_on` - ] - }, - { - release: '5.0.0', - version: 14, - previous: 13, - install: [ - `ALTER TABLE ${schema}.version ADD maintained_on timestamp with time zone` - ], - uninstall: [ - `ALTER TABLE ${schema}.version DROP COLUMN maintained_on` - ] } ] } diff --git a/src/plans.js b/src/plans.js index 24f927f3..9517ebac 100644 --- a/src/plans.js +++ b/src/plans.js @@ -5,7 +5,6 @@ const states = { retry: 'retry', active: 'active', completed: 'completed', - expired: 'expired', cancelled: 'cancelled', failed: 'failed' } @@ -13,7 +12,10 @@ const states = { const DEFAULT_SCHEMA = 'pgboss' const 
COMPLETION_JOB_PREFIX = `__state__${states.completed}__` const SINGLETON_QUEUE_KEY = '__pgboss__singleton_queue' -const SINGLETON_QUEUE_KEY_ESCAPED = SINGLETON_QUEUE_KEY.replace(/_/g, '\\_') +// __pgboss-singleton-queued +// __pgboss-singleton-active +// __pgboss-singleton-queued-active +// const SINGLETON_QUEUE_KEY_ESCAPED = SINGLETON_QUEUE_KEY.replace(/_/g, '\\_') const MIGRATE_RACE_MESSAGE = 'division by zero' const CREATE_RACE_MESSAGE = 'already exists' @@ -82,19 +84,20 @@ function create (schema, version) { createVersionTable(schema), createJobStateEnum(schema), createJobTable(schema), + createIndexJobName(schema), + createIndexJobFetch(schema), + createIndexSingletonQueued(schema), + createIndexSingletonActive(schema), + createIndexSingletonQueuedAndActive(schema), + createIndexThrottle(schema), + createArchiveTable(schema), addPrimaryKeyToArchive(schema), - cloneJobTableForArchive(schema), - createScheduleTable(schema), - createSubscriptionTable(schema), - addIdIndexToArchive(schema), addArchivedOnToArchive(schema), addArchivedOnIndexToArchive(schema), - createIndexJobName(schema), - createIndexJobFetch(schema), - createIndexSingletonOn(schema), - createIndexSingletonKeyOn(schema), - createIndexSingletonKey(schema), - createIndexSingletonQueue(schema), + addNameIndexToArchive(schema), + createArchiveBackupTable(schema), + createScheduleTable(schema), + createSubscriptionTable(schema), insertVersion(schema, version) ] @@ -126,7 +129,6 @@ function createJobStateEnum (schema) { '${states.retry}', '${states.active}', '${states.completed}', - '${states.expired}', '${states.cancelled}', '${states.failed}' ) @@ -154,17 +156,45 @@ function createJobTable (schema) { completedOn timestamp with time zone, keepUntil timestamp with time zone NOT NULL default now() + interval '14 days', output jsonb, - dead_letter text + deadletter text ) ` } -function cloneJobTableForArchive (schema) { +function createIndexSingletonQueued (schema) { + return `CREATE UNIQUE INDEX 
job_singleton_queued ON ${schema}.job (name) WHERE state = '${states.created}' AND singletonKey = '__pgboss-singleton-queued' AND singletonOn IS NULL` +} + +function createIndexSingletonActive (schema) { + return `CREATE UNIQUE INDEX job_singleton_active ON ${schema}.job (name) WHERE state = '${states.active}' AND singletonKey = '__pgboss-singleton-active' AND singletonOn IS NULL` +} + +function createIndexSingletonQueuedAndActive (schema) { + return `CREATE UNIQUE INDEX job_singleton_queued_active ON ${schema}.job (name, state) WHERE state IN ('${states.created}','${states.active}') AND singletonKey = 'pgboss-singleton-queued-active' AND singletonOn IS NULL` +} + +function createIndexThrottle (schema) { + return `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn) WHERE state <= '${states.completed}' AND singletonOn IS NOT NULL` +} + +function createIndexJobName (schema) { + return `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)` +} + +function createIndexJobFetch (schema) { + return `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < '${states.active}'` +} + +function createArchiveTable (schema) { return `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)` } +function createArchiveBackupTable (schema) { + return `CREATE TABLE ${schema}.archive_backup (LIKE ${schema}.job)` +} + function addPrimaryKeyToArchive (schema) { - return `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY ON (id)` + return `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (id)` } function addArchivedOnToArchive (schema) { @@ -175,8 +205,8 @@ function addArchivedOnIndexToArchive (schema) { return `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)` } -function addIdIndexToArchive (schema) { - return `CREATE INDEX archive_id_idx ON ${schema}.archive(id)` +function addNameIndexToArchive (schema) { + return `CREATE INDEX archive_name_idx ON 
${schema}.archive(name)` } function setMaintenanceTime (schema) { @@ -218,46 +248,6 @@ function getQueueSize (schema, options = {}) { return `SELECT count(*) as count FROM ${schema}.job WHERE name = $1 AND state < '${options.before}'` } -function createIndexSingletonKey (schema) { - // anything with singletonKey means "only 1 job can be queued or active at a time" - return ` - CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < '${states.completed}' AND singletonOn IS NULL AND NOT singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' - ` -} - -function createIndexSingletonQueue (schema) { - // "singleton queue" means "only 1 job can be queued at a time" - return ` - CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < '${states.active}' AND singletonOn IS NULL AND singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' - ` -} - -function createIndexSingletonOn (schema) { - // anything with singletonOn means "only 1 job within this time period, queued, active or completed" - return ` - CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) WHERE state < '${states.expired}' AND singletonKey IS NULL - ` -} - -function createIndexSingletonKeyOn (schema) { - // anything with both singletonOn and singletonKey means "only 1 job within this time period with this key, queued, active or completed" - return ` - CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < '${states.expired}' - ` -} - -function createIndexJobName (schema) { - return ` - CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops) - ` -} - -function createIndexJobFetch (schema) { - return ` - CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < '${states.active}' - ` -} - function createScheduleTable (schema) { return ` CREATE TABLE ${schema}.schedule ( @@ -356,31 +346,13 @@ function insertVersion (schema, version) { } function 
fetchNextJob (schema) { - return (includeMetadata, enforceSingletonQueueActiveLimit) => ` + return (includeMetadata, patternMatch) => ` WITH nextJob as ( SELECT id FROM ${schema}.job j WHERE state < '${states.active}' - AND name LIKE $1 + AND name ${patternMatch ? 'LIKE' : '='} $1 AND startAfter < now() - ${enforceSingletonQueueActiveLimit - ? `AND ( - CASE - WHEN singletonKey IS NOT NULL - AND singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' - THEN NOT EXISTS ( - SELECT 1 - FROM ${schema}.job active_job - WHERE active_job.state = '${states.active}' - AND active_job.name = j.name - AND active_job.singletonKey = j.singletonKey - LIMIT 1 - ) - ELSE - true - END - )` - : ''} ORDER BY priority desc, createdOn, id LIMIT $2 FOR UPDATE SKIP LOCKED @@ -388,27 +360,14 @@ function fetchNextJob (schema) { UPDATE ${schema}.job j SET state = '${states.active}', startedOn = now(), - retryCount = CASE WHEN state = '${states.retry}' THEN retryCount + 1 ELSE retryCount END + retryCount = CASE WHEN startedOn IS NOT NULL THEN retryCount + 1 ELSE retryCount END FROM nextJob WHERE j.id = nextJob.id - RETURNING ${includeMetadata ? 'j.*' : 'j.id, name, data'}, EXTRACT(epoch FROM expireIn) as expire_in_seconds + RETURNING ${includeMetadata ? 'j.*' : 'j.id, name, data'}, + EXTRACT(epoch FROM expireIn) as expire_in_seconds ` } -function buildJsonCompletionObject (withResponse) { - // job completion contract - return `jsonb_build_object( - 'request', jsonb_build_object('id', id, 'name', name, 'data', data), - 'response', ${withResponse ? 
'$2::jsonb' : 'null'}, - 'state', state, - 'retryCount', retryCount, - 'createdOn', createdOn, - 'startedOn', startedOn, - 'completedOn', completedOn, - 'failed', CASE WHEN state = '${states.completed}' THEN false ELSE true END - )` -} - const retryCompletedOnCase = `CASE WHEN retryCount < retryLimit THEN NULL @@ -459,16 +418,18 @@ function failJobs (schema) { WHERE id IN (SELECT UNNEST($1::uuid[])) AND state < '${states.completed}' RETURNING * - ), completion_jobs as ( - INSERT INTO ${schema}.job (name, data, keepUntil) + ), dlq_jobs as ( + INSERT INTO ${schema}.job (name, data, output, retryLimit, keepUntil) SELECT - dead_letter - ${buildJsonCompletionObject(true)}, + deadletter, + data, + output, + retryLimit, ${keepUntilInheritance} FROM results WHERE state = '${states.failed}' - AND NOT name = dead_letter - AND dead_letter IS NOT NULL + AND deadletter IS NOT NULL + AND NOT name = deadletter ) SELECT COUNT(*) FROM results ` @@ -488,14 +449,16 @@ function expire (schema) { AND (startedOn + expireIn) < now() RETURNING * ) - INSERT INTO ${schema}.job (name, data, keepUntil) + INSERT INTO ${schema}.job (name, data, retryLimit, keepUntil) SELECT - '${COMPLETION_JOB_PREFIX}' || name, - ${buildJsonCompletionObject()}, + deadletter, + data, + retryLimit, ${keepUntilInheritance} FROM results - WHERE state = '${states.expired}' - AND NOT name LIKE '${COMPLETION_JOB_PREFIX}%' + WHERE state = '${states.failed}' + AND deadletter IS NOT NULL + AND NOT name = deadletter ` } @@ -542,7 +505,7 @@ function insertJob (schema) { retryDelay, retryBackoff, keepUntil, - dead_letter + deadletter ) SELECT id, @@ -558,7 +521,7 @@ function insertJob (schema) { retryDelay, retryBackoff, keepUntil, - dead_letter + deadletter FROM ( SELECT *, CASE @@ -589,7 +552,7 @@ function insertJob (schema) { $11::int as retryDelay, $12::bool as retryBackoff, $13::text as keepUntilValue, - $14::text as dead_letter + $14::text as deadletter ) j1 ) j2 ) j3 @@ -612,7 +575,7 @@ function insertJobs (schema) { 
retryBackoff, singletonKey, keepUntil, - dead_letter + deadletter ) SELECT COALESCE(id, gen_random_uuid()) as id, @@ -662,10 +625,10 @@ function archive (schema, completedInterval, failedInterval = completedInterval) RETURNING * ) INSERT INTO ${schema}.archive ( - id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, dead_letter, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, output ) SELECT - id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, dead_letter, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, output FROM archived_rows ` } diff --git a/src/timekeeper.js b/src/timekeeper.js index ef989035..a42ffc04 100644 --- a/src/timekeeper.js +++ b/src/timekeeper.js @@ -134,8 +134,7 @@ class Timekeeper extends EventEmitter { async checkSchedulesAsync () { const opts = { retryLimit: 2, - retentionSeconds: 60, - onComplete: false + retentionSeconds: 60 } await this.manager.sendDebounced(queues.CRON, null, opts, 60) @@ -186,8 +185,7 @@ class Timekeeper extends EventEmitter { async send (job) { const options = { singletonKey: job.name, - singletonSeconds: 60, - onComplete: false + singletonSeconds: 60 } await this.manager.send(queues.SEND_IT, job, options) diff --git a/test/cancelTest.js b/test/cancelTest.js index 6bff1d6d..51892479 100644 --- a/test/cancelTest.js +++ b/test/cancelTest.js @@ -27,21 +27,20 @@ describe('cancel', function () { }) it('should not cancel a completed job', async function () { - const config = 
this.test.bossConfig - - const boss = this.test.boss = await helper.start(config) - - const queue = 'will_not_cancel' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema await boss.send(queue) const job = await boss.fetch(queue) - await boss.complete(job.id) + const completeResult = await boss.complete(job.id) + + assert.strictEqual(completeResult.updated, 1) - const response = await boss.cancel(job.id) + const cancelResult = await boss.cancel(job.id) - assert.strictEqual(response.updated, 0) + assert.strictEqual(cancelResult.updated, 0) }) it('should cancel a batch of jobs', async function () { diff --git a/test/completeTest.js b/test/completeTest.js index dbe09f6a..f8ff41dc 100644 --- a/test/completeTest.js +++ b/test/completeTest.js @@ -1,4 +1,3 @@ -const delay = require('delay') const assert = require('assert') const helper = require('./testHelper') const PgBoss = require('../') @@ -16,7 +15,7 @@ describe('complete', function () { }) it('should complete a batch of jobs', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'complete-batch' const batchSize = 3 @@ -35,210 +34,9 @@ describe('complete', function () { assert.strictEqual(activeCount, batchSize) - await boss.complete(jobs.map(job => job.id)) + const result = await boss.complete(jobs.map(job => job.id)) - const completed = await boss.fetchCompleted(queue, batchSize) - - assert.strictEqual(batchSize, completed.length) - }) - - it('onComplete should have the payload from complete() in the response object', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const jobName = 'part-of-something-important' - const responsePayload = { message: 'super-important-payload', arg2: '123' } - - await boss.send(jobName) - 
- const job = await boss.fetch(jobName) - - await boss.complete(job.id, responsePayload) - - return new Promise((resolve) => { - boss.onComplete(jobName, async job => { - assert.strictEqual(job.data.response.message, responsePayload.message) - assert.strictEqual(job.data.response.arg2, responsePayload.arg2) - - resolve() - }) - }) - }) - - it('onComplete should have the original payload in request object', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queueName = 'onCompleteRequestTest' - const requestPayload = { foo: 'bar' } - - const jobId = await boss.send(queueName, requestPayload) - - const job = await boss.fetch(queueName) - await boss.complete(job.id) - - return new Promise((resolve) => { - boss.onComplete(queueName, async job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual(job.data.request.data.foo, requestPayload.foo) - - resolve() - }) - }) - }) - - it('onComplete should have both request and response', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const jobName = 'onCompleteFtw' - const requestPayload = { token: 'trivial' } - const responsePayload = { message: 'so verbose', code: '1234' } - - const jobId = await boss.send(jobName, requestPayload) - const job = await boss.fetch(jobName) - - await boss.complete(job.id, responsePayload) - - return new Promise((resolve) => { - boss.onComplete(jobName, async job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual(job.data.request.data.token, requestPayload.token) - assert.strictEqual(job.data.response.message, responsePayload.message) - assert.strictEqual(job.data.response.code, responsePayload.code) - - resolve() - }) - }) - }) - - it('should remove an onComplete worker', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const 
jobName = 'offComplete' - - let receivedCount = 0 - - boss.onComplete(jobName, { newJobCheckInterval: 500 }, async job => { - receivedCount++ - await boss.offComplete(jobName) - }) - - await boss.send(jobName) - const job1 = await boss.fetch(jobName) - await boss.complete(job1.id) - - await delay(2000) - - await boss.send(jobName) - const job2 = await boss.fetch(jobName) - await boss.complete(job2.id) - - await delay(2000) - - assert.strictEqual(receivedCount, 1) - }) - - it('should remove an onComplete worker by id', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - const queue = this.test.bossConfig.schema - - let receivedCount = 0 - - await boss.send(queue) - const job1 = await boss.fetch(queue) - await boss.complete(job1.id) - - await boss.send(queue) - const job2 = await boss.fetch(queue) - await boss.complete(job2.id) - - const id = await boss.onComplete(queue, { newJobCheckInterval: 500 }, async () => { - receivedCount++ - await boss.offComplete({ id }) - }) - - await delay(2000) - - assert.strictEqual(receivedCount, 1) - }) - - it('should fetch a completed job', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queue = 'fetchCompleted' - const jobId = await boss.send(queue) - await boss.fetch(queue) - await boss.complete(jobId) - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job.data.request.id, jobId) - }) - - it('should not create an extra state job after completion', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queue = 'noMoreExtraStateJobs' - const config = this.test.bossConfig - - const jobId = await boss.send(queue) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - await boss.complete(job.id) - - const stateJobCount = await 
helper.countJobs(config.schema, 'name = $1', [`${helper.COMPLETION_JOB_PREFIX}${queue}`]) - - assert.strictEqual(stateJobCount, 1) - }) - - it('should not create a completion job if opted out during send', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queue = 'onCompleteOptOut' - - const jobId = await boss.send(queue, null, { onComplete: false }) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job, null) - }) - - it('should not create a completion job if opted out during constructor', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: false }) - - const queue = 'onCompleteOptOutGlobal' - - const jobId = await boss.send(queue) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job, null) - }) - - it('should create completion job if overriding the default from constructor', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: false }) - - const queue = 'onCompleteOptInOverride' - - const jobId = await boss.send(queue, null, { onComplete: true }) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job.data.request.id, jobId) + assert.strictEqual(batchSize, result.jobs.length) }) it('should store job output in job.output from complete()', async function () { @@ -246,7 +44,7 @@ describe('complete', function () { const queue = 'completion-data-in-job-output' - const jobId = await boss.send(queue, null, { onComplete: false }) + const jobId = await boss.send(queue) const { id } = await boss.fetch(queue) @@ -266,7 +64,7 @@ describe('complete', function () { const queue = 'completion-data-in-job-output' - const jobId = await boss.send(queue, 
null, { onComplete: false }) + const jobId = await boss.send(queue) const { id } = await boss.fetch(queue) @@ -282,7 +80,7 @@ describe('complete', function () { }) it('should complete a batch of jobs with custom connection', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'complete-batch' const batchSize = 3 @@ -310,11 +108,33 @@ describe('complete', function () { } } - await boss.complete(jobs.map(job => job.id), null, { db }) + const result = await boss.complete(jobs.map(job => job.id), null, { db }) - const completed = await boss.fetchCompleted(queue, batchSize) - - assert.strictEqual(batchSize, completed.length) + assert.strictEqual(batchSize, result.jobs.length) assert.strictEqual(called, true) }) + + it('should warn with an old onComplete option only once', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, noSupervisor: true }) + + const queue = this.test.bossConfig.schema + + let warningCount = 0 + + const warningEvent = 'warning' + const onWarning = (warning) => { + assert(warning.message.includes('onComplete')) + warningCount++ + } + + process.on(warningEvent, onWarning) + + await boss.send({ name: queue, options: { onComplete: true } }) + await boss.send({ name: queue, options: { onComplete: true } }) + await boss.send({ name: queue, options: { onComplete: true } }) + + process.removeListener(warningEvent, onWarning) + + assert.strictEqual(warningCount, 1) + }) }) diff --git a/test/expireTest.js b/test/expireTest.js index c37f9628..af57f006 100644 --- a/test/expireTest.js +++ b/test/expireTest.js @@ -6,65 +6,55 @@ describe('expire', function () { const defaults = { maintenanceIntervalSeconds: 1 } it('should expire a job', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, onComplete: true 
}) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - const queue = 'expire' + boss.on('maintenance', () => { console.log(`${new Date().toISOString()}: on:maintenance event`) }) - const jobId = await boss.send({ name: queue, options: { expireInSeconds: 1 } }) + await delay(10000) - // fetch the job but don't complete it - await boss.fetch(queue) + const queue = this.test.bossConfig.schema + const deadLetter = `${queue}_dlq` + const key = this.test.bossConfig.schema - // this should give it enough time to expire - await delay(8000) + await boss.send({ name: queue, data: { key }, options: { expireInSeconds: 1, deadLetter } }) - const job = await boss.fetchCompleted(queue) + const job1 = await boss.fetch(queue) - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual('expired', job.data.state) - }) + assert(job1) - it('should expire a job - cascaded config', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, expireInSeconds: 1 }) + await delay(3000) - const queue = 'expire-cascade-config' + const job2 = await boss.fetch(queue) - const jobId = await boss.send(queue) + assert(job2) - // fetch the job but don't complete it - const { id } = await boss.fetch(queue) + await delay(3000) - assert.strictEqual(jobId, id) + const job3 = await boss.fetch(deadLetter) - // this should give it enough time to expire - await delay(8000) - - const job = await boss.getJobById(jobId) - - assert.strictEqual('expired', job.state) + assert.strictEqual(key, job3.data.key) }) - it('should warn with an old expireIn option only once', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, noSupervisor: true }) + it('should expire a job - cascaded config', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, expireInSeconds: 1 }) + + const queue = this.test.bossConfig.schema + 
const deadLetter = `${queue}_dlq` - const queue = 'expireIn-warning-only-once' + const jobId = await boss.send(queue, { deadLetter }) - let warningCount = 0 + // fetch the job but don't complete it + await boss.fetch(queue) - const warningEvent = 'warning' - const onWarning = (warning) => { - assert(warning.message.includes('expireIn')) - warningCount++ - } + await delay(3000) - process.on(warningEvent, onWarning) + const { id } = await boss.fetch(queue) + assert.strictEqual(id, jobId) - await boss.send({ name: queue, options: { expireIn: '1 minute' } }) - await boss.send({ name: queue, options: { expireIn: '1 minute' } }) - await boss.send({ name: queue, options: { expireIn: '1 minute' } }) + await delay(3000) - process.removeListener(warningEvent, onWarning) + const job = await boss.getJobById(jobId) - assert.strictEqual(warningCount, 1) + assert.strictEqual('failed', job.state) }) }) diff --git a/test/failureTest.js b/test/failureTest.js index 6b66dd4f..e1e4b96b 100644 --- a/test/failureTest.js +++ b/test/failureTest.js @@ -26,25 +26,6 @@ describe('failure', function () { await boss.fail(job.id) }) - it('worker for job failure', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - - const jobId = await boss.send(queue, null, { onComplete: true }) - - const job = await boss.fetch(queue) - - await boss.fail(job.id) - - return new Promise((resolve, reject) => { - boss.onComplete(queue, async job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual('failed', job.data.state) - resolve() - }).catch(reject) - }) - }) - it('should fail a batch of jobs', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema @@ -57,7 +38,9 @@ describe('failure', function () { const jobs = await boss.fetch(queue, 3) - await boss.fail(jobs.map(job => job.id)) + const result = await boss.fail(jobs.map(job => 
job.id)) + + assert.strictEqual(result.jobs.length, 3) }) it('should fail a batch of jobs with a data arg', async function () { @@ -80,22 +63,6 @@ describe('failure', function () { assert(results.every(i => i.output.message === message)) }) - it('should accept a payload', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - - const failPayload = { someReason: 'nuna' } - - const jobId = await boss.send(queue, null, { onComplete: true }) - - await boss.fail(jobId, failPayload) - - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.someReason, failPayload.someReason) - }) - it('should preserve nested objects within a payload that is an instance of Error', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema @@ -103,14 +70,14 @@ describe('failure', function () { const failPayload = new Error('Something went wrong') failPayload.some = { deeply: { nested: { reason: 'nuna' } } } - const jobId = await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) await boss.fail(jobId, failPayload) - const job = await boss.fetchCompleted(queue) + const job = helper.getJobById(jobId) assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.some.deeply.nested.reason, failPayload.some.deeply.nested.reason) + assert.strictEqual(job.output.response.some.deeply.nested.reason, failPayload.some.deeply.nested.reason) }) it('failure via Promise reject() should pass string wrapped in value prop', async function () { @@ -118,15 +85,15 @@ describe('failure', function () { const queue = this.test.bossConfig.schema const failPayload = 'mah error' - await boss.work(queue, job => Promise.reject(failPayload)) - await boss.send(queue, null, { onComplete: true }) + await boss.work(queue, () => 
Promise.reject(failPayload)) + const jobId = await boss.send(queue) await delay(7000) - const job = await boss.fetchCompleted(queue) + const job = helper.getJobById(jobId) assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.value, failPayload) + assert.strictEqual(job.output.value, failPayload) }) it('failure via Promise reject() should pass object payload', async function () { @@ -137,31 +104,31 @@ describe('failure', function () { const errorResponse = new Error('custom error') errorResponse.something = something - await boss.work(queue, job => Promise.reject(errorResponse)) - await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) + await boss.work(queue, () => Promise.reject(errorResponse)) await delay(7000) - const job = await boss.fetchCompleted(queue) + const job = helper.getJobById(jobId) assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.something, something) + assert.strictEqual(job.output.something, something) }) - it('failure with Error object should get stored in the failure job', async function () { + it('failure with Error object should be saved in the job', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema const message = 'a real error!' 
- await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) await boss.work(queue, async () => { throw new Error(message) }) await delay(2000) - const job = await boss.fetchCompleted(queue) + const job = helper.getJobById(jobId) assert.strictEqual(job.data.state, 'failed') - assert(job.data.response.message.includes(message)) + assert(job.output.message.includes(message)) }) it('should fail a job with custom connection', async function () { @@ -190,22 +157,19 @@ describe('failure', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - await boss.send(queue, null, { onComplete: true }) - - await boss.work(queue, async job => { - const err = { - message: 'something' - } + const jobId = await boss.send(queue) + const message = + await boss.work(queue, { newJobCheckInterval: 500 }, async () => { + const err = { message } err.myself = err - throw err }) await delay(2000) - const job = await boss.fetchCompleted(queue) + const job = await helper.getJobById(jobId) - assert(job) + assert.strictEqual(job.output.message, message) }) }) diff --git a/test/insertTest.js b/test/insertTest.js index 26f455c3..c3018e16 100644 --- a/test/insertTest.js +++ b/test/insertTest.js @@ -32,7 +32,7 @@ describe('insert', function () { expireInSeconds: 5, singletonKey: '123', keepUntil: new Date().toISOString(), - onComplete: true + deadLetter: `${queue}_dlq` } await boss.insert([input]) @@ -50,7 +50,7 @@ describe('insert', function () { assert.strictEqual(job.expirein.seconds, input.expireInSeconds, `expireInSeconds input ${input.expireInSeconds} didn't match job ${job.expirein}`) assert.strictEqual(job.singletonkey, input.singletonKey, `name input ${input.singletonKey} didn't match job ${job.singletonkey}`) assert.strictEqual(new Date(job.keepuntil).toISOString(), input.keepUntil, `keepUntil input ${input.keepUntil} didn't match job ${job.keepuntil}`) - 
assert.strictEqual(job.on_complete, input.onComplete, `onComplete input ${input.onComplete} didn't match job ${job.on_complete}`) + assert.strictEqual(job.deadletter, input.deadLetter, `deadLetter input ${input.deadLetter} didn't match job ${job.deadletter}`) }) it('should create jobs from an array with all properties and custom connection', async function () { @@ -69,7 +69,7 @@ describe('insert', function () { expireInSeconds: 5, singletonKey: '123', keepUntil: new Date().toISOString(), - onComplete: true + deadLetter: `${queue}_dlq` } let called = false const db = await helper.getDb() @@ -97,7 +97,7 @@ describe('insert', function () { assert.strictEqual(job.expirein.seconds, input.expireInSeconds, `expireInSeconds input ${input.expireInSeconds} didn't match job ${job.expirein}`) assert.strictEqual(job.singletonkey, input.singletonKey, `name input ${input.singletonKey} didn't match job ${job.singletonkey}`) assert.strictEqual(new Date(job.keepuntil).toISOString(), input.keepUntil, `keepUntil input ${input.keepUntil} didn't match job ${job.keepuntil}`) - assert.strictEqual(job.on_complete, input.onComplete, `onComplete input ${input.onComplete} didn't match job ${job.on_complete}`) + assert.strictEqual(job.deadletter, input.deadLetter, `deadLetter input ${input.deadLetter} didn't match job ${job.deadletter}`) assert.strictEqual(called, true) }) }) diff --git a/test/moduleTest.js b/test/moduleTest.js index 94377e1e..244eb2b3 100644 --- a/test/moduleTest.js +++ b/test/moduleTest.js @@ -8,7 +8,6 @@ describe('module', function () { assert(states.retry) assert(states.active) assert(states.completed) - assert(states.expired) assert(states.cancelled) assert(states.failed) }) diff --git a/test/workTest.js b/test/workTest.js index 3d5d4fe3..22bd370e 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -199,7 +199,7 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - await 
boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) await new Promise((resolve) => { boss.work(queue, { batchSize: 1 }, async jobs => { @@ -208,11 +208,9 @@ describe('work', function () { }) }) - await delay(2000) - - const result = await boss.fetchCompleted(queue) + const job = await helper.getJobById(jobId) - assert(result) + assert.strictEqual(job.state, 'completed') }) it('returning promise applies backpressure', async function () { @@ -303,39 +301,38 @@ describe('work', function () { }) it('completion should pass string wrapped in value prop', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'processCompletionString' const result = 'success' - boss.work(queue, async job => result) + const jobId = await boss.send(queue) - await boss.send(queue) + await boss.work(queue, async job => result) - await delay(8000) + await delay(1000) - const job = await boss.fetchCompleted(queue) + const job = await helper.getJobById(jobId) assert.strictEqual(job.data.state, 'completed') - assert.strictEqual(job.data.response.value, result) + assert.strictEqual(job.output.value, result) }) it('completion via Promise resolve() should pass object payload', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'processCompletionObject' const something = 'clever' - boss.work(queue, async job => ({ something })) - - await boss.send(queue) + const jobId = await boss.send(queue) + await boss.work(queue, async () => ({ something })) - await delay(8000) + await delay(1000) - const job = await boss.fetchCompleted(queue) + const job = await helper.getJobById(jobId) assert.strictEqual(job.data.state, 'completed') - 
assert.strictEqual(job.data.response.something, something) + assert.strictEqual(job.output.something, something) }) it('should allow multiple workers to the same queue per instance', async function () { diff --git a/types.d.ts b/types.d.ts index 2ffab407..e1e60b48 100644 --- a/types.d.ts +++ b/types.d.ts @@ -56,11 +56,6 @@ declare namespace PgBoss { & RetentionOptions & RetryOptions & JobPollingOptions - & CompletionOptions - - interface CompletionOptions { - onComplete?: boolean; - } interface ExpirationOptions { expireInSeconds?: number; @@ -90,6 +85,7 @@ declare namespace PgBoss { singletonMinutes?: number; singletonHours?: number; singletonNextSlot?: boolean; + deadLetter?: string; } interface ConnectionOptions { @@ -183,7 +179,7 @@ declare namespace PgBoss { interface JobWithMetadata extends Job { priority: number; - state: 'created' | 'retry' | 'active' | 'completed' | 'expired' | 'cancelled' | 'failed'; + state: 'created' | 'retry' | 'active' | 'completed' | 'cancelled' | 'failed'; retrylimit: number; retrycount: number; retrydelay: number; @@ -198,7 +194,7 @@ declare namespace PgBoss { createdon: Date; completedon: Date | null; keepuntil: Date; - oncomplete: boolean, + deadletter: boolean, output: object } @@ -214,7 +210,7 @@ declare namespace PgBoss { singletonKey?: string; expireInSeconds?: number; keepUntil?: Date | string; - onComplete?: boolean + deadLetter?: string; } interface MonitorState { @@ -223,7 +219,6 @@ declare namespace PgBoss { retry: number; active: number; completed: number; - expired: number; cancelled: number; failed: number; } @@ -236,7 +231,7 @@ declare namespace PgBoss { id: string, name: string, options: WorkOptions, - state: 'created' | 'retry' | 'active' | 'completed' | 'expired' | 'cancelled' | 'failed', + state: 'created' | 'retry' | 'active' | 'completed' | 'cancelled' | 'failed', count: number, createdOn: Date, lastFetchedOn: Date, @@ -320,9 +315,6 @@ declare class PgBoss extends EventEmitter { work(name: string, options: 
PgBoss.BatchWorkOptions & { includeMetadata: true }, handler: PgBoss.BatchWorkWithMetadataHandler): Promise; work(name: string, options: PgBoss.BatchWorkOptions, handler: PgBoss.BatchWorkHandler): Promise; - onComplete(name: string, handler: Function): Promise; - onComplete(name: string, options: PgBoss.WorkOptions, handler: Function): Promise; - offWork(name: string): Promise; offWork(options: PgBoss.OffWorkOptions): Promise; @@ -338,19 +330,11 @@ declare class PgBoss extends EventEmitter { publish(event: string, data: object): Promise; publish(event: string, data: object, options: PgBoss.SendOptions): Promise; - offComplete(name: string): Promise; - offComplete(options: PgBoss.OffWorkOptions): Promise; - fetch(name: string): Promise | null>; fetch(name: string, batchSize: number): Promise[] | null>; fetch(name: string, batchSize: number, options: PgBoss.FetchOptions & { includeMetadata: true }): Promise[] | null>; fetch(name: string, batchSize: number, options: PgBoss.FetchOptions): Promise[] | null>; - fetchCompleted(name: string): Promise | null>; - fetchCompleted(name: string, batchSize: number): Promise[] | null>; - fetchCompleted(name: string, batchSize: number, options: PgBoss.FetchOptions & { includeMetadata: true }): Promise[] | null>; - fetchCompleted(name: string, batchSize: number, options: PgBoss.FetchOptions): Promise[] | null>; - cancel(id: string, options?: PgBoss.ConnectionOptions): Promise; cancel(ids: string[], options?: PgBoss.ConnectionOptions): Promise; From 183125a69faed42d712eff750aecc175fb54b3ce Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 27 Aug 2023 09:59:29 -0500 Subject: [PATCH 03/36] wip --- src/attorney.js | 4 +- src/boss.js | 35 +++++++++------ src/db.js | 10 +++++ src/manager.js | 2 +- src/plans.js | 89 +++++++++++++------------------------ test/archiveTest.js | 23 ++++++---- test/backgroundErrorTest.js | 1 - test/deleteQueueTest.js | 6 +-- test/deleteTest.js | 19 +++----- test/expireTest.js | 38 ++++++++-------- 
test/failureTest.js | 46 ++++++++++++------- test/monitoringTest.js | 3 +- test/retryTest.js | 31 +++++-------- test/workTest.js | 10 ++--- 14 files changed, 153 insertions(+), 164 deletions(-) diff --git a/src/attorney.js b/src/attorney.js index 7b605a17..c070f031 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -137,7 +137,9 @@ function checkWorkArgs (name, args, defaults) { function checkFetchArgs (name, batchSize, options) { assert(name, 'missing queue name') - name = sanitizeQueueNameForFetch(name) + if(queueNameHasPatternMatch(name)) { + name = sanitizeQueueNameForFetch(name) + } assert(!batchSize || (Number.isInteger(batchSize) && batchSize >= 1), 'batchSize must be an integer > 0') assert(!('includeMetadata' in options) || typeof options.includeMetadata === 'boolean', 'includeMetadata must be a boolean') diff --git a/src/boss.js b/src/boss.js index 4d58d2f3..0704eb5d 100644 --- a/src/boss.js +++ b/src/boss.js @@ -1,4 +1,5 @@ const EventEmitter = require('events') +const { serializeError: stringify } = require('serialize-error') const plans = require('./plans') const { states } = require('./plans') const { COMPLETION_JOB_PREFIX } = plans @@ -32,7 +33,7 @@ class Boss extends EventEmitter { this.events = events - this.expireCommand = plans.locked(config.schema, plans.expire(config.schema)) + this.failJobsByTimeoutCommand = plans.locked(config.schema, plans.failJobsByTimeout(config.schema)) this.archiveCommand = plans.locked(config.schema, plans.archive(config.schema, config.archiveInterval, config.archiveFailedInterval)) this.purgeCommand = plans.locked(config.schema, plans.purge(config.schema, config.deleteAfter)) this.getMaintenanceTimeCommand = plans.getMaintenanceTime(config.schema) @@ -44,7 +45,8 @@ class Boss extends EventEmitter { this.archive, this.purge, this.countStates, - this.getQueueNames + this.getQueueNames, + this.maintain ] } @@ -119,23 +121,29 @@ class Boss extends EventEmitter { await this.manager.send(queues.MONITOR_STATES, null, 
options) } + async maintain () { + const started = Date.now() + + await this.expire() + await this.archive() + await this.purge() + + const ended = Date.now() + + await this.setMaintenanceTime() + + return { ms: ended - started } + } + async onMaintenance (job) { try { if (this.config.__test__throw_maint) { throw new Error(this.config.__test__throw_maint) } - const started = Date.now() - - await this.expire() - await this.archive() - await this.purge() - - const ended = Date.now() - - await this.setMaintenanceTime() + const result = await this.maintain() - this.emit(events.maintenance, { ms: ended - started }) + this.emit(events.maintenance, result) if (!this.stopped) { await this.manager.complete(job.id) // pre-complete to bypass throttling @@ -211,7 +219,8 @@ class Boss extends EventEmitter { } async expire () { - await this.executeSql(this.expireCommand) + const output = stringify({ value: { message: 'job failed by timeout in active state' }}) + await this.executeSql(this.failJobsByTimeoutCommand, [null, output]) } async archive () { diff --git a/src/db.js b/src/db.js index 97a6261f..6bf93b71 100644 --- a/src/db.js +++ b/src/db.js @@ -25,6 +25,16 @@ class Db extends EventEmitter { async executeSql (text, values) { if (this.opened) { + if (this.config.debug === true) { + console.log(`${new Date().toISOString()}: DEBUG SQL`) + console.log(text) + + if (values) { + console.log(`${new Date().toISOString()}: DEBUG VALUES`) + console.log(values) + } + } + return await this.pool.query(text, values) } } diff --git a/src/manager.js b/src/manager.js index 9addcdb4..88b9fb76 100644 --- a/src/manager.js +++ b/src/manager.js @@ -58,7 +58,7 @@ class Manager extends EventEmitter { this.completeJobsCommand = plans.completeJobs(config.schema) this.cancelJobsCommand = plans.cancelJobs(config.schema) this.resumeJobsCommand = plans.resumeJobs(config.schema) - this.failJobsCommand = plans.failJobs(config.schema) + this.failJobsCommand = plans.failJobsById(config.schema) 
this.getJobByIdCommand = plans.getJobById(config.schema) this.getArchivedJobByIdCommand = plans.getArchivedJobById(config.schema) this.subscribeCommand = plans.subscribe(config.schema) diff --git a/src/plans.js b/src/plans.js index 9517ebac..de03e177 100644 --- a/src/plans.js +++ b/src/plans.js @@ -30,7 +30,8 @@ module.exports = { completeJobs, cancelJobs, resumeJobs, - failJobs, + failJobsById, + failJobsByTimeout, insertJob, insertJobs, getTime, @@ -40,7 +41,6 @@ module.exports = { subscribe, unsubscribe, getQueuesForEvent, - expire, archive, purge, countStates, @@ -368,26 +368,6 @@ function fetchNextJob (schema) { ` } -const retryCompletedOnCase = `CASE - WHEN retryCount < retryLimit - THEN NULL - ELSE now() - END` - -const retryStartAfterCase = `CASE - WHEN retryCount = retryLimit THEN startAfter - WHEN NOT retryBackoff THEN now() + retryDelay * interval '1' - ELSE now() + - ( - retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 - + - retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 * random() - ) - * interval '1' - END` - -const keepUntilInheritance = 'keepUntil + (keepUntil - startAfter)' - function completeJobs (schema) { return ` WITH results AS ( @@ -403,20 +383,38 @@ function completeJobs (schema) { ` } -function failJobs (schema) { +function failJobsById (schema) { + const where = `id IN (SELECT UNNEST($1::uuid[])) AND state < '${states.completed}'` + return failJobs(schema, where) +} + +function failJobsByTimeout (schema) { + const where = `state = '${states.active}' AND (startedOn + expireIn) < now()` + return failJobs(schema, where) +} + +function failJobs (schema, where) { return ` WITH results AS ( - UPDATE ${schema}.job - SET state = CASE - WHEN retryCount < retryLimit - THEN '${states.retry}'::${schema}.job_state + UPDATE ${schema}.job SET + state = CASE + WHEN retryCount < retryLimit THEN '${states.retry}'::${schema}.job_state ELSE '${states.failed}'::${schema}.job_state END, - completedOn = ${retryCompletedOnCase}, - startAfter = 
${retryStartAfterCase}, + completedOn = CASE + WHEN retryCount < retryLimit THEN NULL + ELSE now() + END, + startAfter = CASE + WHEN retryCount = retryLimit THEN startAfter + WHEN NOT retryBackoff THEN now() + retryDelay * interval '1' + ELSE now() + ( + retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 + + retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 * random() + ) * interval '1' + END, output = $2::jsonb - WHERE id IN (SELECT UNNEST($1::uuid[])) - AND state < '${states.completed}' + WHERE ${where} RETURNING * ), dlq_jobs as ( INSERT INTO ${schema}.job (name, data, output, retryLimit, keepUntil) @@ -425,7 +423,7 @@ function failJobs (schema) { data, output, retryLimit, - ${keepUntilInheritance} + keepUntil + (keepUntil - startAfter) FROM results WHERE state = '${states.failed}' AND deadletter IS NOT NULL @@ -435,33 +433,6 @@ function failJobs (schema) { ` } -function expire (schema) { - return ` - WITH results AS ( - UPDATE ${schema}.job - SET state = CASE - WHEN retryCount < retryLimit THEN '${states.retry}'::${schema}.job_state - ELSE '${states.failed}'::${schema}.job_state - END, - completedOn = ${retryCompletedOnCase}, - startAfter = ${retryStartAfterCase} - WHERE state = '${states.active}' - AND (startedOn + expireIn) < now() - RETURNING * - ) - INSERT INTO ${schema}.job (name, data, retryLimit, keepUntil) - SELECT - deadletter, - data, - retryLimit, - ${keepUntilInheritance} - FROM results - WHERE state = '${states.failed}' - AND deadletter IS NOT NULL - AND NOT name = deadletter - ` -} - function cancelJobs (schema) { return ` with results as ( diff --git a/test/archiveTest.js b/test/archiveTest.js index 85ff88db..c7a4d8b8 100644 --- a/test/archiveTest.js +++ b/test/archiveTest.js @@ -5,8 +5,7 @@ const { states } = require('../src/plans') describe('archive', function () { const defaults = { - archiveCompletedAfterSeconds: 1, - maintenanceIntervalSeconds: 1 + archiveCompletedAfterSeconds: 1 } it('should archive a completed job', async function () { @@ -21,7 
+20,7 @@ describe('archive', function () { await boss.complete(jobId) - await delay(4000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -41,7 +40,7 @@ describe('archive', function () { await boss.complete(jobId) - await delay(4000) + await boss.maintain() const archivedJob = await boss.getJobById(jobId) @@ -56,7 +55,8 @@ describe('archive', function () { const jobId = await boss.send(queue, null, { retentionSeconds: 1 }) - await delay(7000) + await delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -71,7 +71,8 @@ describe('archive', function () { const jobId = await boss.send(queue) - await delay(7000) + await delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -88,7 +89,9 @@ describe('archive', function () { const jobId = await boss.send(queue, null, { retentionSeconds: 1 }) await boss.fail(jobId, failPayload) - await delay(7000) + + await delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -96,7 +99,7 @@ describe('archive', function () { }) it('should archive a failed job', async function () { - const config = { ...this.test.bossConfig, maintenanceIntervalSeconds: 1, archiveFailedAfterSeconds: 1 } + const config = { ...this.test.bossConfig, archiveFailedAfterSeconds: 1 } const boss = this.test.boss = await helper.start(config) const queue = this.test.bossConfig.schema @@ -104,7 +107,9 @@ describe('archive', function () { const jobId = await boss.send(queue, null, { retentionSeconds: 1 }) await boss.fail(jobId, failPayload) - await delay(7000) + + await delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index fd1a14ff..5f688604 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js 
@@ -31,7 +31,6 @@ describe('background processing error handling', function () { it('state monitoring error handling works', async function () { const defaults = { monitorStateIntervalSeconds: 2, - maintenanceIntervalMinutes: 1, noScheduling: true, __test__throw_monitor: true } diff --git a/test/deleteQueueTest.js b/test/deleteQueueTest.js index 0f05dbbc..fcb51ef1 100644 --- a/test/deleteQueueTest.js +++ b/test/deleteQueueTest.js @@ -1,6 +1,5 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') describe('deleteQueue', function () { it('should clear a specific queue', async function () { @@ -59,8 +58,7 @@ describe('deleteQueue', function () { it('clearStorage() should empty both job storage tables', async function () { const defaults = { - archiveCompletedAfterSeconds: 1, - maintenanceIntervalSeconds: 1 + archiveCompletedAfterSeconds: 1 } const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) @@ -73,8 +71,6 @@ describe('deleteQueue', function () { await boss.complete(jobId) - await delay(3000) - const db = await helper.getDb() const getJobCount = async table => { diff --git a/test/deleteTest.js b/test/deleteTest.js index 9a69080e..94d8aa51 100644 --- a/test/deleteTest.js +++ b/test/deleteTest.js @@ -1,26 +1,19 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') describe('delete', async function () { - const defaults = { - deleteAfterSeconds: 1, - maintenanceIntervalSeconds: 1 - } - it('should delete an archived job', async function () { - const jobName = 'deleteMe' - - const config = { ...this.test.bossConfig, ...defaults } + const config = { ...this.test.bossConfig, deleteAfterSeconds: 1 } const boss = this.test.boss = await helper.start(config) - const jobId = await boss.send(jobName) - const job = await boss.fetch(jobName) + const queue = this.test.bossConfig.schema + + const jobId = await boss.send(queue) - 
assert.strictEqual(jobId, job.id) + await boss.fetch(queue) await boss.complete(jobId) - await delay(7000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) diff --git a/test/expireTest.js b/test/expireTest.js index af57f006..4936dc92 100644 --- a/test/expireTest.js +++ b/test/expireTest.js @@ -3,55 +3,53 @@ const helper = require('./testHelper') const delay = require('delay') describe('expire', function () { - const defaults = { maintenanceIntervalSeconds: 1 } - it('should expire a job', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - boss.on('maintenance', () => { console.log(`${new Date().toISOString()}: on:maintenance event`) }) - - await delay(10000) - + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema - const deadLetter = `${queue}_dlq` const key = this.test.bossConfig.schema - await boss.send({ name: queue, data: { key }, options: { expireInSeconds: 1, deadLetter } }) + const jobId = await boss.send({ name: queue, data: { key }, options: { expireInSeconds: 1 } }) const job1 = await boss.fetch(queue) assert(job1) - await delay(3000) + await delay(1000) + + await boss.maintain() const job2 = await boss.fetch(queue) assert(job2) - await delay(3000) + await delay(1000) - const job3 = await boss.fetch(deadLetter) + await boss.maintain() - assert.strictEqual(key, job3.data.key) + const job = await boss.getJobById(jobId) + + assert.strictEqual('failed', job.state) }) it('should expire a job - cascaded config', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, expireInSeconds: 1 }) - + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, expireInSeconds: 1 }) const queue = this.test.bossConfig.schema - const deadLetter = `${queue}_dlq` - const jobId = await boss.send(queue, { deadLetter }) + const 
jobId = await boss.send(queue) // fetch the job but don't complete it await boss.fetch(queue) - await delay(3000) + await delay(1000) + + await boss.maintain() const { id } = await boss.fetch(queue) assert.strictEqual(id, jobId) - await delay(3000) + await delay(1000) + + await boss.maintain() const job = await boss.getJobById(jobId) diff --git a/test/failureTest.js b/test/failureTest.js index e1e4b96b..b3383303 100644 --- a/test/failureTest.js +++ b/test/failureTest.js @@ -74,10 +74,9 @@ describe('failure', function () { await boss.fail(jobId, failPayload) - const job = helper.getJobById(jobId) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.output.response.some.deeply.nested.reason, failPayload.some.deeply.nested.reason) + assert.strictEqual(job.output.some.deeply.nested.reason, failPayload.some.deeply.nested.reason) }) it('failure via Promise reject() should pass string wrapped in value prop', async function () { @@ -85,14 +84,13 @@ describe('failure', function () { const queue = this.test.bossConfig.schema const failPayload = 'mah error' - await boss.work(queue, () => Promise.reject(failPayload)) const jobId = await boss.send(queue) + await boss.work(queue, () => Promise.reject(failPayload)) - await delay(7000) + await delay(1000) - const job = helper.getJobById(jobId) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') assert.strictEqual(job.output.value, failPayload) }) @@ -107,11 +105,10 @@ describe('failure', function () { const jobId = await boss.send(queue) await boss.work(queue, () => Promise.reject(errorResponse)) - await delay(7000) + await delay(1000) - const job = helper.getJobById(jobId) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') assert.strictEqual(job.output.something, something) }) @@ -123,11 +120,10 @@ describe('failure', function () { const jobId = await boss.send(queue) await boss.work(queue, 
async () => { throw new Error(message) }) - await delay(2000) + await delay(1000) - const job = helper.getJobById(jobId) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') assert(job.output.message.includes(message)) }) @@ -158,7 +154,7 @@ describe('failure', function () { const queue = this.test.bossConfig.schema const jobId = await boss.send(queue) - const message = + const message = 'mhmm' await boss.work(queue, { newJobCheckInterval: 500 }, async () => { const err = { message } @@ -168,8 +164,28 @@ describe('failure', function () { await delay(2000) - const job = await helper.getJobById(jobId) + const job = await boss.getJobById(jobId) assert.strictEqual(job.output.message, message) }) + + it('dead letter queues are working', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, debug: true }) + const queue = this.test.bossConfig.schema + const deadLetter = `${queue}_dlq` + + const jobId = await boss.send(queue, { key: queue }, { deadLetter }) + + await boss.fetch(queue) + + await boss.fail(jobId) + + await boss.fetch(queue) + + await boss.fail(jobId) + + const job = await boss.fetch(deadLetter) + + assert.strictEqual(job.data.key, queue) + }) }) diff --git a/test/monitoringTest.js b/test/monitoringTest.js index 929209ff..3fb6ded6 100644 --- a/test/monitoringTest.js +++ b/test/monitoringTest.js @@ -4,8 +4,7 @@ const helper = require('./testHelper') describe('monitoring', function () { it('should emit state counts', async function () { const defaults = { - monitorStateIntervalSeconds: 1, - maintenanceIntervalSeconds: 10 + monitorStateIntervalSeconds: 1 } const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) diff --git a/test/retryTest.js b/test/retryTest.js index 13f5dbb4..66d3804e 100644 --- a/test/retryTest.js +++ b/test/retryTest.js @@ -3,18 +3,16 @@ const helper = require('./testHelper') const delay = require('delay') describe('retries', 
function () { - const defaults = { maintenanceIntervalSeconds: 1 } - it('should retry a job that didn\'t complete', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'unreliable' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema - const jobId = await boss.send({ name: queue, options: { expireInSeconds: 1, retryLimit: 1 } }) + const jobId = await boss.send({ name: queue, options: { expireInSeconds: 1 } }) const try1 = await boss.fetch(queue) - await delay(5000) + await delay(1000) + await boss.maintain() const try2 = await boss.fetch(queue) @@ -23,7 +21,7 @@ describe('retries', function () { }) it('should retry a job that failed', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queueName = 'retryFailed' const retryLimit = 1 @@ -40,7 +38,7 @@ describe('retries', function () { it('should retry a job that failed with cascaded config', async function () { const retryLimit = 1 - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, retryLimit }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, retryLimit }) const queueName = 'retryFailed-config-cascade' @@ -55,7 +53,7 @@ describe('retries', function () { }) it('should retry with a fixed delay', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'retryDelayFixed' @@ -76,7 +74,7 @@ describe('retries', function () { }) it('should retry with a exponential backoff', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = 
await helper.start({ ...this.test.bossConfig }) const queue = 'retryDelayBackoff' @@ -96,23 +94,16 @@ describe('retries', function () { }) it('should set the default retry limit to 1 if missing', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'retryLimitDefault' const jobId = await boss.send(queue, null, { retryDelay: 1 }) - await boss.fetch(queue) await boss.fail(jobId) const job1 = await boss.fetch(queue) - assert.strictEqual(job1, null) - - await delay(1000) - - const job2 = await boss.fetch(queue) - - assert(job2) + assert(job1) }) }) diff --git a/test/workTest.js b/test/workTest.js index 22bd370e..8843fe85 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -208,7 +208,7 @@ describe('work', function () { }) }) - const job = await helper.getJobById(jobId) + const job = await boss.getJobById(jobId) assert.strictEqual(job.state, 'completed') }) @@ -312,9 +312,9 @@ describe('work', function () { await delay(1000) - const job = await helper.getJobById(jobId) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'completed') + assert.strictEqual(job.state, 'completed') assert.strictEqual(job.output.value, result) }) @@ -329,9 +329,9 @@ describe('work', function () { await delay(1000) - const job = await helper.getJobById(jobId) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'completed') + assert.strictEqual(job.state, 'completed') assert.strictEqual(job.output.something, something) }) From 3fe13a7d42948560f406a3914a0d1cb7828d235b Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 27 Aug 2023 10:26:21 -0500 Subject: [PATCH 04/36] allow jobs with retryLimit=0 again --- src/attorney.js | 9 +++++---- src/boss.js | 2 +- test/failureTest.js | 7 +------ 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/attorney.js b/src/attorney.js 
index c070f031..b93cc8e9 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -137,8 +137,8 @@ function checkWorkArgs (name, args, defaults) { function checkFetchArgs (name, batchSize, options) { assert(name, 'missing queue name') - if(queueNameHasPatternMatch(name)) { - name = sanitizeQueueNameForFetch(name) + if (queueNameHasPatternMatch(name)) { + name = sanitizeQueueNameForFetch(name) } assert(!batchSize || (Number.isInteger(batchSize) && batchSize >= 1), 'batchSize must be an integer > 0') @@ -277,7 +277,7 @@ function applyExpirationConfig (config, defaults) { function applyRetryConfig (config, defaults) { assert(!('retryDelay' in config) || (Number.isInteger(config.retryDelay) && config.retryDelay >= 0), 'retryDelay must be an integer >= 0') - assert(!('retryLimit' in config) || (Number.isInteger(config.retryLimit) && config.retryLimit >= 1), 'retryLimit must be an integer >= 1') + assert(!('retryLimit' in config) || (Number.isInteger(config.retryLimit) && config.retryLimit >= 0), 'retryLimit must be an integer >= 0') assert(!('retryBackoff' in config) || (config.retryBackoff === true || config.retryBackoff === false), 'retryBackoff must be either true or false') if (defaults) { @@ -287,7 +287,8 @@ function applyRetryConfig (config, defaults) { } config.retryDelay = config.retryDelay || 0 - config.retryLimit = config.retryLimit || 2 + config.retryLimit = ('retryLimit' in config) ? config.retryLimit : 2 + config.retryBackoff = !!config.retryBackoff config.retryDelay = (config.retryBackoff && !config.retryDelay) ? 1 : config.retryDelay config.retryLimit = (config.retryDelay && !config.retryLimit) ? 
1 : config.retryLimit diff --git a/src/boss.js b/src/boss.js index 0704eb5d..211866d0 100644 --- a/src/boss.js +++ b/src/boss.js @@ -219,7 +219,7 @@ class Boss extends EventEmitter { } async expire () { - const output = stringify({ value: { message: 'job failed by timeout in active state' }}) + const output = stringify({ value: { message: 'job failed by timeout in active state' } }) await this.executeSql(this.failJobsByTimeoutCommand, [null, output]) } diff --git a/test/failureTest.js b/test/failureTest.js index b3383303..379fad79 100644 --- a/test/failureTest.js +++ b/test/failureTest.js @@ -170,18 +170,13 @@ describe('failure', function () { }) it('dead letter queues are working', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, debug: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema const deadLetter = `${queue}_dlq` const jobId = await boss.send(queue, { key: queue }, { deadLetter }) await boss.fetch(queue) - - await boss.fail(jobId) - - await boss.fetch(queue) - await boss.fail(jobId) const job = await boss.fetch(deadLetter) From 40d7e449a25cd060652dc50ae93090d8fed00a00 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 27 Aug 2023 10:26:35 -0500 Subject: [PATCH 05/36] defaults for test suite --- test/testHelper.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/testHelper.js b/test/testHelper.js index 16bf6b61..2ef9c9c5 100644 --- a/test/testHelper.js +++ b/test/testHelper.js @@ -39,6 +39,10 @@ function getConfig (options = {}) { config.schema = config.schema || 'pgboss' + config.noSupervisor = true + config.noScheduling = true + config.retryLimit = 0 + const result = { ...config } return Object.assign(result, options) From d3600b946afbe5041ed481f6f22d09a4a1c22526 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 27 Aug 2023 16:29:24 -0500 Subject: [PATCH 06/36] versioning --- package.json | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index f75611ed..4ab5e08d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "pg-boss", - "version": "9.0.3", + "version": "10.0.0", "description": "Queueing jobs in Node.js using PostgreSQL like a boss", "main": "./src/index.js", "engines": { From 28fbb8fae39ca7fccbb40b38b0b4990b5f51c013 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 27 Aug 2023 17:19:12 -0500 Subject: [PATCH 07/36] wip - 49 passing --- docs/readme.md | 5 ----- src/attorney.js | 28 ++++++++++++------------- src/boss.js | 7 +------ src/manager.js | 8 ++++---- src/migrationStore.js | 44 ++++++++++++++++++++++++++++------------ src/plans.js | 40 +++++++++++++++++++----------------- test/archiveTest.js | 2 ++ test/completeTest.js | 2 +- test/deleteQueueTest.js | 25 +++++++++++------------ test/expireTest.js | 19 ++--------------- test/fetchTest.js | 45 ----------------------------------------- test/maintenanceTest.js | 9 ++++++++- test/testHelper.js | 8 ++++---- types.d.ts | 2 -- version.json | 2 +- 15 files changed, 100 insertions(+), 146 deletions(-) diff --git a/docs/readme.md b/docs/readme.md index a584237f..e18c1d42 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -562,8 +562,6 @@ Available in constructor as a default, or overridden in send. When used in conjunction with singletonKey, allows a max of 1 job to be queued. - >By default, there is no limit on the number of these jobs that may be active. However, this behavior may be modified by passing the [enforceSingletonQueueActiveLimit](#fetch) option. - ```js boss.send('my-job', {}, {singletonKey: '123', useSingletonQueue: true}) // resolves a jobId boss.send('my-job', {}, {singletonKey: '123', useSingletonQueue: true}) // resolves a null jobId until first job becomes active @@ -807,9 +805,6 @@ The default concurrency for `work()` is 1 job every 2 seconds. 
Both the interval Same as in [`fetch()`](#fetch) -* **enforceSingletonQueueActiveLimit**, bool - - Same as in [`fetch()`](#fetch) **Polling options** diff --git a/src/attorney.js b/src/attorney.js index b93cc8e9..b65f20a9 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -1,5 +1,5 @@ const assert = require('assert') -const { DEFAULT_SCHEMA, SINGLETON_QUEUE_KEY } = require('./plans') +const { DEFAULT_SCHEMA } = require('./plans') module.exports = { getConfig, @@ -61,7 +61,7 @@ function checkSendArgs (args, defaults) { applyRetryConfig(options, defaults) applyExpirationConfig(options, defaults) applyRetentionConfig(options, defaults) - applySingletonKeyConfig(options) + // applySingletonKeyConfig(options) const { startAfter, singletonSeconds, singletonMinutes, singletonHours } = options @@ -90,17 +90,17 @@ function checkInsertArgs (jobs) { assert(Array.isArray(jobs), `jobs argument should be an array. Received '${typeof jobs}'`) return jobs.map(job => { job = { ...job } - applySingletonKeyConfig(job) + // applySingletonKeyConfig(job) return job }) } -function applySingletonKeyConfig (options) { - if (options.singletonKey && options.useSingletonQueue && options.singletonKey !== SINGLETON_QUEUE_KEY) { - options.singletonKey = SINGLETON_QUEUE_KEY + options.singletonKey - } - delete options.useSingletonQueue -} +// function applySingletonKeyConfig (options) { +// if (options.singletonKey && options.useSingletonQueue && options.singletonKey !== SINGLETON_QUEUE_KEY) { +// options.singletonKey = SINGLETON_QUEUE_KEY + options.singletonKey +// } +// delete options.useSingletonQueue +// } function checkWorkArgs (name, args, defaults) { let options, callback @@ -129,7 +129,6 @@ function checkWorkArgs (name, args, defaults) { assert(!('teamSize' in options) || (Number.isInteger(options.teamSize) && options.teamSize >= 1), 'teamSize must be an integer > 0') assert(!('batchSize' in options) || (Number.isInteger(options.batchSize) && options.batchSize >= 1), 'batchSize must be 
an integer > 0') assert(!('includeMetadata' in options) || typeof options.includeMetadata === 'boolean', 'includeMetadata must be a boolean') - assert(!('enforceSingletonQueueActiveLimit' in options) || typeof options.enforceSingletonQueueActiveLimit === 'boolean', 'enforceSingletonQueueActiveLimit must be a boolean') return { options, callback } } @@ -143,7 +142,6 @@ function checkFetchArgs (name, batchSize, options) { assert(!batchSize || (Number.isInteger(batchSize) && batchSize >= 1), 'batchSize must be an integer > 0') assert(!('includeMetadata' in options) || typeof options.includeMetadata === 'boolean', 'includeMetadata must be a boolean') - assert(!('enforceSingletonQueueActiveLimit' in options) || typeof options.enforceSingletonQueueActiveLimit === 'boolean', 'enforceSingletonQueueActiveLimit must be a boolean') return { name } } @@ -281,13 +279,13 @@ function applyRetryConfig (config, defaults) { assert(!('retryBackoff' in config) || (config.retryBackoff === true || config.retryBackoff === false), 'retryBackoff must be either true or false') if (defaults) { - config.retryDelay = config.retryDelay || defaults.retryDelay - config.retryLimit = config.retryLimit || defaults.retryLimit - config.retryBackoff = config.retryBackoff || defaults.retryBackoff + config.retryDelay = ('retryDelay' in config) ? config.retryDelay : defaults.retryDelay + config.retryLimit = ('retryLimit' in config) ? config.retryLimit : defaults.retryLimit + config.retryBackoff = ('retryBackoff' in config) ? config.retryBackoff : defaults.retryBackoff } config.retryDelay = config.retryDelay || 0 - config.retryLimit = ('retryLimit' in config) ? config.retryLimit : 2 + config.retryLimit = Number.isInteger(config.retryLimit) ? config.retryLimit : 2 config.retryBackoff = !!config.retryBackoff config.retryDelay = (config.retryBackoff && !config.retryDelay) ? 
1 : config.retryDelay diff --git a/src/boss.js b/src/boss.js index 211866d0..a9d01114 100644 --- a/src/boss.js +++ b/src/boss.js @@ -1,8 +1,6 @@ const EventEmitter = require('events') -const { serializeError: stringify } = require('serialize-error') const plans = require('./plans') const { states } = require('./plans') -const { COMPLETION_JOB_PREFIX } = plans const queues = { MAINTENANCE: '__pgboss__maintenance', @@ -53,7 +51,6 @@ class Boss extends EventEmitter { async supervise () { this.metaMonitor() - await this.manager.deleteQueue(COMPLETION_JOB_PREFIX + queues.MAINTENANCE) await this.manager.deleteQueue(queues.MAINTENANCE) await this.maintenanceAsync() @@ -65,7 +62,6 @@ class Boss extends EventEmitter { await this.manager.work(queues.MAINTENANCE, maintenanceWorkOptions, (job) => this.onMaintenance(job)) if (this.monitorStates) { - await this.manager.deleteQueue(COMPLETION_JOB_PREFIX + queues.MONITOR_STATES) await this.manager.deleteQueue(queues.MONITOR_STATES) await this.monitorStatesAsync() @@ -219,8 +215,7 @@ class Boss extends EventEmitter { } async expire () { - const output = stringify({ value: { message: 'job failed by timeout in active state' } }) - await this.executeSql(this.failJobsByTimeoutCommand, [null, output]) + await this.executeSql(this.failJobsByTimeoutCommand) } async archive () { diff --git a/src/manager.js b/src/manager.js index 88b9fb76..f4b796d7 100644 --- a/src/manager.js +++ b/src/manager.js @@ -15,7 +15,7 @@ const { QUEUES: TIMEKEEPER_QUEUES } = require('./timekeeper') const INTERNAL_QUEUES = Object.values(BOSS_QUEUES).concat(Object.values(TIMEKEEPER_QUEUES)).reduce((acc, i) => ({ ...acc, [i]: i }), {}) const plans = require('./plans') -const { SINGLETON_QUEUE_KEY } = plans +// const { SINGLETON_TYPE } = plans const WIP_EVENT_INTERVAL = 2000 const WIP_DEBOUNCE_OPTIONS = { leading: true, trailing: true, maxWait: WIP_EVENT_INTERVAL } @@ -58,7 +58,7 @@ class Manager extends EventEmitter { this.completeJobsCommand = 
plans.completeJobs(config.schema) this.cancelJobsCommand = plans.cancelJobs(config.schema) this.resumeJobsCommand = plans.resumeJobs(config.schema) - this.failJobsCommand = plans.failJobsById(config.schema) + this.failJobsByIdCommand = plans.failJobsById(config.schema) this.getJobByIdCommand = plans.getJobById(config.schema) this.getArchivedJobByIdCommand = plans.getArchivedJobById(config.schema) this.subscribeCommand = plans.subscribe(config.schema) @@ -340,7 +340,7 @@ class Manager extends EventEmitter { async sendSingleton (name, data, options) { options = options ? { ...options } : {} - options.singletonKey = SINGLETON_QUEUE_KEY + // options.singletonKey = SINGLETON_QUEUE_KEY const result = Attorney.checkSendArgs([name, data, options], this.config) @@ -535,7 +535,7 @@ class Manager extends EventEmitter { async fail (id, data, options = {}) { const db = options.db || this.db const ids = this.mapCompletionIdArg(id, 'fail') - const result = await db.executeSql(this.failJobsCommand, [ids, this.mapCompletionDataArg(data)]) + const result = await db.executeSql(this.failJobsByIdCommand, [ids, this.mapCompletionDataArg(data)]) return this.mapCompletionResponse(ids, result) } diff --git a/src/migrationStore.js b/src/migrationStore.js index c3c6de72..a3999336 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -69,32 +69,50 @@ function getAll (schema) { version: 21, previous: 20, install: [ + `DROP INDEX ${schema}.job_singletonKey`, + `DROP INDEX ${schema}.job_singleton_queue`, + `DROP INDEX ${schema}.job_singletonOn`, + `DROP INDEX ${schema}.job_singletonKeyOn`, + `DROP INDEX ${schema}.job_fetch`, `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, + `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, + `DROP TABLE IF EXISTS ${schema}.archive_backup`, + `ALTER TABLE ${schema}.archive RENAME to archive_backup`, `DROP TYPE ${schema}.job_state`, `CREATE TYPE ${schema}.job_state AS 
ENUM ('created','retry','active','completed','cancelled','failed')`, - `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE job_state`, - `ALTER TABLE ${schema}.archive RENAME to archive_backup`, + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)`, `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (id)`, `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()`, `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)`, `CREATE INDEX archive_name_idx ON ${schema}.archive(name)`, - `DROP INDEX ${schema}.job_singletonKey`, - `DROP INDEX ${schema}.job_singleton_queue`, - `DROP INDEX ${schema}.job_singletonOn`, - `DROP INDEX ${schema}.job_singletonKeyOn`, - `CREATE UNIQUE INDEX job_singleton ON ${schema}.job (name, state) WHERE state <= 'active' AND singletonKey = '__pgboss__singleton_queue'`, - `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn) WHERE state <= 'completed' AND singletonOn IS NOT NULL AND singletonKey = '__pgboss__singleton_queue'`, - `CREATE UNIQUE INDEX job_throttle_key_on ON ${schema}.job (name, singletonOn, singletonKey) WHERE state <= 'completed' AND singletonOn IS NOT NULL AND singletonKey IS NOT NULL` + `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, + `CREATE UNIQUE INDEX job_singleton ON ${schema}.job (name, state) WHERE state <= 'active' AND singletonKey = '__pgboss-singleton-incomplete' AND singletonOn IS NULL`, + `CREATE UNIQUE INDEX job_singleton_queued ON ${schema}.job (name) WHERE state <= 'retry' AND singletonKey = '__pgboss-singleton-queued' AND singletonOn IS NULL`, + `CREATE UNIQUE INDEX job_singleton_active ON ${schema}.job (name) WHERE state = 'active' AND singletonKey = '__pgboss-singleton-active' AND 
singletonOn IS NULL`, + `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn) WHERE state <= 'completed' AND singletonOn IS NOT NULL` ], uninstall: [ - `ALTER TYPE ${schema}.job_state ADD VALUE 'expired' AFTER 'completed'`, - `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, `DROP TABLE IF EXISTS ${schema}.archive_backup`, `DROP INDEX ${schema}.job_singleton`, - `DROP INDEX ${schema}.job_throttle_on`, - `DROP INDEX ${schema}.job_throttle_key_on`, + `DROP INDEX ${schema}.job_singleton_queued`, + `DROP INDEX ${schema}.job_singleton_active`, + `DROP INDEX ${schema}.job_throttle`, + `DROP INDEX ${schema}.job_fetch`, + `DROP INDEX ${schema}.archive_archivedon_idx`, + `DROP INDEX ${schema}.archive_name_idx`, + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, + `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, + `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, + `DROP TYPE ${schema}.job_state`, + `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','expired','cancelled','failed')`, + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, + `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, + `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, `CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) WHERE state < 'expired' AND singletonKey IS NULL`, `CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < 'expired'`, `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, 
diff --git a/src/plans.js b/src/plans.js index de03e177..aeca13f8 100644 --- a/src/plans.js +++ b/src/plans.js @@ -10,12 +10,12 @@ const states = { } const DEFAULT_SCHEMA = 'pgboss' -const COMPLETION_JOB_PREFIX = `__state__${states.completed}__` -const SINGLETON_QUEUE_KEY = '__pgboss__singleton_queue' -// __pgboss-singleton-queued -// __pgboss-singleton-active -// __pgboss-singleton-queued-active -// const SINGLETON_QUEUE_KEY_ESCAPED = SINGLETON_QUEUE_KEY.replace(/_/g, '\\_') + +const SINGLETON_TYPE = { + queued: '__pgboss-singleton-queued', + active: '__pgboss-singleton-active', + incomplete: '__pgboss-singleton-incomplete' +} const MIGRATE_RACE_MESSAGE = 'division by zero' const CREATE_RACE_MESSAGE = 'already exists' @@ -57,8 +57,7 @@ module.exports = { getArchivedJobById, getJobById, states: { ...states }, - COMPLETION_JOB_PREFIX, - SINGLETON_QUEUE_KEY, + SINGLETON_TYPE, MIGRATE_RACE_MESSAGE, CREATE_RACE_MESSAGE, DEFAULT_SCHEMA @@ -86,9 +85,9 @@ function create (schema, version) { createJobTable(schema), createIndexJobName(schema), createIndexJobFetch(schema), + createIndexSingleton(schema), createIndexSingletonQueued(schema), createIndexSingletonActive(schema), - createIndexSingletonQueuedAndActive(schema), createIndexThrottle(schema), createArchiveTable(schema), addPrimaryKeyToArchive(schema), @@ -161,16 +160,16 @@ function createJobTable (schema) { ` } -function createIndexSingletonQueued (schema) { - return `CREATE UNIQUE INDEX job_singleton_queued ON ${schema}.job (name) WHERE state = '${states.created}' AND singletonKey = '__pgboss-singleton-queued' AND singletonOn IS NULL` +function createIndexSingleton (schema) { + return `CREATE UNIQUE INDEX job_singleton ON ${schema}.job (name, state) WHERE state <= '${states.active}' AND singletonKey = '${SINGLETON_TYPE.incomplete}' AND singletonOn IS NULL` } -function createIndexSingletonActive (schema) { - return `CREATE UNIQUE INDEX job_singleton_active ON ${schema}.job (name) WHERE state = '${states.active}' AND 
singletonKey = '__pgboss-singleton-active' AND singletonOn IS NULL` +function createIndexSingletonQueued (schema) { + return `CREATE UNIQUE INDEX job_singleton_queued ON ${schema}.job (name) WHERE state <= '${states.retry}' AND singletonKey = '${SINGLETON_TYPE.queued}' AND singletonOn IS NULL` } -function createIndexSingletonQueuedAndActive (schema) { - return `CREATE UNIQUE INDEX job_singleton_queued_active ON ${schema}.job (name, state) WHERE state IN ('${states.created}','${states.active}') AND singletonKey = 'pgboss-singleton-queued-active' AND singletonOn IS NULL` +function createIndexSingletonActive (schema) { + return `CREATE UNIQUE INDEX job_singleton_active ON ${schema}.job (name) WHERE state = '${states.active}' AND singletonKey = '${SINGLETON_TYPE.active}' AND singletonOn IS NULL` } function createIndexThrottle (schema) { @@ -385,15 +384,18 @@ function completeJobs (schema) { function failJobsById (schema) { const where = `id IN (SELECT UNNEST($1::uuid[])) AND state < '${states.completed}'` - return failJobs(schema, where) + const output = '$2::jsonb' + + return failJobs(schema, where, output) } function failJobsByTimeout (schema) { const where = `state = '${states.active}' AND (startedOn + expireIn) < now()` - return failJobs(schema, where) + const output = '\'{ "value": { "message": "job failed by timeout in active state" } }\'::jsonb' + return failJobs(schema, where, output) } -function failJobs (schema, where) { +function failJobs (schema, where, output) { return ` WITH results AS ( UPDATE ${schema}.job SET @@ -413,7 +415,7 @@ function failJobs (schema, where) { retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 * random() ) * interval '1' END, - output = $2::jsonb + output = ${output} WHERE ${where} RETURNING * ), dlq_jobs as ( diff --git a/test/archiveTest.js b/test/archiveTest.js index c7a4d8b8..85137974 100644 --- a/test/archiveTest.js +++ b/test/archiveTest.js @@ -20,6 +20,8 @@ describe('archive', function () { await boss.complete(jobId) + await 
delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) diff --git a/test/completeTest.js b/test/completeTest.js index f8ff41dc..cb476d45 100644 --- a/test/completeTest.js +++ b/test/completeTest.js @@ -114,7 +114,7 @@ describe('complete', function () { assert.strictEqual(called, true) }) - it('should warn with an old onComplete option only once', async function () { + it.skip('should warn with an old onComplete option only once', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, noSupervisor: true }) const queue = this.test.bossConfig.schema diff --git a/test/deleteQueueTest.js b/test/deleteQueueTest.js index fcb51ef1..f7d68287 100644 --- a/test/deleteQueueTest.js +++ b/test/deleteQueueTest.js @@ -1,5 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') +const delay = require('delay') describe('deleteQueue', function () { it('should clear a specific queue', async function () { @@ -57,19 +58,17 @@ describe('deleteQueue', function () { }) it('clearStorage() should empty both job storage tables', async function () { - const defaults = { - archiveCompletedAfterSeconds: 1 - } - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'clear-storage-works' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, archiveCompletedAfterSeconds: 1 }) + const queue = this.test.bossConfig.schema const jobId = await boss.send(queue) - const job = await boss.fetch(queue) + await boss.fetch(queue) + await boss.complete(jobId) - assert.strictEqual(job.id, jobId) + await delay(1000) + await boss.maintain() - await boss.complete(jobId) + await boss.send(queue) const db = await helper.getDb() @@ -81,15 +80,15 @@ describe('deleteQueue', function () { const preJobCount = await getJobCount('job') const preArchiveCount = await getJobCount('archive') - assert(preJobCount > 0) - 
assert(preArchiveCount > 0) + assert.strictEqual(preJobCount, 1) + assert.strictEqual(preArchiveCount, 1) await boss.clearStorage() const postJobCount = await getJobCount('job') const postArchiveCount = await getJobCount('archive') - assert(postJobCount === 0) - assert(postArchiveCount === 0) + assert.strictEqual(postJobCount, 0) + assert.strictEqual(postArchiveCount, 0) }) }) diff --git a/test/expireTest.js b/test/expireTest.js index 4936dc92..54960cc0 100644 --- a/test/expireTest.js +++ b/test/expireTest.js @@ -8,7 +8,7 @@ describe('expire', function () { const queue = this.test.bossConfig.schema const key = this.test.bossConfig.schema - const jobId = await boss.send({ name: queue, data: { key }, options: { expireInSeconds: 1 } }) + const jobId = await boss.send({ name: queue, data: { key }, options: { retryLimit: 0, expireInSeconds: 1 } }) const job1 = await boss.fetch(queue) @@ -18,21 +18,13 @@ describe('expire', function () { await boss.maintain() - const job2 = await boss.fetch(queue) - - assert(job2) - - await delay(1000) - - await boss.maintain() - const job = await boss.getJobById(jobId) assert.strictEqual('failed', job.state) }) it('should expire a job - cascaded config', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, expireInSeconds: 1 }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, expireInSeconds: 1, retryLimit: 0 }) const queue = this.test.bossConfig.schema const jobId = await boss.send(queue) @@ -44,13 +36,6 @@ describe('expire', function () { await boss.maintain() - const { id } = await boss.fetch(queue) - assert.strictEqual(id, jobId) - - await delay(1000) - - await boss.maintain() - const job = await boss.getJobById(jobId) assert.strictEqual('failed', job.state) diff --git a/test/fetchTest.js b/test/fetchTest.js index 092230c4..d2a9a858 100644 --- a/test/fetchTest.js +++ b/test/fetchTest.js @@ -121,49 +121,4 @@ describe('fetch', function () { assert(job.startedon 
=== undefined) assert.strictEqual(calledCounter, 2) }) - - describe('enforceSingletonQueueActiveLimit option', function () { - it('when enforceSingletonQueueActiveLimit=false, should fetch singleton queue job even if there is already an active one', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - const jobOptions = { singletonKey: 'singleton_queue_active_test', useSingletonQueue: true } - const sendArgs = [queue, {}, jobOptions] - const fetchArgs = [queue, undefined, { enforceSingletonQueueActiveLimit: false }] - - const publish1 = await boss.send(...sendArgs) - assert(publish1) - const fetch1 = await boss.fetch(...fetchArgs) - assert(fetch1) - - const publish2 = await boss.send(...sendArgs) - assert(publish2) - const fetch2 = await boss.fetch(...fetchArgs) - assert(fetch2) - }) - - it('when enforceSingletonQueueActiveLimit=true, should not fetch singleton queue job if there is already an active one', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - const jobOptions = { singletonKey: 'singleton_queue_active_test', useSingletonQueue: true } - const sendArgs = [queue, {}, jobOptions] - const fetchArgs = [queue, undefined, { enforceSingletonQueueActiveLimit: true }] - - const publish1 = await boss.send(...sendArgs) - assert(publish1) - const fetch1 = await boss.fetch(...fetchArgs) - assert(fetch1) - - const publish2 = await boss.send(...sendArgs) - assert(publish2) - // Job 1 still active, can't fetch job 2 - const fetch2 = await boss.fetch(...fetchArgs) - assert(fetch2 === null) - - await boss.complete(fetch1.id) - // Job 1 no longer active, should be able to fetch job 2 - const retryFetch2 = await boss.fetch(...fetchArgs) - assert(retryFetch2) - assert(retryFetch2.id === publish2) - }) - }) }) diff --git a/test/maintenanceTest.js b/test/maintenanceTest.js index 5270d212..56d18fc9 100644 
--- a/test/maintenanceTest.js +++ b/test/maintenanceTest.js @@ -4,8 +4,14 @@ const delay = require('delay') const PgBoss = require('../') describe('maintenance', async function () { + const defaults = { noSupervisor: false } + it('should send maintenance job if missing during monitoring', async function () { - const config = { ...this.test.bossConfig, maintenanceIntervalSeconds: 1 } + const config = { + ...this.test.bossConfig, + ...defaults, + maintenanceIntervalSeconds: 1 + } const db = await helper.getDb() @@ -31,6 +37,7 @@ describe('maintenance', async function () { it('meta monitoring error handling works', async function () { const config = { ...this.test.bossConfig, + ...defaults, maintenanceIntervalSeconds: 1, __test__throw_meta_monitor: 'meta monitoring error' } diff --git a/test/testHelper.js b/test/testHelper.js index 2ef9c9c5..34e08959 100644 --- a/test/testHelper.js +++ b/test/testHelper.js @@ -55,12 +55,12 @@ async function init () { await createPgCrypto(database) } -async function getDb (database) { +async function getDb ({ database, debug } = {}) { const config = getConfig() config.database = database || config.database - const db = new Db(config) + const db = new Db({ ...config, debug }) await db.open() @@ -68,7 +68,7 @@ async function getDb (database) { } async function createPgCrypto (database) { - const db = await getDb(database) + const db = await getDb({ database }) await db.executeSql('create extension if not exists pgcrypto') await db.close() } @@ -106,7 +106,7 @@ async function countJobs (schema, where, values) { } async function tryCreateDb (database) { - const db = await getDb('postgres') + const db = await getDb({ database: 'postgres' }) try { await db.executeSql(`CREATE DATABASE ${database}`) diff --git a/types.d.ts b/types.d.ts index e1e60b48..a10ba458 100644 --- a/types.d.ts +++ b/types.d.ts @@ -105,7 +105,6 @@ declare namespace PgBoss { interface CommonJobFetchOptions { includeMetadata?: boolean; - enforceSingletonQueueActiveLimit?: 
boolean; } type JobFetchOptions = CommonJobFetchOptions & { @@ -123,7 +122,6 @@ declare namespace PgBoss { type FetchOptions = { includeMetadata?: boolean; - enforceSingletonQueueActiveLimit?: boolean; } & ConnectionOptions; interface WorkHandler { diff --git a/version.json b/version.json index efaedb7f..a295918c 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "schema": 20 + "schema": 21 } From b621afbdb56b102a776e1102dcafcf20a97357ae Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 27 Aug 2023 17:33:54 -0500 Subject: [PATCH 08/36] test fixes --- test/backgroundErrorTest.js | 4 ++++ test/multiMasterTest.js | 13 +++---------- test/retryTest.js | 33 +++++++++++++-------------------- 3 files changed, 20 insertions(+), 30 deletions(-) diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index 5f688604..47f7b01d 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -8,6 +8,7 @@ describe('background processing error handling', function () { monitorStateIntervalMinutes: 1, maintenanceIntervalSeconds: 1, noScheduling: true, + noSupervisor: false, __test__throw_maint: true } @@ -31,6 +32,7 @@ describe('background processing error handling', function () { it('state monitoring error handling works', async function () { const defaults = { monitorStateIntervalSeconds: 2, + noSupervisor: false, noScheduling: true, __test__throw_monitor: true } @@ -55,6 +57,8 @@ describe('background processing error handling', function () { it('clock monitoring error handling works', async function () { const config = { ...this.test.bossConfig, + noSupervisor: false, + noScheduling: false, clockMonitorIntervalSeconds: 1, __test__throw_clock_monitoring: 'pg-boss mock error: clock monitoring' } diff --git a/test/multiMasterTest.js b/test/multiMasterTest.js index 361d7c4d..de6d5dd5 100644 --- a/test/multiMasterTest.js +++ b/test/multiMasterTest.js @@ -63,17 +63,10 @@ describe('multi-master', function () { const { states } = PgBoss const 
jobCount = 5 - const defaults = { - maintenanceIntervalSeconds: 1, - noSupervisor: true - } - - const config = { ...this.test.bossConfig, ...defaults } - - let boss = new PgBoss(config) + let boss = new PgBoss({ ...this.test.bossConfig, maintenanceIntervalSeconds: 1 }) const queues = boss.boss.getQueueNames() - const countJobs = (state) => helper.countJobs(config.schema, 'name = $1 AND state = $2', [queues.MAINTENANCE, state]) + const countJobs = (state) => helper.countJobs(this.test.bossConfig.schema, 'name = $1 AND state = $2', [queues.MAINTENANCE, state]) await boss.start() @@ -88,7 +81,7 @@ describe('multi-master', function () { await boss.stop({ graceful: false }) - boss = new PgBoss(this.test.bossConfig) + boss = new PgBoss({ ...this.test.bossConfig, noSupervisor: false }) await boss.start() diff --git a/test/retryTest.js b/test/retryTest.js index 66d3804e..f646f56a 100644 --- a/test/retryTest.js +++ b/test/retryTest.js @@ -7,7 +7,7 @@ describe('retries', function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema - const jobId = await boss.send({ name: queue, options: { expireInSeconds: 1 } }) + const jobId = await boss.send({ name: queue, options: { expireInSeconds: 1, retryLimit: 1 } }) const try1 = await boss.fetch(queue) @@ -22,40 +22,35 @@ describe('retries', function () { it('should retry a job that failed', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema - const queueName = 'retryFailed' - const retryLimit = 1 - - const jobId = await boss.send(queueName, null, { retryLimit }) + const jobId = await boss.send(queue, null, { retryLimit: 1 }) - await boss.fetch(queueName) + await boss.fetch(queue) await boss.fail(jobId) - const job = await boss.fetch(queueName) + const job = await boss.fetch(queue) assert.strictEqual(job.id, jobId) }) it('should retry a job that failed with cascaded 
config', async function () { - const retryLimit = 1 - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, retryLimit }) - - const queueName = 'retryFailed-config-cascade' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, retryLimit: 1 }) + const queue = this.test.bossConfig.schema - const jobId = await boss.send(queueName) + const jobId = await boss.send(queue) - await boss.fetch(queueName) + await boss.fetch(queue) await boss.fail(jobId) - const job = await boss.fetch(queueName) + const job = await boss.fetch(queue) assert.strictEqual(job.id, jobId) }) it('should retry with a fixed delay', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - - const queue = 'retryDelayFixed' + const queue = this.test.bossConfig.schema const jobId = await boss.send(queue, null, { retryLimit: 1, retryDelay: 1 }) @@ -75,8 +70,7 @@ describe('retries', function () { it('should retry with a exponential backoff', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - - const queue = 'retryDelayBackoff' + const queue = this.test.bossConfig.schema let processCount = 0 const retryLimit = 4 @@ -95,8 +89,7 @@ describe('retries', function () { it('should set the default retry limit to 1 if missing', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - - const queue = 'retryLimitDefault' + const queue = this.test.bossConfig.schema const jobId = await boss.send(queue, null, { retryDelay: 1 }) await boss.fetch(queue) From d4468b6531f766abf290cce8508a5a75b6f2f474 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 27 Aug 2023 22:18:01 -0500 Subject: [PATCH 09/36] suite is passing except for singleton --- README.md | 2 +- src/attorney.js | 4 ++-- src/manager.js | 7 +++---- test/monitoringTest.js | 1 + test/readme.js | 9 ++++----- test/retryTest.js | 4 +++- test/scheduleTest.js | 20 ++++++++++++++------ 
test/testHelper.js | 3 --- test/wildcardTest.js | 14 +------------- test/workTest.js | 6 ++++-- 10 files changed, 33 insertions(+), 37 deletions(-) diff --git a/README.md b/README.md index 51506c6a..c588b93c 100644 --- a/README.md +++ b/README.md @@ -43,9 +43,9 @@ This will likely cater the most to teams already familiar with the simplicity of * Cron scheduling * Pub/sub API for fan-out queue relationships * Deferral, retries (with exponential backoff), rate limiting, debouncing -* Completion jobs for orchestrations/sagas * Direct table access for bulk loads via COPY or INSERT * Multi-master compatible (for example, in a Kubernetes ReplicaSet) +* Dead letter queues * Automatic creation and migration of storage tables * Automatic maintenance operations to manage table growth diff --git a/src/attorney.js b/src/attorney.js index b65f20a9..01b34bad 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -7,8 +7,8 @@ module.exports = { checkInsertArgs, checkWorkArgs, checkFetchArgs, - queueNameHasPatternMatch, - warnClockSkew + warnClockSkew, + queueNameHasPatternMatch } const WARNINGS = { diff --git a/src/manager.js b/src/manager.js index f4b796d7..a17b1cac 100644 --- a/src/manager.js +++ b/src/manager.js @@ -201,9 +201,7 @@ class Manager extends EventEmitter { createTeamRefillPromise() } - const patternMatch = Attorney.queueNameHasPatternMatch(name) - - const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata, patternMatch }) + const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata }) const onFetch = async (jobs) => { if (this.config.__test__throw_worker) { @@ -459,9 +457,10 @@ class Manager extends EventEmitter { } async fetch (name, batchSize, options = {}) { + const patternMatch = Attorney.queueNameHasPatternMatch(name) const values = Attorney.checkFetchArgs(name, batchSize, options) const db = options.db || this.db - const nextJobSql = this.nextJobCommand(options.includeMetadata || false) + 
const nextJobSql = this.nextJobCommand(options.includeMetadata || false, patternMatch) const statementValues = [values.name, batchSize || 1] let result diff --git a/test/monitoringTest.js b/test/monitoringTest.js index 3fb6ded6..5eb95667 100644 --- a/test/monitoringTest.js +++ b/test/monitoringTest.js @@ -4,6 +4,7 @@ const helper = require('./testHelper') describe('monitoring', function () { it('should emit state counts', async function () { const defaults = { + noSupervisor: false, monitorStateIntervalSeconds: 1 } diff --git a/test/readme.js b/test/readme.js index cac01c37..e60e064e 100644 --- a/test/readme.js +++ b/test/readme.js @@ -14,11 +14,10 @@ async function readme () { console.log(`created cronjob in queue ${queue}`) - await boss.work(queue, someAsyncJobHandler) -} - -async function someAsyncJobHandler (job) { - console.log(`running job ${job.id}`) + await boss.work(queue, async job => { + console.log(`running job ${job.id}`) + boss.unschedule(queue) + }) } readme() diff --git a/test/retryTest.js b/test/retryTest.js index f646f56a..566e9f23 100644 --- a/test/retryTest.js +++ b/test/retryTest.js @@ -91,10 +91,12 @@ describe('retries', function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema - const jobId = await boss.send(queue, null, { retryDelay: 1 }) + const jobId = await boss.send(queue, null, { retryDelay: 1, retryLimit: 0 }) await boss.fetch(queue) await boss.fail(jobId) + await delay(1000) + const job1 = await boss.fetch(queue) assert(job1) diff --git a/test/scheduleTest.js b/test/scheduleTest.js index c3c3fde5..c9b6ba7d 100644 --- a/test/scheduleTest.js +++ b/test/scheduleTest.js @@ -11,7 +11,8 @@ describe('schedule', function () { it('should send job based on every minute expression', async function () { const config = { ...this.test.bossConfig, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + noScheduling: false } const boss = this.test.boss = await 
helper.start(config) @@ -31,7 +32,8 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, clockMonitorIntervalSeconds: 1, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + noScheduling: false } const boss = this.test.boss = await helper.start(config) @@ -51,7 +53,8 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + noScheduling: false } const boss = this.test.boss = await helper.start(config) @@ -88,7 +91,7 @@ describe('schedule', function () { await boss.stop() - boss = await helper.start({ ...this.test.bossConfig, cronWorkerIntervalSeconds: 1 }) + boss = await helper.start({ ...this.test.bossConfig, cronWorkerIntervalSeconds: 1, noScheduling: false }) await delay(ASSERT_DELAY) @@ -130,7 +133,8 @@ describe('schedule', function () { it('should send job based on current minute in UTC', async function () { const config = { ...this.test.bossConfig, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + noScheduling: false } const boss = this.test.boss = await helper.start(config) @@ -165,7 +169,8 @@ describe('schedule', function () { it('should send job based on current minute in a specified time zone', async function () { const config = { ...this.test.bossConfig, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + noScheduling: false } const boss = this.test.boss = await helper.start(config) @@ -202,6 +207,7 @@ describe('schedule', function () { it('should force a clock skew warning', async function () { const config = { ...this.test.bossConfig, + noScheduling: false, __test__force_clock_skew_warning: true } @@ -229,6 +235,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, clockMonitorIntervalSeconds: 1, + noScheduling: false, __test__force_clock_monitoring_error: 'pg-boss mock error: clock skew monitoring' } @@ -252,6 +259,7 @@ describe('schedule', 
function () { const config = { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, + noScheduling: false, __test__force_cron_monitoring_error: 'pg-boss mock error: cron monitoring' } diff --git a/test/testHelper.js b/test/testHelper.js index 34e08959..1306e57c 100644 --- a/test/testHelper.js +++ b/test/testHelper.js @@ -1,7 +1,5 @@ const Db = require('../src/db') const PgBoss = require('../') -const plans = require('../src/plans') -const { COMPLETION_JOB_PREFIX } = plans const crypto = require('crypto') const sha1 = (value) => crypto.createHash('sha1').update(value).digest('hex') @@ -13,7 +11,6 @@ module.exports = { getArchivedJobById, countJobs, findJobs, - COMPLETION_JOB_PREFIX, getConfig, getConnectionString, tryCreateDb, diff --git a/test/wildcardTest.js b/test/wildcardTest.js index 73cd1161..3e369755 100644 --- a/test/wildcardTest.js +++ b/test/wildcardTest.js @@ -3,7 +3,7 @@ const helper = require('./testHelper') describe('wildcard', function () { it('fetch() should return all jobs using a wildcard pattern', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema await boss.send(`${queue}_1234`) @@ -28,16 +28,4 @@ describe('wildcard', function () { }) }) }) - - it('should not accidentally fetch state completion jobs from a pattern', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - - await boss.send(`${queue}_1234`) - const job = await boss.fetch(`${queue}_*`) - await boss.complete(job.id) - const job2 = await boss.fetch(`${queue}_*`) - - assert.strictEqual(job2, null) - }) }) diff --git a/test/workTest.js b/test/workTest.js index 8843fe85..de4211a8 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -208,6 +208,8 @@ describe('work', function () { }) }) + await delay(500) + const job = await 
boss.getJobById(jobId) assert.strictEqual(job.state, 'completed') @@ -359,7 +361,7 @@ describe('work', function () { }) it('should fail job at expiration without maintenance', async function () { - const boss = this.test.boss = new PgBoss(this.test.bossConfig) + const boss = this.test.boss = new PgBoss({ ...this.test.bossConfig, noSupervisor: false }) const maintenanceTick = new Promise((resolve) => boss.on('maintenance', resolve)) @@ -382,7 +384,7 @@ describe('work', function () { }) it('should fail a batch of jobs at expiration without maintenance', async function () { - const boss = this.test.boss = new PgBoss(this.test.bossConfig) + const boss = this.test.boss = new PgBoss({ ...this.test.bossConfig, noSupervisor: false }) const maintenanceTick = new Promise((resolve) => boss.on('maintenance', resolve)) From 6e2959f8db4ead5caad12637a43329498cee27e0 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 1 Sep 2023 23:26:29 -0500 Subject: [PATCH 10/36] paritioning wip --- docs/readme.md | 24 ++-- src/boss.js | 3 +- src/index.js | 89 +++++++------ src/manager.js | 70 ++++++---- src/migrationStore.js | 34 +++-- src/plans.js | 288 +++++++++++++++++++++++++--------------- test/deleteQueueTest.js | 24 ---- test/failureTest.js | 19 +++ test/hooks.js | 5 +- test/singletonTest.js | 24 +--- test/speedTest.js | 2 +- test/testHelper.js | 2 +- types.d.ts | 3 - 13 files changed, 335 insertions(+), 252 deletions(-) diff --git a/docs/readme.md b/docs/readme.md index e18c1d42..bb36f600 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -27,7 +27,6 @@ - [`send(request)`](#sendrequest) - [`sendAfter(name, data, options, seconds | ISO date string | Date)`](#sendaftername-data-options-seconds--iso-date-string--date) - [`sendOnce(name, data, options, key)`](#sendoncename-data-options-key) - - [`sendSingleton(name, data, options)`](#sendsingletonname-data-options) - [`sendThrottled(name, data, options, seconds [, key])`](#sendthrottledname-data-options-seconds--key) - 
[`sendDebounced(name, data, options, seconds [, key])`](#senddebouncedname-data-options-seconds--key) - [`insert([jobs])`](#insertjobs) @@ -55,8 +54,8 @@ - [`notifyWorker(id)`](#notifyworkerid) - [`getQueueSize(name [, options])`](#getqueuesizename--options) - [`getJobById(id, options)`](#getjobbyidid-options) + - [`createQueue(name, type)`](#createqueuename-type) - [`deleteQueue(name)`](#deletequeuename) - - [`deleteAllQueues()`](#deleteallqueues) - [`clearStorage()`](#clearstorage) @@ -642,11 +641,6 @@ Send a job with a unique key to only allow 1 job to be in created, retry, or act This is a convenience version of `send()` with the `singletonKey` option assigned. -### `sendSingleton(name, data, options)` - -Send a job but only allow 1 job to be in created or retry state at at time. - -This is a convenience version of `send()` with the `singletonKey` option assigned. ### `sendThrottled(name, data, options, seconds [, key])` @@ -1006,13 +1000,21 @@ As an example, the following options object include active jobs along with creat Retrieves a job with all metadata by id in either the primary or archive storage. -## `deleteQueue(name)` +## `createQueue(name, type)` -Deletes all pending jobs in the specified queue from the active job table. All jobs in the archive table are retained. +Creates a typed queue. This is an optional step in order to use unique constraints to limit how many jobs can exist in each state. -## `deleteAllQueues()` +Allowed type values: + +| type | description | +| - | - | +| debounced | Allows only 1 job to be queued, unlimited active | +| singleton | Allows only 1 job to be active, unlimited queued | +| stately | Combination of the above: Allow 1 job to be queued. Allow 1 job to be active | + +## `deleteQueue(name)` -Deletes all pending jobs from all queues in the active job table. All jobs in the archive table are retained. +Deletes a queue and all jobs from the active job table. All jobs in the archive table are retained. 
## `clearStorage()` diff --git a/src/boss.js b/src/boss.js index a9d01114..30f5d9a1 100644 --- a/src/boss.js +++ b/src/boss.js @@ -1,6 +1,5 @@ const EventEmitter = require('events') const plans = require('./plans') -const { states } = require('./plans') const queues = { MAINTENANCE: '__pgboss__maintenance', @@ -84,7 +83,7 @@ class Boss extends EventEmitter { const { secondsAgo } = await this.getMaintenanceTime() if (secondsAgo > this.maintenanceIntervalSeconds * 2) { - await this.manager.deleteQueue(queues.MAINTENANCE, { before: states.completed }) + await this.manager.deleteQueue(queues.MAINTENANCE) await this.maintenanceAsync() } } catch (err) { diff --git a/src/index.js b/src/index.js index ee8d77e4..c7bbae1c 100644 --- a/src/index.js +++ b/src/index.js @@ -90,10 +90,12 @@ class PgBoss extends EventEmitter { } async start () { - if (!this.stopped) { - return this + if (this.starting || this.started) { + return } + this.starting = true + if (this.db.isOurs && !this.db.opened) { await this.db.open() } @@ -102,9 +104,6 @@ class PgBoss extends EventEmitter { await this.contractor.start() } - this.stopped = false - this.started = true - this.manager.start() if (!this.config.noSupervisor) { @@ -115,19 +114,19 @@ class PgBoss extends EventEmitter { await this.timekeeper.start() } + this.starting = false + this.started = true + this.stopped = false + return this } async stop (options = {}) { - if (this.stoppingOn) { + if (this.stoppingOn || this.stopped) { return } - if (this.stopped) { - this.emit(events.stopped) - } - - let { destroy = false, graceful = true, timeout = 30000 } = options + let { destroy = false, graceful = true, timeout = 30000, wait = false } = options timeout = Math.max(timeout, 1000) @@ -135,46 +134,54 @@ class PgBoss extends EventEmitter { await this.manager.stop() await this.timekeeper.stop() + await this.boss.stop() - const shutdown = async () => { - this.stopped = true - this.stoppingOn = null - - if (this.db.isOurs && this.db.opened && destroy) 
{ - await this.db.close() - } - - this.emit(events.stopped) - } - - if (!graceful) { - await this.boss.stop() - await shutdown() - return - } - - setImmediate(async () => { - let closing = false - + await new Promise((resolve, reject) => { try { - while (Date.now() - this.stoppingOn < timeout) { - if (this.manager.getWipData({ includeInternal: closing }).length === 0) { - if (closing) { - break + const shutdown = async () => { + try { + await this.manager.failWip() + + if (this.db.isOurs && this.db.opened && destroy) { + await this.db.close() } - closing = true + this.stopped = true + this.stoppingOn = null + this.started = false - await this.boss.stop() + this.emit(events.stopped) + resolve() + } catch (err) { + this.emit(events.error, err) + reject(err) } + } - await delay(1000) + if (!graceful) { + return shutdown() } - await this.boss.stop() - await shutdown() + if (!wait) { + resolve() + } + + const isWip = () => this.manager.getWipData({ includeInternal: false }).length > 0 + + setImmediate(async () => { + try { + while ((Date.now() - this.stoppingOn) < timeout && isWip()) { + await delay(500) + } + + await shutdown() + } catch (err) { + this.emit(events.error, err) + reject(err) + } + }) } catch (err) { - this.emit(events.error, err) + reject(err) } }) } diff --git a/src/manager.js b/src/manager.js index a17b1cac..daa17bfd 100644 --- a/src/manager.js +++ b/src/manager.js @@ -15,7 +15,8 @@ const { QUEUES: TIMEKEEPER_QUEUES } = require('./timekeeper') const INTERNAL_QUEUES = Object.values(BOSS_QUEUES).concat(Object.values(TIMEKEEPER_QUEUES)).reduce((acc, i) => ({ ...acc, [i]: i }), {}) const plans = require('./plans') -// const { SINGLETON_TYPE } = plans + +const { QUEUE_POLICY } = plans const WIP_EVENT_INTERVAL = 2000 const WIP_DEBOUNCE_OPTIONS = { leading: true, trailing: true, maxWait: WIP_EVENT_INTERVAL } @@ -84,9 +85,8 @@ class Manager extends EventEmitter { this.sendThrottled, this.sendOnce, this.sendAfter, - this.sendSingleton, + this.createQueue, 
this.deleteQueue, - this.deleteAllQueues, this.clearStorage, this.getQueueSize, this.getJobById @@ -102,13 +102,21 @@ class Manager extends EventEmitter { async stop () { this.stopping = true - for (const sub of this.workers.values()) { - if (!INTERNAL_QUEUES[sub.name]) { - await this.offWork(sub.name) + for (const worker of this.workers.values()) { + if (!INTERNAL_QUEUES[worker.name]) { + await this.offWork(worker.name) } } } + async failWip () { + const jobIds = Array.from(this.workers.values()).flatMap(w => w.jobs.map(j => j.id)) + + if (jobIds.length) { + await this.fail(jobIds, 'pg-boss shut down while active') + } + } + async work (name, ...args) { const { options, callback } = Attorney.checkWorkArgs(name, args, this.config) return await this.watch(name, options, callback) @@ -335,16 +343,6 @@ class Manager extends EventEmitter { return await this.createJob(result.name, result.data, result.options) } - async sendSingleton (name, data, options) { - options = options ? { ...options } : {} - - // options.singletonKey = SINGLETON_QUEUE_KEY - - const result = Attorney.checkSendArgs([name, data, options], this.config) - - return await this.createJob(result.name, result.data, result.options) - } - async sendAfter (name, data, options, after) { options = options ? 
{ ...options } : {} options.startAfter = after @@ -388,7 +386,7 @@ class Manager extends EventEmitter { retryBackoff, retryLimit, retryDelay, - deadLetter + deadLetter = null } = options const id = uuid[this.config.uuid]() @@ -552,6 +550,38 @@ class Manager extends EventEmitter { return this.mapCompletionResponse(ids, result) } + async createQueue (name, options) { + assert(name, 'Missing queue name argument') + + const { + policy, + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + } = options + + const params = [ + policy, + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + ] + + assert(policy in QUEUE_POLICY, `${policy} is not a valid queue policy`) + + // todo + + const sql = plans.createQueue(this.config.schema)(name) + + await this.db.executeSql(sql, params) + } + async deleteQueue (queue, options) { assert(queue, 'Missing queue name argument') const sql = plans.deleteQueue(this.config.schema, options) @@ -559,12 +589,6 @@ class Manager extends EventEmitter { return result ? result.rowCount : null } - async deleteAllQueues (options) { - const sql = plans.deleteAllQueues(this.config.schema, options) - const result = await this.db.executeSql(sql) - return result ? 
result.rowCount : null - } - async clearStorage () { const sql = plans.clearStorage(this.config.schema) await this.db.executeSql(sql) diff --git a/src/migrationStore.js b/src/migrationStore.js index a3999336..6dea55ed 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -74,6 +74,7 @@ function getAll (schema) { `DROP INDEX ${schema}.job_singletonOn`, `DROP INDEX ${schema}.job_singletonKeyOn`, `DROP INDEX ${schema}.job_fetch`, + `ALTER TABLE ${schema}.job ADD COLUMN policy text`, `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, @@ -88,21 +89,34 @@ function getAll (schema) { `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()`, `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)`, `CREATE INDEX archive_name_idx ON ${schema}.archive(name)`, - `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, - `CREATE UNIQUE INDEX job_singleton ON ${schema}.job (name, state) WHERE state <= 'active' AND singletonKey = '__pgboss-singleton-incomplete' AND singletonOn IS NULL`, - `CREATE UNIQUE INDEX job_singleton_queued ON ${schema}.job (name) WHERE state <= 'retry' AND singletonKey = '__pgboss-singleton-queued' AND singletonOn IS NULL`, - `CREATE UNIQUE INDEX job_singleton_active ON ${schema}.job (name) WHERE state = 'active' AND singletonKey = '__pgboss-singleton-active' AND singletonOn IS NULL`, - `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn) WHERE state <= 'completed' AND singletonOn IS NOT NULL` + `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn) WHERE state < 'active'`, + `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state <= 'retry' AND policy = 'short'`, + `CREATE UNIQUE INDEX job_policy_singleton ON ${schema}.job (name) WHERE state = 
'active' AND policy = 'singleton'`, + `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= 'active' AND policy = 'stately'`, + `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= 'completed' AND singletonOn IS NOT NULL`, + `CREATE UNIQUE INDEX job_debounce ON ${schema}.job (name, singletonOn, singletonKey) WHERE state <= 'completed'`, + `CREATE TABLE ${schema}.queue ( + name text primary key, + policy text, + retry_limit int, + retry_delay int, + retry_backoff bool, + expire_seconds int, + retention_minutes int, + dead_letter text, + created_on timestamp with time zone not null default now(), + )` ], uninstall: [ `DROP TABLE IF EXISTS ${schema}.archive_backup`, - `DROP INDEX ${schema}.job_singleton`, - `DROP INDEX ${schema}.job_singleton_queued`, - `DROP INDEX ${schema}.job_singleton_active`, + `DROP INDEX ${schema}.job_policy_stately`, + `DROP INDEX ${schema}.job_policy_short`, + `DROP INDEX ${schema}.job_policy_singleton`, `DROP INDEX ${schema}.job_throttle`, `DROP INDEX ${schema}.job_fetch`, `DROP INDEX ${schema}.archive_archivedon_idx`, `DROP INDEX ${schema}.archive_name_idx`, + `ALTER TABLE ${schema}.job DROP COLUMN policy`, `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, @@ -111,12 +125,14 @@ function getAll (schema) { `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.archive DROP COLUMN policy`, `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, 
`CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) WHERE state < 'expired' AND singletonKey IS NULL`, `CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < 'expired'`, `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, - `CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < 'active' AND singletonOn IS NULL AND singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'` + `CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < 'active' AND singletonOn IS NULL AND singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, + `DROP TABLE ${schema}.queue` ] }, { diff --git a/src/plans.js b/src/plans.js index aeca13f8..0a98a2a4 100644 --- a/src/plans.js +++ b/src/plans.js @@ -10,16 +10,16 @@ const states = { } const DEFAULT_SCHEMA = 'pgboss' - -const SINGLETON_TYPE = { - queued: '__pgboss-singleton-queued', - active: '__pgboss-singleton-active', - incomplete: '__pgboss-singleton-incomplete' -} - const MIGRATE_RACE_MESSAGE = 'division by zero' const CREATE_RACE_MESSAGE = 'already exists' +const QUEUE_POLICY = { + short: 'short', + priority: 'priority', + singleton: 'singleton', + stately: 'stately' +} + module.exports = { create, insertVersion, @@ -44,8 +44,8 @@ module.exports = { archive, purge, countStates, + createQueue, deleteQueue, - deleteAllQueues, clearStorage, getQueueSize, getMaintenanceTime, @@ -57,7 +57,6 @@ module.exports = { getArchivedJobById, getJobById, states: { ...states }, - SINGLETON_TYPE, MIGRATE_RACE_MESSAGE, CREATE_RACE_MESSAGE, DEFAULT_SCHEMA @@ -80,23 +79,31 @@ function locked (schema, query) { function create (schema, version) { const commands = [ createSchema(schema), - createVersionTable(schema), - createJobStateEnum(schema), - createJobTable(schema), + 
createEnumJobState(schema), + + createTableJob(schema), + createTablePartitionJobDefault(schema), + createPrimaryKeyJob(schema), createIndexJobName(schema), createIndexJobFetch(schema), - createIndexSingleton(schema), - createIndexSingletonQueued(schema), - createIndexSingletonActive(schema), - createIndexThrottle(schema), - createArchiveTable(schema), - addPrimaryKeyToArchive(schema), - addArchivedOnToArchive(schema), - addArchivedOnIndexToArchive(schema), - addNameIndexToArchive(schema), + createIndexJobPolicyStately(schema), + createIndexJobPolicyShort(schema), + createIndexJobPolicySingleton(schema), + createIndexJobThrottle(schema), + createIndexJobDebounce(schema), + + createTableArchive(schema), + createIndexArchiveId(schema), + createColumnArchiveArchivedOn(schema), + createIndexArchiveArchivedOn(schema), + createIndexArchiveName(schema), createArchiveBackupTable(schema), - createScheduleTable(schema), - createSubscriptionTable(schema), + + createTableVersion(schema), + createTableQueue(schema), + createTableSchedule(schema), + createTableSubscription(schema), + insertVersion(schema, version) ] @@ -109,7 +116,7 @@ function createSchema (schema) { ` } -function createVersionTable (schema) { +function createTableVersion (schema) { return ` CREATE TABLE ${schema}.version ( version int primary key, @@ -119,7 +126,7 @@ function createVersionTable (schema) { ` } -function createJobStateEnum (schema) { +function createEnumJobState (schema) { // ENUM definition order is important // base type is numeric and first values are less than last values return ` @@ -134,10 +141,10 @@ function createJobStateEnum (schema) { ` } -function createJobTable (schema) { +function createTableJob (schema) { return ` CREATE TABLE ${schema}.job ( - id uuid primary key not null default gen_random_uuid(), + id uuid not null default gen_random_uuid(), name text not null, priority integer not null default(0), data jsonb, @@ -155,25 +162,38 @@ function createJobTable (schema) { completedOn 
timestamp with time zone, keepUntil timestamp with time zone NOT NULL default now() + interval '14 days', output jsonb, - deadletter text - ) + deadletter text, + policy text + ) PARTITION BY RANGE (name) ` } -function createIndexSingleton (schema) { - return `CREATE UNIQUE INDEX job_singleton ON ${schema}.job (name, state) WHERE state <= '${states.active}' AND singletonKey = '${SINGLETON_TYPE.incomplete}' AND singletonOn IS NULL` +function createTablePartitionJobDefault (schema) { + return `CREATE TABLE ${schema}.job_default PARTITION OF ${schema}.job DEFAULT` +} + +function createPrimaryKeyJob (schema) { + return `ALTER TABLE ${schema}.job ADD CONSTRAINT job_pkey PRIMARY KEY (name, id)` +} + +function createIndexJobPolicyShort (schema) { + return `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state <= '${states.retry}' AND policy = '${QUEUE_POLICY.short}'` +} + +function createIndexJobPolicySingleton (schema) { + return `CREATE UNIQUE INDEX job_policy_singleton ON ${schema}.job (name) WHERE state = '${states.active}' AND policy = '${QUEUE_POLICY.singleton}'` } -function createIndexSingletonQueued (schema) { - return `CREATE UNIQUE INDEX job_singleton_queued ON ${schema}.job (name) WHERE state <= '${states.retry}' AND singletonKey = '${SINGLETON_TYPE.queued}' AND singletonOn IS NULL` +function createIndexJobPolicyStately (schema) { + return `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= '${states.active}' AND policy = '${QUEUE_POLICY.stately}'` } -function createIndexSingletonActive (schema) { - return `CREATE UNIQUE INDEX job_singleton_active ON ${schema}.job (name) WHERE state = '${states.active}' AND singletonKey = '${SINGLETON_TYPE.active}' AND singletonOn IS NULL` +function createIndexJobThrottle (schema) { + return `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn) WHERE state <= '${states.completed}' AND singletonOn IS NOT NULL AND singletonKey IS NULL` } -function 
createIndexThrottle (schema) { - return `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn) WHERE state <= '${states.completed}' AND singletonOn IS NOT NULL` +function createIndexJobDebounce (schema) { + return `CREATE UNIQUE INDEX job_debounce ON ${schema}.job (name, singletonOn, singletonKey) WHERE state <= '${states.completed}'` } function createIndexJobName (schema) { @@ -181,10 +201,10 @@ function createIndexJobName (schema) { } function createIndexJobFetch (schema) { - return `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < '${states.active}'` + return `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn) WHERE state < '${states.active}'` } -function createArchiveTable (schema) { +function createTableArchive (schema) { return `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)` } @@ -192,19 +212,19 @@ function createArchiveBackupTable (schema) { return `CREATE TABLE ${schema}.archive_backup (LIKE ${schema}.job)` } -function addPrimaryKeyToArchive (schema) { - return `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (id)` +function createIndexArchiveId (schema) { + return `CREATE INDEX archive_id on ${schema}.archive (id)` } -function addArchivedOnToArchive (schema) { +function createColumnArchiveArchivedOn (schema) { return `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()` } -function addArchivedOnIndexToArchive (schema) { +function createIndexArchiveArchivedOn (schema) { return `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)` } -function addNameIndexToArchive (schema) { +function createIndexArchiveName (schema) { return `CREATE INDEX archive_name_idx ON ${schema}.archive(name)` } @@ -225,16 +245,45 @@ function getCronTime (schema) { return `SELECT cron_on, EXTRACT( EPOCH FROM (now() - cron_on) ) seconds_ago FROM ${schema}.version` } -function deleteQueue (schema, options 
= {}) { - options.before = options.before || states.active - assert(options.before in states, `${options.before} is not a valid state`) - return `DELETE FROM ${schema}.job WHERE name = $1 and state < '${options.before}'` +function createQueue (schema) { + return (name) => { + return ` + WITH partition AS ( + CREATE TABLE ${schema}.job_${name} PARTITION OF ${schema}.job + FOR VALUES FROM ('${name}') TO ('${name}') + ) + INSERT INTO ${schema}.queue ( + name, + policy, + retry_limit, + retry_delay, + retry_backoff, + expire_seconds, + retention_minutes, + dead_letter + ) VALUES ( + '${name}', + $1, + $2, + $3, + $4, + $5, + $6, + $7, + ) + + ` + } } -function deleteAllQueues (schema, options = {}) { - options.before = options.before || states.active - assert(options.before in states, `${options.before} is not a valid state`) - return `DELETE FROM ${schema}.job WHERE state < '${options.before}'` +function deleteQueue (schema, name) { + return ` + WITH deleted_queue AS ( + DELETE FROM ${schema}.queue WHERE name = '${name}' + ) + DELETE FROM ${schema}.job WHERE name = '${name}'; + DROP TABLE IF EXISTS job_${name} + ` } function clearStorage (schema) { @@ -247,7 +296,23 @@ function getQueueSize (schema, options = {}) { return `SELECT count(*) as count FROM ${schema}.job WHERE name = $1 AND state < '${options.before}'` } -function createScheduleTable (schema) { +function createTableQueue (schema) { + return ` + CREATE TABLE ${schema}.queue ( + name text primary key, + policy text, + retry_limit int, + retry_delay int, + retry_backoff bool, + expire_seconds int, + retention_minutes int, + dead_letter text, + created_on timestamp with time zone not null default now() + ) + ` +} + +function createTableSchedule (schema) { return ` CREATE TABLE ${schema}.schedule ( name text primary key, @@ -261,7 +326,7 @@ function createScheduleTable (schema) { ` } -function createSubscriptionTable (schema) { +function createTableSubscription (schema) { return ` CREATE TABLE 
${schema}.subscription ( event text not null, @@ -467,68 +532,64 @@ function insertJob (schema) { INSERT INTO ${schema}.job ( id, name, + data, priority, state, - retryLimit, startAfter, - expireIn, - data, singletonKey, singletonOn, + expireIn, + retryLimit, retryDelay, retryBackoff, + deadletter, keepUntil, - deadletter + policy ) SELECT id, - name, + j.name, + data, priority, state, - retryLimit, startAfter, - expireIn, - data, singletonKey, singletonOn, - retryDelay, - retryBackoff, - keepUntil, - deadletter + CASE WHEN expireIn IS NOT NULL THEN expireIn + WHEN q.expire_seconds IS NOT NULL THEN q.expire_seconds * interval '1s' + ELSE interval '15 minutes' + END, + COALESCE(retryLimit, q.retry_limit, 2), + COALESCE(retryDelay, q.retry_delay, 0), + COALESCE(retryBackoff, q.retry_backoff, false), + COALESCE(deadLetter, q.dead_letter), + CASE WHEN right(keepUntilValue, 1) = 'Z' THEN CAST(keepUntilValue as timestamp with time zone) + ELSE startAfter + CAST(COALESCE(keepUntilValue, (COALESCE(q.retention_minutes, 0) * 60)::text, '0') as interval) + END as keepUntil, + q.policy FROM - ( SELECT *, - CASE - WHEN right(keepUntilValue, 1) = 'Z' THEN CAST(keepUntilValue as timestamp with time zone) - ELSE startAfter + CAST(COALESCE(keepUntilValue,'0') as interval) - END as keepUntil - FROM - ( SELECT *, + ( SELECT + $1::uuid as id, + $2::text as name, + $3::int as priority, + '${states.created}'::${schema}.job_state as state, + $4::int as retryLimit, + CASE + WHEN right($5::text, 1) = 'Z' THEN CAST($5::text as timestamp with time zone) + ELSE now() + CAST(COALESCE($5::text,'0') as interval) + END as startAfter, + CAST($6 as interval) as expireIn, + $7::jsonb as data, + $8::text as singletonKey, CASE - WHEN right(startAfterValue, 1) = 'Z' THEN CAST(startAfterValue as timestamp with time zone) - ELSE now() + CAST(COALESCE(startAfterValue,'0') as interval) - END as startAfter - FROM - ( SELECT - $1::uuid as id, - $2::text as name, - $3::int as priority, - 
'${states.created}'::${schema}.job_state as state, - $4::int as retryLimit, - $5::text as startAfterValue, - CAST($6 as interval) as expireIn, - $7::jsonb as data, - $8::text as singletonKey, - CASE - WHEN $9::integer IS NOT NULL THEN 'epoch'::timestamp + '1 second'::interval * ($9 * floor((date_part('epoch', now()) + $10) / $9)) - ELSE NULL - END as singletonOn, - $11::int as retryDelay, - $12::bool as retryBackoff, - $13::text as keepUntilValue, - $14::text as deadletter - ) j1 - ) j2 - ) j3 + WHEN $9::integer IS NOT NULL THEN 'epoch'::timestamp + '1 second'::interval * ($9 * floor((date_part('epoch', now()) + $10) / $9)) + ELSE NULL + END as singletonOn, + $11::int as retryDelay, + $12::bool as retryBackoff, + $13::text as keepUntilValue, + $14::text as deadletter + ) j LEFT JOIN ${schema}.queue q ON j.name = q.name ON CONFLICT DO NOTHING RETURNING id ` @@ -542,28 +603,34 @@ function insertJobs (schema) { data, priority, startAfter, + singletonKey, expireIn, retryLimit, retryDelay, retryBackoff, - singletonKey, + deadletter, keepUntil, - deadletter + policy ) SELECT COALESCE(id, gen_random_uuid()) as id, - name, + j.name, data, - COALESCE(priority, 0) as priority, - COALESCE("startAfter", now()) as startAfter, - COALESCE("expireInSeconds", 15 * 60) * interval '1s' as expireIn, - COALESCE("retryLimit", 0) as retryLimit, - COALESCE("retryDelay", 0) as retryDelay, - COALESCE("retryBackoff", false) as retryBackoff, + COALESCE(priority, 0), + COALESCE("startAfter", now()), "singletonKey", - COALESCE("keepUntil", now() + interval '14 days') as keepUntil, - "deadLetter" - FROM json_to_recordset($1) as x( + COALESCE("expireInSeconds", q.expire_seconds, 15 * 60) * interval '1s', + COALESCE("retryLimit", q.retry_limit, 2), + COALESCE("retryDelay", q.retry_delay, 0), + COALESCE("retryBackoff", q.retry_backoff, false), + COALESCE("deadLetter", q.dead_letter), + CASE + WHEN "keepUntil" IS NOT NULL THEN "keepUntil" + WHEN q.retention_minutes IS NOT NULL THEN now() + 
q.retention_minutes * interval '1 minute' + ELSE now() + interval '14 days') + END, + q.policy + FROM json_to_recordset($1) as j ( id uuid, name text, priority integer, @@ -577,6 +644,7 @@ function insertJobs (schema) { "keepUntil" timestamp with time zone, "deadLetter" text ) + LEFT JOIN ${schema}.queue q ON j.name = q.name ON CONFLICT DO NOTHING ` } diff --git a/test/deleteQueueTest.js b/test/deleteQueueTest.js index f7d68287..1517d970 100644 --- a/test/deleteQueueTest.js +++ b/test/deleteQueueTest.js @@ -33,30 +33,6 @@ describe('deleteQueue', function () { assert.strictEqual(0, q2Count3) }) - it('should clear all queues', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue1 = 'delete-named-queue-11' - const queue2 = 'delete-named-queue-22' - - await boss.send(queue1) - await boss.send(queue2) - - const q1Count1 = await boss.getQueueSize(queue1) - const q2Count1 = await boss.getQueueSize(queue2) - - assert.strictEqual(1, q1Count1) - assert.strictEqual(1, q2Count1) - - await boss.deleteAllQueues() - - const q1Count2 = await boss.getQueueSize(queue1) - const q2Count2 = await boss.getQueueSize(queue2) - - assert.strictEqual(0, q1Count2) - assert.strictEqual(0, q2Count2) - }) - it('clearStorage() should empty both job storage tables', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, archiveCompletedAfterSeconds: 1 }) const queue = this.test.bossConfig.schema diff --git a/test/failureTest.js b/test/failureTest.js index 379fad79..5d099ffa 100644 --- a/test/failureTest.js +++ b/test/failureTest.js @@ -183,4 +183,23 @@ describe('failure', function () { assert.strictEqual(job.data.key, queue) }) + + it('should fail active jobs in a worker during shutdown', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + const jobId = await boss.send(queue, null, { retryLimit: 1, 
expireInSeconds: 60 }) + + await boss.work(queue, async () => await delay(10000)) + + await delay(1000) + + await boss.stop({ wait: true, timeout: 2000 }) + + await boss.start() + + const job = await boss.fetch(queue) + + assert.strictEqual(job?.id, jobId) + }) }) diff --git a/test/hooks.js b/test/hooks.js index eae6392e..0a67410a 100644 --- a/test/hooks.js +++ b/test/hooks.js @@ -26,10 +26,7 @@ async function afterEach () { const { boss } = this.currentTest if (boss) { - await new Promise((resolve) => { - boss.on('stopped', resolve) - helper.stop(boss) - }) + await boss.stop({ wait: true, timeout: 2000 }) } await helper.dropSchema(config.schema) diff --git a/test/singletonTest.js b/test/singletonTest.js index 10e17271..2caa66a5 100644 --- a/test/singletonTest.js +++ b/test/singletonTest.js @@ -4,7 +4,7 @@ const helper = require('./testHelper') describe('singleton', function () { it('should not allow more than 1 pending job at a time with the same key', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, debug: true }) const queue = 'singleton-1-pending' const singletonKey = 'a' @@ -84,28 +84,6 @@ describe('singleton', function () { assert.strictEqual(jobId2, null) }) - it('sendSingleton() works', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = this.test.bossConfig.schema - - const jobId = await boss.sendSingleton(queue) - - assert(jobId) - - const jobId2 = await boss.sendSingleton(queue) - - assert.strictEqual(jobId2, null) - - const job = await boss.fetch(queue) - - assert.strictEqual(job.id, jobId) - - const jobId3 = await boss.sendSingleton(queue) - - assert(jobId3) - }) - it('useSingletonQueue allows a second singleton job if first has enetered active state', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) diff --git a/test/speedTest.js 
b/test/speedTest.js index f66bc933..48b781eb 100644 --- a/test/speedTest.js +++ b/test/speedTest.js @@ -3,7 +3,7 @@ const pMap = require('p-map') describe('speed', function () { const expectedSeconds = 2 - const jobCount = 10000 + const jobCount = 10_000 const queue = 'speedTest' const jobs = new Array(jobCount).fill(null).map((item, index) => ({ name: queue, data: { index } })) diff --git a/test/testHelper.js b/test/testHelper.js index 1306e57c..b4887e9b 100644 --- a/test/testHelper.js +++ b/test/testHelper.js @@ -127,6 +127,6 @@ async function start (options) { } } -async function stop (boss, timeout = 4000) { +async function stop (boss, timeout = 1000) { await boss.stop({ timeout }) } diff --git a/types.d.ts b/types.d.ts index a10ba458..d3cb49a0 100644 --- a/types.d.ts +++ b/types.d.ts @@ -295,8 +295,6 @@ declare class PgBoss extends EventEmitter { sendOnce(name: string, data: object, options: PgBoss.SendOptions, key: string): Promise; - sendSingleton(name: string, data: object, options: PgBoss.SendOptions): Promise; - sendThrottled(name: string, data: object, options: PgBoss.SendOptions, seconds: number): Promise; sendThrottled(name: string, data: object, options: PgBoss.SendOptions, seconds: number, key: string): Promise; @@ -351,7 +349,6 @@ declare class PgBoss extends EventEmitter { getJobById(id: string, options?: PgBoss.ConnectionOptions): Promise; deleteQueue(name: string): Promise; - deleteAllQueues(): Promise; clearStorage(): Promise; archive(): Promise; From 273b3228c04441349ccfc0b64bc025d4354bc432 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 3 Sep 2023 21:17:58 -0500 Subject: [PATCH 11/36] added queue ops and tests --- docs/readme.md | 16 +++--- src/attorney.js | 55 ++++++++++++--------- src/index.js | 6 +-- src/manager.js | 98 +++++++++++++++++++++++++++++-------- src/plans.js | 80 +++++++++++++++++------------- test/backgroundErrorTest.js | 9 ++-- test/completeTest.js | 2 +- test/maintenanceTest.js | 6 +-- test/migrationTest.js | 12 +++-- 
test/monitoringTest.js | 2 +- test/multiMasterTest.js | 6 +-- test/opsTest.js | 27 +++++----- test/queueTest.js | 98 +++++++++++++++++++++++++++++++++++++ test/scheduleTest.js | 22 ++++----- test/speedTest.js | 2 +- test/testHelper.js | 4 +- test/workTest.js | 42 +++++----------- types.d.ts | 4 +- 18 files changed, 322 insertions(+), 169 deletions(-) create mode 100644 test/queueTest.js diff --git a/docs/readme.md b/docs/readme.md index bb36f600..12350f52 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -355,13 +355,17 @@ Queue options contain the following constructor-only settings. Maintenance operations include checking active jobs for expiration, archiving completed jobs from the primary job table, and deleting archived jobs from the archive table. -* **noSupervisor**, bool, default false +* **supervise**, bool, default true - If this is set to true, maintenance and monitoring operations will not be started during a `start()` after the schema is created. This is an advanced use case, as bypassing maintenance operations is not something you would want to do under normal circumstances. + If this is set to false, maintenance and monitoring operations will be disabled on this instance. This is an advanced use case, as bypassing maintenance operations is not something you would want to do under normal circumstances. -* **noScheduling**, bool, default false +* **schedule**, bool, default true - If this is set to true, this instance will not monitor scheduled jobs during `start()`. However, this instance can still use the scheduling api. This is an advanced use case you may want to do for testing or if the clock of the server is skewed and you would like to disable the skew warnings. + If this is set to false, this instance will not monitor or created scheduled jobs during. This is an advanced use case you may want to do for testing or if the clock of the server is skewed and you would like to disable the skew warnings. 
+ +* **migrate**, bool, default true + + If this is set to false, this instance will skip attempts to run schema migratations during `start()`. If schema migrations exist, `start()` will throw and error and block usage. This is an advanced use case when the configured user account does not have schema mutation privileges. **Archive options** @@ -871,7 +875,7 @@ Remove the subscription of queue `name` to `event`. ## Scheduling -Jobs may be sent automatically based on a cron expression. As with other cron-based systems, at least one instance needs to be running for scheduling to work. In order to reduce the amount of evaluations, schedules are checked every 30 seconds, which means the 6-placeholder format should be discouraged in favor of the minute-level precision 5-placeholder format. +Jobs may be created automatically based on a cron expression. As with other cron-based systems, at least one instance needs to be running for scheduling to work. In order to reduce the amount of evaluations, schedules are checked every 30 seconds, which means the 6-placeholder format should be discouraged in favor of the minute-level precision 5-placeholder format. 
For example, use this format, which implies "any second during 3:30 am every day" @@ -891,7 +895,7 @@ If needed, the default clock monitoring interval can be adjusted using `clockMon ```js { - noScheduling: true + schedule: false } ``` diff --git a/src/attorney.js b/src/attorney.js index 01b34bad..21bed07e 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -4,11 +4,12 @@ const { DEFAULT_SCHEMA } = require('./plans') module.exports = { getConfig, checkSendArgs, - checkInsertArgs, + checkQueueArgs, checkWorkArgs, checkFetchArgs, warnClockSkew, - queueNameHasPatternMatch + queueNameHasPatternMatch, + assertPostgresObjectName } const WARNINGS = { @@ -26,6 +27,18 @@ const WARNINGS = { } } +function checkQueueArgs (name, options = {}) { + assertPostgresObjectName(name) + + assert(!('deadLetter' in options) || (typeof options.deadLetter === 'string'), 'deadLetter must be a string') + + applyRetryConfig(options) + applyExpirationConfig(options) + applyRetentionConfig(options) + + return options +} + function checkSendArgs (args, defaults) { let name, data, options @@ -58,10 +71,11 @@ function checkSendArgs (args, defaults) { assert(!('priority' in options) || (Number.isInteger(options.priority)), 'priority must be an integer') options.priority = options.priority || 0 + assert(!('deadLetter' in options) || (typeof options.deadLetter === 'string'), 'deadLetter must be a string') + applyRetryConfig(options, defaults) applyExpirationConfig(options, defaults) applyRetentionConfig(options, defaults) - // applySingletonKeyConfig(options) const { startAfter, singletonSeconds, singletonMinutes, singletonHours } = options @@ -86,22 +100,6 @@ function checkSendArgs (args, defaults) { return { name, data, options } } -function checkInsertArgs (jobs) { - assert(Array.isArray(jobs), `jobs argument should be an array. 
Received '${typeof jobs}'`) - return jobs.map(job => { - job = { ...job } - // applySingletonKeyConfig(job) - return job - }) -} - -// function applySingletonKeyConfig (options) { -// if (options.singletonKey && options.useSingletonQueue && options.singletonKey !== SINGLETON_QUEUE_KEY) { -// options.singletonKey = SINGLETON_QUEUE_KEY + options.singletonKey -// } -// delete options.useSingletonQueue -// } - function checkWorkArgs (name, args, defaults) { let options, callback @@ -162,7 +160,7 @@ function getConfig (value) { ? { connectionString: value } : { ...value } - applyDatabaseConfig(config) + applySchemaConfig(config) applyMaintenanceConfig(config) applyArchiveConfig(config) applyArchiveFailedConfig(config) @@ -177,16 +175,21 @@ function getConfig (value) { return config } -function applyDatabaseConfig (config) { +function applySchemaConfig (config) { if (config.schema) { - assert(typeof config.schema === 'string', 'configuration assert: schema must be a string') - assert(config.schema.length <= 50, 'configuration assert: schema name cannot exceed 50 characters') - assert(!/\W/.test(config.schema), `configuration assert: ${config.schema} cannot be used as a schema. Only alphanumeric characters and underscores are allowed`) + assertPostgresObjectName(config.schema) } config.schema = config.schema || DEFAULT_SCHEMA } +function assertPostgresObjectName (name) { + assert(typeof name === 'string', 'Name must be a string') + assert(name.length <= 50, 'Name cannot exceed 50 characters') + assert(!/\W/.test(name), 'Name can only contain alphanumeric characters and underscores are allowed') + assert(!/^d/.test(name), 'Name cannot start with a number') +} + function applyArchiveConfig (config) { const ARCHIVE_DEFAULT = 60 * 60 * 12 @@ -322,6 +325,10 @@ function applyMaintenanceConfig (config) { : ('maintenanceIntervalSeconds' in config) ? config.maintenanceIntervalSeconds : 120 + + config.schedule = ('schedule' in config) ? 
config.schedule : true + config.maintenance = ('maintenance' in config) ? config.maintenance : true + config.migrate = ('migrate' in config) ? config.migrate : true } function applyDeleteConfig (config) { diff --git a/src/index.js b/src/index.js index c7bbae1c..26da290b 100644 --- a/src/index.js +++ b/src/index.js @@ -100,17 +100,17 @@ class PgBoss extends EventEmitter { await this.db.open() } - if (!this.config.noContractor) { + if (this.config.migrate) { await this.contractor.start() } this.manager.start() - if (!this.config.noSupervisor) { + if (this.config.supervise) { await this.boss.supervise() } - if (!this.config.noScheduling) { + if (this.config.schedule) { await this.timekeeper.start() } diff --git a/src/manager.js b/src/manager.js index daa17bfd..4c998787 100644 --- a/src/manager.js +++ b/src/manager.js @@ -4,20 +4,19 @@ const delay = require('delay') const uuid = require('uuid') const debounce = require('lodash.debounce') const { serializeError: stringify } = require('serialize-error') +const pMap = require('p-map') + const Attorney = require('./attorney') const Worker = require('./worker') +const plans = require('./plans') const Db = require('./db') -const pMap = require('p-map') const { QUEUES: BOSS_QUEUES } = require('./boss') const { QUEUES: TIMEKEEPER_QUEUES } = require('./timekeeper') +const { QUEUE_POLICY } = plans const INTERNAL_QUEUES = Object.values(BOSS_QUEUES).concat(Object.values(TIMEKEEPER_QUEUES)).reduce((acc, i) => ({ ...acc, [i]: i }), {}) -const plans = require('./plans') - -const { QUEUE_POLICY } = plans - const WIP_EVENT_INTERVAL = 2000 const WIP_DEBOUNCE_OPTIONS = { leading: true, trailing: true, maxWait: WIP_EVENT_INTERVAL } @@ -86,7 +85,10 @@ class Manager extends EventEmitter { this.sendOnce, this.sendAfter, this.createQueue, + this.updateQueue, + this.getQueueProperties, this.deleteQueue, + this.purgeQueue, this.clearStorage, this.getQueueSize, this.getJobById @@ -430,11 +432,11 @@ class Manager extends EventEmitter { } async 
insert (jobs, options = {}) { - const { db: wrapper } = options - const db = wrapper || this.db - const checkedJobs = Attorney.checkInsertArgs(jobs) - const data = JSON.stringify(checkedJobs) - return await db.executeSql(this.insertJobsCommand, [data]) + assert(Array.isArray(jobs), 'jobs argument should be an array') + + const db = options.db || this.db + + return await db.executeSql(this.insertJobsCommand, [JSON.stringify(jobs)]) } getDebounceStartAfter (singletonSeconds, clockOffset) { @@ -550,20 +552,30 @@ class Manager extends EventEmitter { return this.mapCompletionResponse(ids, result) } - async createQueue (name, options) { + async createQueue (name, options = {}) { assert(name, 'Missing queue name argument') + const { policy = QUEUE_POLICY.standard } = options + + assert(policy in QUEUE_POLICY, `${policy} is not a valid queue policy`) + const { - policy, retryLimit, retryDelay, retryBackoff, expireInSeconds, retentionMinutes, deadLetter - } = options + } = Attorney.checkQueueArgs(name, options) + + const paritionSql = plans.createQueueTablePartition(this.config.schema, name) + + await this.db.executeSql(paritionSql) + + const sql = plans.createQueue(this.config.schema, name) const params = [ + name, policy, retryLimit, retryDelay, @@ -573,20 +585,66 @@ class Manager extends EventEmitter { deadLetter ] - assert(policy in QUEUE_POLICY, `${policy} is not a valid queue policy`) + await this.db.executeSql(sql, params) + } - // todo + async updateQueue (name, options = {}) { + assert(name, 'Missing queue name argument') - const sql = plans.createQueue(this.config.schema)(name) + const { + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + } = Attorney.checkQueueArgs(name, options) + + const sql = plans.updateQueue(this.config.schema) + + const params = [ + name, + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + ] await this.db.executeSql(sql, params) } - async deleteQueue 
(queue, options) { + async getQueueProperties (name) { + assert(name, 'Missing queue name argument') + + const sql = plans.getQueueByName(this.config.schema) + const result = await this.db.executeSql(sql, [name]) + + return result.rows.length ? result.rows[0] : null + } + + async deleteQueue (name) { + assert(name, 'Missing queue name argument') + + const queueSql = plans.getQueueByName(this.config.schema) + const result = await this.db.executeSql(queueSql, [name]) + + if (result?.rows?.length) { + Attorney.assertPostgresObjectName(name) + const sql = plans.dropQueueTablePartition(this.config.schema, name) + await this.db.executeSql(sql) + } + + const sql = plans.deleteQueueRecords(this.config.schema) + const result2 = await this.db.executeSql(sql, [name]) + return result2?.rowCount || null + } + + async purgeQueue (queue) { assert(queue, 'Missing queue name argument') - const sql = plans.deleteQueue(this.config.schema, options) - const result = await this.db.executeSql(sql, [queue]) - return result ? 
result.rowCount : null + const sql = plans.purgeQueue(this.config.schema) + await this.db.executeSql(sql, [queue]) } async clearStorage () { diff --git a/src/plans.js b/src/plans.js index 0a98a2a4..85cedcde 100644 --- a/src/plans.js +++ b/src/plans.js @@ -14,6 +14,7 @@ const MIGRATE_RACE_MESSAGE = 'division by zero' const CREATE_RACE_MESSAGE = 'already exists' const QUEUE_POLICY = { + standard: 'standard', short: 'short', priority: 'priority', singleton: 'singleton', @@ -45,9 +46,14 @@ module.exports = { purge, countStates, createQueue, - deleteQueue, - clearStorage, + updateQueue, + createQueueTablePartition, + dropQueueTablePartition, + deleteQueueRecords, + getQueueByName, getQueueSize, + purgeQueue, + clearStorage, getMaintenanceTime, setMaintenanceTime, getCronTime, @@ -56,6 +62,7 @@ module.exports = { assertMigration, getArchivedJobById, getJobById, + QUEUE_POLICY, states: { ...states }, MIGRATE_RACE_MESSAGE, CREATE_RACE_MESSAGE, @@ -246,46 +253,49 @@ function getCronTime (schema) { } function createQueue (schema) { - return (name) => { - return ` - WITH partition AS ( - CREATE TABLE ${schema}.job_${name} PARTITION OF ${schema}.job - FOR VALUES FROM ('${name}') TO ('${name}') - ) - INSERT INTO ${schema}.queue ( - name, - policy, - retry_limit, - retry_delay, - retry_backoff, - expire_seconds, - retention_minutes, - dead_letter - ) VALUES ( - '${name}', - $1, - $2, - $3, - $4, - $5, - $6, - $7, - ) - - ` - } + return ` + INSERT INTO ${schema}.queue (name, policy, retry_limit, retry_delay, retry_backoff, expire_seconds, retention_minutes, dead_letter) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ` } -function deleteQueue (schema, name) { +function updateQueue (schema) { return ` - WITH deleted_queue AS ( - DELETE FROM ${schema}.queue WHERE name = '${name}' + UPDATE ${schema}.queue SET + retry_limit = COALESCE($2, retry_limit), + retry_delay = COALESCE($3, retry_delay), + retry_backoff = COALESCE($4, retry_backoff), + expire_seconds = COALESCE($5, 
expire_seconds), + retention_minutes = COALESCE($6, retention_minutes), + dead_letter = COALESCE($7, dead_letter) + WHERE name = $1 + ` +} + +function createQueueTablePartition (schema, name) { + return `CREATE TABLE ${schema}.job_${name} PARTITION OF ${schema}.job FOR VALUES FROM ('${name}') TO ('${name}__pgboss__')` +} + +function dropQueueTablePartition (schema, name) { + return `DROP TABLE IF EXISTS ${schema}.job_${name}` +} + +function getQueueByName (schema) { + return `SELECT * FROM ${schema}.queue WHERE name = $1` +} + +function deleteQueueRecords (schema) { + return `WITH dq AS ( + DELETE FROM ${schema}.queue WHERE name = $1 ) - DELETE FROM ${schema}.job WHERE name = '${name}'; - DROP TABLE IF EXISTS job_${name} + DELETE FROM ${schema}.job WHERE name = $1 ` } +function purgeQueue (schema) { + return `DELETE from ${schema}.job WHERE name = $1 and state < '${states.active}'` +} + function clearStorage (schema) { return `TRUNCATE ${schema}.job, ${schema}.archive` } diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index 47f7b01d..611cdb5a 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -7,8 +7,7 @@ describe('background processing error handling', function () { const defaults = { monitorStateIntervalMinutes: 1, maintenanceIntervalSeconds: 1, - noScheduling: true, - noSupervisor: false, + supervise: true, __test__throw_maint: true } @@ -32,8 +31,7 @@ describe('background processing error handling', function () { it('state monitoring error handling works', async function () { const defaults = { monitorStateIntervalSeconds: 2, - noSupervisor: false, - noScheduling: true, + supervise: true, __test__throw_monitor: true } @@ -57,8 +55,7 @@ describe('background processing error handling', function () { it('clock monitoring error handling works', async function () { const config = { ...this.test.bossConfig, - noSupervisor: false, - noScheduling: false, + schedule: true, clockMonitorIntervalSeconds: 1, 
__test__throw_clock_monitoring: 'pg-boss mock error: clock monitoring' } diff --git a/test/completeTest.js b/test/completeTest.js index cb476d45..3dbd36bc 100644 --- a/test/completeTest.js +++ b/test/completeTest.js @@ -115,7 +115,7 @@ describe('complete', function () { }) it.skip('should warn with an old onComplete option only once', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, noSupervisor: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema diff --git a/test/maintenanceTest.js b/test/maintenanceTest.js index 56d18fc9..05f68733 100644 --- a/test/maintenanceTest.js +++ b/test/maintenanceTest.js @@ -4,12 +4,10 @@ const delay = require('delay') const PgBoss = require('../') describe('maintenance', async function () { - const defaults = { noSupervisor: false } - it('should send maintenance job if missing during monitoring', async function () { const config = { ...this.test.bossConfig, - ...defaults, + supervise: true, maintenanceIntervalSeconds: 1 } @@ -37,7 +35,7 @@ describe('maintenance', async function () { it('meta monitoring error handling works', async function () { const config = { ...this.test.bossConfig, - ...defaults, + supervise: true, maintenanceIntervalSeconds: 1, __test__throw_meta_monitor: 'meta monitoring error' } diff --git a/test/migrationTest.js b/test/migrationTest.js index 8147af30..e315c4d2 100644 --- a/test/migrationTest.js +++ b/test/migrationTest.js @@ -32,7 +32,7 @@ describe('migration', function () { await contractor.rollback(currentSchemaVersion) - const config = { ...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } const boss = this.test.boss = new PgBoss(config) @@ -46,7 +46,7 @@ describe('migration', function () { it('should migrate through 2 versions back and forth', async function () { const queue = 'migrate-back-2-and-forward' - const config = { 
...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } const boss = this.test.boss = new PgBoss(config) @@ -98,7 +98,7 @@ describe('migration', function () { const twoVersionsAgo = await contractor.version() assert.strictEqual(twoVersionsAgo, currentSchemaVersion - 2) - const config = { ...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } const boss = this.test.boss = new PgBoss(config) await boss.start() @@ -118,7 +118,7 @@ describe('migration', function () { }) it('should roll back an error during a migration', async function () { - const config = { ...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } config.migrations = migrationStore.getAll(config.schema) @@ -156,4 +156,8 @@ describe('migration', function () { await boss2.stop({ graceful: false }) }) + + it.skip('should not migrate if migrations option is false', async function () { + assert(false) + }) }) diff --git a/test/monitoringTest.js b/test/monitoringTest.js index 5eb95667..6d70c860 100644 --- a/test/monitoringTest.js +++ b/test/monitoringTest.js @@ -4,7 +4,7 @@ const helper = require('./testHelper') describe('monitoring', function () { it('should emit state counts', async function () { const defaults = { - noSupervisor: false, + supervise: true, monitorStateIntervalSeconds: 1 } diff --git a/test/multiMasterTest.js b/test/multiMasterTest.js index de6d5dd5..13442011 100644 --- a/test/multiMasterTest.js +++ b/test/multiMasterTest.js @@ -10,7 +10,7 @@ const pMap = require('p-map') describe('multi-master', function () { it('should only allow 1 master to start at a time', async function () { const replicaCount = 20 - const config = { ...this.test.bossConfig, noSupervisor: true, max: 2 } + const config = { ...this.test.bossConfig, max: 2 } const instances = [] for (let i = 0; i < replicaCount; i++) { @@ -28,7 +28,7 @@ describe('multi-master', function () { it('should only allow 1 master to migrate 
to latest at a time', async function () { const replicaCount = 5 - const config = { ...this.test.bossConfig, noSupervisor: true, max: 2 } + const config = { ...this.test.bossConfig, supervise: true, max: 2 } const db = await helper.getDb() const contractor = new Contractor(db, config) @@ -81,7 +81,7 @@ describe('multi-master', function () { await boss.stop({ graceful: false }) - boss = new PgBoss({ ...this.test.bossConfig, noSupervisor: false }) + boss = new PgBoss({ ...this.test.bossConfig, supervise: true }) await boss.start() diff --git a/test/opsTest.js b/test/opsTest.js index 0e1ff95b..31dbe4c4 100644 --- a/test/opsTest.js +++ b/test/opsTest.js @@ -4,28 +4,23 @@ const { v4: uuid } = require('uuid') const delay = require('delay') describe('ops', function () { - const defaults = { - noSupervisor: true, - noScheduling: true - } - it('should expire manually', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.expire() }) it('should archive manually', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.archive() }) it('should purge the archive manually', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.purge() }) it('stop should re-emit stoppped if already stopped', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const stopPromise1 = new Promise(resolve => boss.once('stopped', resolve)) @@ -41,7 +36,7 @@ describe('ops', function () { }) it('should emit error in worker', 
async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, __test__throw_worker: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, __test__throw_worker: true }) const queue = this.test.bossConfig.schema await boss.send(queue) @@ -51,7 +46,7 @@ describe('ops', function () { }) it('should return null from getJobById if not found', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const jobId = await boss.getJobById(uuid()) @@ -59,19 +54,19 @@ describe('ops', function () { }) it('should force stop', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.stop({ graceful: false }) }) it('should destroy the connection pool', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.stop({ destroy: true, graceful: false }) assert(boss.db.pool.totalCount === 0) }) it('should destroy the connection pool gracefully', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.stop({ destroy: true }) await new Promise((resolve) => { boss.on('stopped', () => resolve()) @@ -81,7 +76,7 @@ describe('ops', function () { }) it('should emit error during graceful stop if worker is busy', async function () { - const boss = await helper.start({ ...this.test.bossConfig, ...defaults, __test__throw_stop: true }) + const boss = await helper.start({ ...this.test.bossConfig, __test__throw_stop: true }) const queue = 
this.test.bossConfig.schema await boss.send(queue) @@ -95,7 +90,7 @@ describe('ops', function () { }) it('should throw error during graceful stop if no workers are busy', async function () { - const boss = await helper.start({ ...this.test.bossConfig, ...defaults, __test__throw_stop: true }) + const boss = await helper.start({ ...this.test.bossConfig, __test__throw_stop: true }) try { await boss.stop({ timeout: 1 }) diff --git a/test/queueTest.js b/test/queueTest.js new file mode 100644 index 00000000..c85b4bbb --- /dev/null +++ b/test/queueTest.js @@ -0,0 +1,98 @@ +const assert = require('assert') +const helper = require('./testHelper') + +describe('queues', function () { + it('should create a queue', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue) + }) + + it('should reject a queue with invalid characters', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = `*${this.test.bossConfig.schema}` + + try { + await boss.createQueue(queue) + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should reject a queue that starts with a number', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = `4${this.test.bossConfig.schema}` + + try { + await boss.createQueue(queue) + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should reject a queue with invalid policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + try { + await boss.createQueue(queue, { policy: 'something' }) + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should create a queue with standard policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = 
this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'standard' }) + }) + + it('should create a queue with stately policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'stately' }) + }) + + it('should create a queue with singleton policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'singleton' }) + }) + + it('should create a queue with short policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'short' }) + }) + + it('should create a queue with priority policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'priority' }) + }) + + it('should delete a queue', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue) + await boss.deleteQueue(queue) + }) + + it('should purge a queue', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue) + await boss.purgeQueue(queue) + }) +}) diff --git a/test/scheduleTest.js b/test/scheduleTest.js index c9b6ba7d..faac371e 100644 --- a/test/scheduleTest.js +++ b/test/scheduleTest.js @@ -12,7 +12,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronWorkerIntervalSeconds: 1, - noScheduling: false + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -33,7 
+33,7 @@ describe('schedule', function () { ...this.test.bossConfig, clockMonitorIntervalSeconds: 1, cronWorkerIntervalSeconds: 1, - noScheduling: false + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -54,7 +54,7 @@ describe('schedule', function () { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, cronWorkerIntervalSeconds: 1, - noScheduling: false + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -79,8 +79,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, - noScheduling: true, - noSupervisor: true + schedule: false } let boss = await helper.start(config) @@ -91,7 +90,7 @@ describe('schedule', function () { await boss.stop() - boss = await helper.start({ ...this.test.bossConfig, cronWorkerIntervalSeconds: 1, noScheduling: false }) + boss = await helper.start({ ...this.test.bossConfig, cronWorkerIntervalSeconds: 1, schedule: true }) await delay(ASSERT_DELAY) @@ -105,7 +104,6 @@ describe('schedule', function () { it('should remove previously scheduled job', async function () { const config = { ...this.test.bossConfig, - noSupervisor: true, cronWorkerIntervalSeconds: 1 } const boss = this.test.boss = await helper.start(config) @@ -134,7 +132,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronWorkerIntervalSeconds: 1, - noScheduling: false + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -170,7 +168,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronWorkerIntervalSeconds: 1, - noScheduling: false + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -207,7 +205,7 @@ describe('schedule', function () { it('should force a clock skew warning', async function () { const config = { ...this.test.bossConfig, - noScheduling: false, + schedule: true, __test__force_clock_skew_warning: true } @@ -235,7 +233,7 @@ 
describe('schedule', function () { const config = { ...this.test.bossConfig, clockMonitorIntervalSeconds: 1, - noScheduling: false, + schedule: true, __test__force_clock_monitoring_error: 'pg-boss mock error: clock skew monitoring' } @@ -259,7 +257,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, - noScheduling: false, + schedule: true, __test__force_cron_monitoring_error: 'pg-boss mock error: cron monitoring' } diff --git a/test/speedTest.js b/test/speedTest.js index 48b781eb..11e65907 100644 --- a/test/speedTest.js +++ b/test/speedTest.js @@ -13,7 +13,7 @@ describe('speed', function () { let boss beforeEach(async function () { - const defaults = { noSupervisor: true, min: 10, max: 10 } + const defaults = { min: 10, max: 10 } boss = await helper.start({ ...this.currentTest.bossConfig, ...defaults }) await pMap(jobs, job => boss.send(job.name, job.data)) }) diff --git a/test/testHelper.js b/test/testHelper.js index b4887e9b..f240a108 100644 --- a/test/testHelper.js +++ b/test/testHelper.js @@ -36,8 +36,8 @@ function getConfig (options = {}) { config.schema = config.schema || 'pgboss' - config.noSupervisor = true - config.noScheduling = true + config.supervise = false + config.schedule = false config.retryLimit = 0 const result = { ...config } diff --git a/test/workTest.js b/test/workTest.js index de4211a8..623923da 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -1,7 +1,6 @@ const delay = require('delay') const assert = require('assert') const helper = require('./testHelper') -const PgBoss = require('../') describe('work', function () { it('should fail with no arguments', async function () { @@ -128,8 +127,8 @@ describe('work', function () { it('should handle a batch of jobs via teamSize', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'process-teamSize' const teamSize = 4 let processCount = 
0 @@ -152,8 +151,8 @@ describe('work', function () { it('should apply teamConcurrency option', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'process-teamConcurrency' const teamSize = 4 const teamConcurrency = 4 @@ -179,8 +178,8 @@ describe('work', function () { it('should handle a batch of jobs via batchSize', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'process-batchSize' const batchSize = 4 for (let i = 0; i < batchSize; i++) { @@ -217,7 +216,7 @@ describe('work', function () { it('returning promise applies backpressure', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = 'backpressure' + const queue = this.test.bossConfig.schema const jobCount = 4 let processCount = 0 @@ -274,6 +273,7 @@ describe('work', function () { it('does not fetch more than teamSize', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema + const teamSize = 4 const teamConcurrency = 2 const newJobCheckInterval = 200 @@ -304,8 +304,8 @@ describe('work', function () { it('completion should pass string wrapped in value prop', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema - const queue = 'processCompletionString' const result = 'success' const jobId = await boss.send(queue) @@ -322,8 +322,7 @@ describe('work', function () { it('completion via Promise resolve() should pass object payload', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - - const queue = 'processCompletionObject' + const queue = this.test.bossConfig.schema const something = 'clever' const jobId = await boss.send(queue) @@ -339,7 +338,7 @@ 
describe('work', function () { it('should allow multiple workers to the same queue per instance', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = 'multiple-workers' + const queue = this.test.bossConfig.schema await boss.work(queue, () => {}) await boss.work(queue, () => {}) @@ -347,8 +346,7 @@ describe('work', function () { it('should honor the includeMetadata option', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'process-includeMetadata' + const queue = this.test.bossConfig.schema await boss.send(queue) @@ -360,15 +358,8 @@ describe('work', function () { }) }) - it('should fail job at expiration without maintenance', async function () { - const boss = this.test.boss = new PgBoss({ ...this.test.bossConfig, noSupervisor: false }) - - const maintenanceTick = new Promise((resolve) => boss.on('maintenance', resolve)) - - await boss.start() - - await maintenanceTick - + it('should fail job at expiration in worker', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, supervise: false }) const queue = this.test.bossConfig.schema const jobId = await boss.send(queue, null, { expireInSeconds: 1 }) @@ -383,15 +374,8 @@ describe('work', function () { assert(job.output.message.includes('handler execution exceeded')) }) - it('should fail a batch of jobs at expiration without maintenance', async function () { - const boss = this.test.boss = new PgBoss({ ...this.test.bossConfig, noSupervisor: false }) - - const maintenanceTick = new Promise((resolve) => boss.on('maintenance', resolve)) - - await boss.start() - - await maintenanceTick - + it('should fail a batch of jobs at expiration in worker', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, supervise: false }) const queue = this.test.bossConfig.schema const jobId1 = await boss.send(queue, null, { expireInSeconds: 1 
}) diff --git a/types.d.ts b/types.d.ts index d3cb49a0..1353c5e8 100644 --- a/types.d.ts +++ b/types.d.ts @@ -26,14 +26,14 @@ declare namespace PgBoss { } interface SchedulingOptions { - noScheduling?: boolean; + schedule?: boolean; clockMonitorIntervalSeconds?: number; clockMonitorIntervalMinutes?: number; } interface MaintenanceOptions { - noSupervisor?: boolean; + supervise?: boolean; deleteAfterSeconds?: number; deleteAfterMinutes?: number; From 6494ebf5dd80304ff282bfa43b21f51ff5592561 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Mon, 4 Sep 2023 11:18:28 -0500 Subject: [PATCH 12/36] queue tests and updates --- docs/readme.md | 18 -------- src/boss.js | 10 ++--- src/manager.js | 11 ----- src/migrationStore.js | 11 ++--- src/plans.js | 30 ++++++------- test/deleteQueueTest.js | 10 ++--- test/maintenanceTest.js | 13 +++--- test/migrationTest.js | 2 +- test/queueTest.js | 89 ++++++++++++++++++++++++++++++++++++++ test/singletonTest.js | 94 ++--------------------------------------- types.d.ts | 4 +- 11 files changed, 134 insertions(+), 158 deletions(-) diff --git a/docs/readme.md b/docs/readme.md index 12350f52..30c8ffa6 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -26,7 +26,6 @@ - [`send(name, data, options)`](#sendname-data-options) - [`send(request)`](#sendrequest) - [`sendAfter(name, data, options, seconds | ISO date string | Date)`](#sendaftername-data-options-seconds--iso-date-string--date) - - [`sendOnce(name, data, options, key)`](#sendoncename-data-options-key) - [`sendThrottled(name, data, options, seconds [, key])`](#sendthrottledname-data-options-seconds--key) - [`sendDebounced(name, data, options, seconds [, key])`](#senddebouncedname-data-options-seconds--key) - [`insert([jobs])`](#insertjobs) @@ -559,17 +558,6 @@ Available in constructor as a default, or overridden in send. boss.send('my-job', {}, {singletonKey: '123'}) // resolves a null jobId until first job completed ``` - This can be used in conjunction with throttling explained below. 
- - * **useSingletonQueue** boolean - - When used in conjunction with singletonKey, allows a max of 1 job to be queued. - - ```js - boss.send('my-job', {}, {singletonKey: '123', useSingletonQueue: true}) // resolves a jobId - boss.send('my-job', {}, {singletonKey: '123', useSingletonQueue: true}) // resolves a null jobId until first job becomes active - ``` - **Throttled jobs** * **singletonSeconds**, int @@ -639,12 +627,6 @@ Send a job that should start after a number of seconds from now, or after a spec This is a convenience version of `send()` with the `startAfter` option assigned. -### `sendOnce(name, data, options, key)` - -Send a job with a unique key to only allow 1 job to be in created, retry, or active state at a time. - -This is a convenience version of `send()` with the `singletonKey` option assigned. - ### `sendThrottled(name, data, options, seconds [, key])` diff --git a/src/boss.js b/src/boss.js index 30f5d9a1..7005787f 100644 --- a/src/boss.js +++ b/src/boss.js @@ -50,7 +50,7 @@ class Boss extends EventEmitter { async supervise () { this.metaMonitor() - await this.manager.deleteQueue(queues.MAINTENANCE) + await this.manager.purgeQueue(queues.MAINTENANCE) await this.maintenanceAsync() @@ -61,7 +61,7 @@ class Boss extends EventEmitter { await this.manager.work(queues.MAINTENANCE, maintenanceWorkOptions, (job) => this.onMaintenance(job)) if (this.monitorStates) { - await this.manager.deleteQueue(queues.MONITOR_STATES) + await this.manager.purgeQueue(queues.MONITOR_STATES) await this.monitorStatesAsync() @@ -83,7 +83,7 @@ class Boss extends EventEmitter { const { secondsAgo } = await this.getMaintenanceTime() if (secondsAgo > this.maintenanceIntervalSeconds * 2) { - await this.manager.deleteQueue(queues.MAINTENANCE) + await this.manager.purgeQueue(queues.MAINTENANCE) await this.maintenanceAsync() } } catch (err) { @@ -138,12 +138,12 @@ class Boss extends EventEmitter { const result = await this.maintain() - this.emit(events.maintenance, result) - if 
(!this.stopped) { await this.manager.complete(job.id) // pre-complete to bypass throttling await this.maintenanceAsync({ startAfter: this.maintenanceIntervalSeconds }) } + + this.emit(events.maintenance, result) } catch (err) { this.emit(events.error, err) } diff --git a/src/manager.js b/src/manager.js index 4c998787..cec3c84a 100644 --- a/src/manager.js +++ b/src/manager.js @@ -82,7 +82,6 @@ class Manager extends EventEmitter { this.send, this.sendDebounced, this.sendThrottled, - this.sendOnce, this.sendAfter, this.createQueue, this.updateQueue, @@ -335,16 +334,6 @@ class Manager extends EventEmitter { return await this.createJob(name, data, options) } - async sendOnce (name, data, options, key) { - options = options ? { ...options } : {} - - options.singletonKey = key || name - - const result = Attorney.checkSendArgs([name, data, options], this.config) - - return await this.createJob(result.name, result.data, result.options) - } - async sendAfter (name, data, options, after) { options = options ? 
{ ...options } : {} options.startAfter = after diff --git a/src/migrationStore.js b/src/migrationStore.js index 6dea55ed..6092f7f0 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -90,11 +90,11 @@ function getAll (schema) { `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)`, `CREATE INDEX archive_name_idx ON ${schema}.archive(name)`, `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn) WHERE state < 'active'`, - `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state <= 'retry' AND policy = 'short'`, + `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state = 'created' AND policy = 'short'`, `CREATE UNIQUE INDEX job_policy_singleton ON ${schema}.job (name) WHERE state = 'active' AND policy = 'singleton'`, `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= 'active' AND policy = 'stately'`, - `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= 'completed' AND singletonOn IS NOT NULL`, - `CREATE UNIQUE INDEX job_debounce ON ${schema}.job (name, singletonOn, singletonKey) WHERE state <= 'completed'`, + `CREATE UNIQUE INDEX job_throttle_key ON ${schema}.job (name, singletonKey) WHERE state <= 'completed' AND singletonOn IS NULL`, + `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= 'completed' AND singletonOn IS NOT NULL`, `CREATE TABLE ${schema}.queue ( name text primary key, policy text, @@ -104,7 +104,7 @@ function getAll (schema) { expire_seconds int, retention_minutes int, dead_letter text, - created_on timestamp with time zone not null default now(), + created_on timestamp with time zone not null default now() )` ], uninstall: [ @@ -112,7 +112,8 @@ function getAll (schema) { `DROP INDEX ${schema}.job_policy_stately`, `DROP INDEX ${schema}.job_policy_short`, `DROP INDEX 
${schema}.job_policy_singleton`, - `DROP INDEX ${schema}.job_throttle`, + `DROP INDEX ${schema}.job_throttle_on`, + `DROP INDEX ${schema}.job_throttle_key`, `DROP INDEX ${schema}.job_fetch`, `DROP INDEX ${schema}.archive_archivedon_idx`, `DROP INDEX ${schema}.archive_name_idx`, diff --git a/src/plans.js b/src/plans.js index 85cedcde..9b72ab05 100644 --- a/src/plans.js +++ b/src/plans.js @@ -96,11 +96,11 @@ function create (schema, version) { createIndexJobPolicyStately(schema), createIndexJobPolicyShort(schema), createIndexJobPolicySingleton(schema), - createIndexJobThrottle(schema), - createIndexJobDebounce(schema), + createIndexJobThrottleOn(schema), + createIndexJobThrottleKey(schema), createTableArchive(schema), - createIndexArchiveId(schema), + createPrimaryKeyArchive(schema), createColumnArchiveArchivedOn(schema), createIndexArchiveArchivedOn(schema), createIndexArchiveName(schema), @@ -183,8 +183,12 @@ function createPrimaryKeyJob (schema) { return `ALTER TABLE ${schema}.job ADD CONSTRAINT job_pkey PRIMARY KEY (name, id)` } +function createPrimaryKeyArchive (schema) { + return `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (name, id)` +} + function createIndexJobPolicyShort (schema) { - return `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state <= '${states.retry}' AND policy = '${QUEUE_POLICY.short}'` + return `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state = '${states.created}' AND policy = '${QUEUE_POLICY.short}'` } function createIndexJobPolicySingleton (schema) { @@ -195,12 +199,12 @@ function createIndexJobPolicyStately (schema) { return `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= '${states.active}' AND policy = '${QUEUE_POLICY.stately}'` } -function createIndexJobThrottle (schema) { - return `CREATE UNIQUE INDEX job_throttle ON ${schema}.job (name, singletonOn) WHERE state <= '${states.completed}' AND singletonOn IS NOT NULL AND 
singletonKey IS NULL` +function createIndexJobThrottleOn (schema) { + return `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= '${states.completed}' AND singletonOn IS NOT NULL` } -function createIndexJobDebounce (schema) { - return `CREATE UNIQUE INDEX job_debounce ON ${schema}.job (name, singletonOn, singletonKey) WHERE state <= '${states.completed}'` +function createIndexJobThrottleKey (schema) { + return `CREATE UNIQUE INDEX job_throttle_key ON ${schema}.job (name, singletonKey) WHERE state <= '${states.completed}' AND singletonOn IS NULL` } function createIndexJobName (schema) { @@ -219,10 +223,6 @@ function createArchiveBackupTable (schema) { return `CREATE TABLE ${schema}.archive_backup (LIKE ${schema}.job)` } -function createIndexArchiveId (schema) { - return `CREATE INDEX archive_id on ${schema}.archive (id)` -} - function createColumnArchiveArchivedOn (schema) { return `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()` } @@ -637,7 +637,7 @@ function insertJobs (schema) { CASE WHEN "keepUntil" IS NOT NULL THEN "keepUntil" WHEN q.retention_minutes IS NOT NULL THEN now() + q.retention_minutes * interval '1 minute' - ELSE now() + interval '14 days') + ELSE now() + interval '14 days' END, q.policy FROM json_to_recordset($1) as j ( @@ -676,10 +676,10 @@ function archive (schema, completedInterval, failedInterval = completedInterval) RETURNING * ) INSERT INTO ${schema}.archive ( - id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, policy, output ) SELECT - id, name, priority, data, state, retryLimit, retryCount, retryDelay, 
retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, policy, output FROM archived_rows ` } diff --git a/test/deleteQueueTest.js b/test/deleteQueueTest.js index 1517d970..e8411511 100644 --- a/test/deleteQueueTest.js +++ b/test/deleteQueueTest.js @@ -2,12 +2,12 @@ const assert = require('assert') const helper = require('./testHelper') const delay = require('delay') -describe('deleteQueue', function () { +describe('purgeQueue', function () { it('should clear a specific queue', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue2 = 'delete-named-queue-2' - const queue1 = 'delete-named-queue-1' + const queue1 = `${this.test.bossConfig.schema}1` + const queue2 = `${this.test.bossConfig.schema}2` await boss.send(queue1) await boss.send(queue2) @@ -18,7 +18,7 @@ describe('deleteQueue', function () { assert.strictEqual(1, q1Count1) assert.strictEqual(1, q2Count1) - await boss.deleteQueue(queue1) + await boss.purgeQueue(queue1) const q1Count2 = await boss.getQueueSize(queue1) const q2Count2 = await boss.getQueueSize(queue2) @@ -26,7 +26,7 @@ describe('deleteQueue', function () { assert.strictEqual(0, q1Count2) assert.strictEqual(1, q2Count2) - await boss.deleteQueue(queue2) + await boss.purgeQueue(queue2) const q2Count3 = await boss.getQueueSize(queue2) diff --git a/test/maintenanceTest.js b/test/maintenanceTest.js index 05f68733..9eaeaa8f 100644 --- a/test/maintenanceTest.js +++ b/test/maintenanceTest.js @@ -4,7 +4,7 @@ const delay = require('delay') const PgBoss = require('../') describe('maintenance', async function () { - it('should send maintenance job if missing during monitoring', async function () { + it.skip('should send maintenance job if missing 
during monitoring', async function () { const config = { ...this.test.bossConfig, supervise: true, @@ -20,15 +20,18 @@ describe('maintenance', async function () { await boss.start() - boss.on('maintenance', async () => { - // force timestamp to an older date - await db.executeSql(`UPDATE ${config.schema}.version SET maintained_on = now() - interval '5 minutes'`) + await new Promise(resolve => { + boss.once('maintenance', async () => { + // force timestamp to an older date + await db.executeSql(`UPDATE ${config.schema}.version SET maintained_on = now() - interval '5 minutes'`) + resolve() + }) }) - // wait for monitoring to check timestamp await delay(4000) const count = await countJobs() + assert(count > 1) }) diff --git a/test/migrationTest.js b/test/migrationTest.js index e315c4d2..759c63eb 100644 --- a/test/migrationTest.js +++ b/test/migrationTest.js @@ -9,7 +9,7 @@ describe('migration', function () { let contractor beforeEach(async function () { - const db = await helper.getDb() + const db = await helper.getDb({ debug: false }) contractor = new Contractor(db, this.currentTest.bossConfig) }) diff --git a/test/queueTest.js b/test/queueTest.js index c85b4bbb..916a222a 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -95,4 +95,93 @@ describe('queues', function () { await boss.createQueue(queue) await boss.purgeQueue(queue) }) + + it.skip('should update queue properties', async function () { + + }) + + it.skip('jobs should inherit properties from queue', async function () { + + }) + + it('short policy only allows 1 job in queue', async function () { + const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'short' }) + + const jobId = await boss.send(queue) + + assert(jobId) + + const jobId2 = await boss.send(queue) + + assert.strictEqual(jobId2, null) + + const job = await boss.fetch(queue) + + assert.strictEqual(job.id, jobId) + + const jobId3 = await 
boss.send(queue) + + assert(jobId3) + }) + + it('singleton policy only allows 1 active job', async function () { + const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'singleton' }) + + await boss.send(queue) + + await boss.send(queue) + + const job1 = await boss.fetch(queue) + + const job2 = await boss.fetch(queue) + + assert.strictEqual(job2, null) + + await boss.complete(job1.id) + + const job3 = await boss.fetch(queue) + + assert(job3) + }) + + it('stately policy only allows 1 job per state up to active', async function () { + const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'stately' }) + + const jobId1 = await boss.send(queue, null, { retryLimit: 1 }) + + const blockedId = await boss.send(queue) + + assert.strictEqual(blockedId, null) + + let job1 = await boss.fetch(queue) + + await boss.fail(job1.id) + + job1 = await boss.getJobById(jobId1) + + assert.strictEqual(job1.state, 'retry') + + const jobId2 = await boss.send(queue, null, { retryLimit: 1 }) + + assert(jobId2) + + job1 = await boss.fetch(queue) + + job1 = await boss.getJobById(jobId1) + + assert.strictEqual(job1.state, 'active') + + const blockedSecondActive = await boss.fetch(queue) + + assert.strictEqual(blockedSecondActive, null) + }) }) diff --git a/test/singletonTest.js b/test/singletonTest.js index 2caa66a5..f6bafa0a 100644 --- a/test/singletonTest.js +++ b/test/singletonTest.js @@ -1,12 +1,11 @@ const assert = require('assert') -const { v4: uuid } = require('uuid') const helper = require('./testHelper') -describe('singleton', function () { +describe('singleton keys', function () { it('should not allow more than 1 pending job at a time with the same key', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, debug: true }) + const boss = 
this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema - const queue = 'singleton-1-pending' const singletonKey = 'a' const jobId = await boss.send(queue, null, { singletonKey }) @@ -20,8 +19,8 @@ describe('singleton', function () { it('should not allow more than 1 complete job with the same key with an interval', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'singleton-1-complete' const singletonKey = 'a' const singletonMinutes = 1 @@ -47,89 +46,4 @@ describe('singleton', function () { assert(jobId2) }) - - it('sendOnce() should work', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'sendOnce' - const key = 'only-once-plz' - - const jobId = await boss.sendOnce(queue, null, null, key) - - assert(jobId) - - const jobId2 = await boss.sendOnce(queue, null, null, key) - - assert.strictEqual(jobId2, null) - - const job = await boss.fetch(queue) - - assert.strictEqual(job.id, jobId) - - const jobId3 = await boss.sendOnce(queue, null, null, key) - - assert.strictEqual(jobId3, null) - }) - - it('sendOnce() without a key should also work', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'sendOnceNoKey' - const jobId = await boss.sendOnce(queue) - - assert(jobId) - - const jobId2 = await boss.sendOnce(queue) - - assert.strictEqual(jobId2, null) - }) - - it('useSingletonQueue allows a second singleton job if first has enetered active state', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'singleton-queue-check' - const singletonKey = 'myKey' - - const jobId = await boss.send(queue, null, { singletonKey, useSingletonQueue: true }) - - assert(jobId) - - const jobId2 = await boss.send(queue, null, { singletonKey, useSingletonQueue: true 
}) - - assert.strictEqual(jobId2, null) - - const job = await boss.fetch(queue) - - assert.strictEqual(job.id, jobId) - - const jobId3 = await boss.send(queue, null, { singletonKey, useSingletonQueue: true }) - - assert(jobId3) - }) - - it('useSingletonQueue works when using insert', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const name = 'singleton-queue-check' - const singletonKey = 'myKey' - - const jobId = uuid() - await boss.insert([{ id: jobId, name, singletonKey, useSingletonQueue: true }]) - - assert(await boss.getJobById(jobId)) - - const jobId2 = uuid() - await boss.insert([{ id: jobId2, name, singletonKey, useSingletonQueue: true }]) - - assert.strictEqual(await boss.getJobById(jobId2), null) - - const job = await boss.fetch(name) - - assert.strictEqual(job.id, jobId) - - const jobId3 = uuid() - await boss.insert([{ id: jobId3, name, singletonKey, useSingletonQueue: true }]) - - assert(await boss.getJobById(jobId3)) - }) }) diff --git a/types.d.ts b/types.d.ts index 1353c5e8..e8e0cbd4 100644 --- a/types.d.ts +++ b/types.d.ts @@ -80,7 +80,6 @@ declare namespace PgBoss { priority?: number; startAfter?: number | string | Date; singletonKey?: string; - useSingletonQueue?: boolean; singletonSeconds?: number; singletonMinutes?: number; singletonHours?: number; @@ -293,8 +292,6 @@ declare class PgBoss extends EventEmitter { sendAfter(name: string, data: object, options: PgBoss.SendOptions, dateString: string): Promise; sendAfter(name: string, data: object, options: PgBoss.SendOptions, seconds: number): Promise; - sendOnce(name: string, data: object, options: PgBoss.SendOptions, key: string): Promise; - sendThrottled(name: string, data: object, options: PgBoss.SendOptions, seconds: number): Promise; sendThrottled(name: string, data: object, options: PgBoss.SendOptions, seconds: number, key: string): Promise; @@ -349,6 +346,7 @@ declare class PgBoss extends EventEmitter { getJobById(id: string, options?: 
PgBoss.ConnectionOptions): Promise; deleteQueue(name: string): Promise; + purgeQueue(name: string): Promise; clearStorage(): Promise; archive(): Promise; From 7cbfefba1174410dca89a08b1e3c6260a656dddf Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Mon, 4 Sep 2023 16:17:07 -0500 Subject: [PATCH 13/36] maintenance wip --- src/attorney.js | 12 ++- src/boss.js | 212 ++++++++++++++++------------------------ src/db.js | 30 ++++++ src/index.js | 4 + src/manager.js | 3 +- src/migrationStore.js | 4 +- src/plans.js | 54 ++++++---- test/deleteQueueTest.js | 70 ------------- test/maintenanceTest.js | 68 +++++++------ test/multiMasterTest.js | 36 ------- test/opsTest.js | 2 +- test/queueTest.js | 30 ++++++ 12 files changed, 233 insertions(+), 292 deletions(-) delete mode 100644 test/deleteQueueTest.js diff --git a/src/attorney.js b/src/attorney.js index 21bed07e..5e0ccfde 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -12,6 +12,8 @@ module.exports = { assertPostgresObjectName } +const MAX_INTERVAL_HOURS = 24 + const WARNINGS = { CLOCK_SKEW: { message: 'Timekeeper detected clock skew between this instance and the database server. This will not affect scheduling operations, but this warning is shown any time the skew exceeds 60 seconds.', @@ -250,8 +252,6 @@ function applyExpirationConfig (config, defaults) { emitWarning(WARNINGS.EXPIRE_IN_REMOVED) } - const MAX_EXPIRATION_HOURS = 24 - assert(!('expireInSeconds' in config) || config.expireInSeconds >= 1, 'configuration assert: expireInSeconds must be at least every second') @@ -271,7 +271,7 @@ function applyExpirationConfig (config, defaults) { ? defaults.expireIn : 15 * 60 - assert(expireIn / 60 / 60 < MAX_EXPIRATION_HOURS, `configuration assert: expiration cannot exceed ${MAX_EXPIRATION_HOURS} hours`) + assert(expireIn / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: expiration cannot exceed ${MAX_INTERVAL_HOURS} hours`) config.expireIn = expireIn } @@ -326,6 +326,8 @@ function applyMaintenanceConfig (config) { ? 
config.maintenanceIntervalSeconds : 120 + assert(config.maintenanceIntervalSeconds / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: maintenance interval cannot exceed ${MAX_INTERVAL_HOURS} hours`) + config.schedule = ('schedule' in config) ? config.schedule : true config.maintenance = ('maintenance' in config) ? config.maintenance : true config.migrate = ('migrate' in config) ? config.migrate : true @@ -371,6 +373,10 @@ function applyMonitoringConfig (config) { ? config.monitorStateIntervalSeconds : null + if (config.monitorStateIntervalSeconds) { + assert(config.monitorStateIntervalSeconds / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: state monitoring interval cannot exceed ${MAX_INTERVAL_HOURS} hours`) + } + const TEN_MINUTES_IN_SECONDS = 600 assert(!('clockMonitorIntervalSeconds' in config) || (config.clockMonitorIntervalSeconds >= 1 && config.clockMonitorIntervalSeconds <= TEN_MINUTES_IN_SECONDS), diff --git a/src/boss.js b/src/boss.js index 7005787f..58aa813f 100644 --- a/src/boss.js +++ b/src/boss.js @@ -1,11 +1,6 @@ const EventEmitter = require('events') const plans = require('./plans') -const queues = { - MAINTENANCE: '__pgboss__maintenance', - MONITOR_STATES: '__pgboss__monitor-states' -} - const events = { error: 'error', monitorStates: 'monitor-states', @@ -21,107 +16,111 @@ class Boss extends EventEmitter { this.manager = config.manager this.maintenanceIntervalSeconds = config.maintenanceIntervalSeconds - - this.monitorStates = config.monitorStateIntervalSeconds !== null - - if (this.monitorStates) { - this.monitorIntervalSeconds = config.monitorStateIntervalSeconds - } + this.maintenanceIntervalSeconds = config.monitorStateIntervalSeconds this.events = events this.failJobsByTimeoutCommand = plans.locked(config.schema, plans.failJobsByTimeout(config.schema)) this.archiveCommand = plans.locked(config.schema, plans.archive(config.schema, config.archiveInterval, config.archiveFailedInterval)) - this.purgeCommand = plans.locked(config.schema, 
plans.purge(config.schema, config.deleteAfter)) + this.dropCommand = plans.locked(config.schema, plans.drop(config.schema, config.deleteAfter)) this.getMaintenanceTimeCommand = plans.getMaintenanceTime(config.schema) this.setMaintenanceTimeCommand = plans.setMaintenanceTime(config.schema) + this.getMonitorTimeCommand = plans.getMonitorTime(config.schema) + this.setMonitorTimeCommand = plans.setMonitorTime(config.schema) this.countStatesCommand = plans.countStates(config.schema) this.functions = [ this.expire, this.archive, - this.purge, + this.drop, this.countStates, - this.getQueueNames, this.maintain ] } - async supervise () { - this.metaMonitor() + async monitor () { + let monitoring - await this.manager.purgeQueue(queues.MAINTENANCE) + this.monitorInterval = setInterval(async () => { + let locker - await this.maintenanceAsync() + try { + if (monitoring) { + return + } - const maintenanceWorkOptions = { - newJobCheckIntervalSeconds: Math.max(1, this.maintenanceIntervalSeconds / 2) - } + monitoring = true - await this.manager.work(queues.MAINTENANCE, maintenanceWorkOptions, (job) => this.onMaintenance(job)) + if (this.config.__test__throw_meta_monitor) { + throw new Error(this.config.__test__throw_meta_monitor) + } - if (this.monitorStates) { - await this.manager.purgeQueue(queues.MONITOR_STATES) + locker = await this.db.lock({ key: 'monitor' }) - await this.monitorStatesAsync() + const { secondsAgo } = await this.getMonitorTime() - const monitorStatesWorkOptions = { - newJobCheckIntervalSeconds: Math.max(1, this.monitorIntervalSeconds / 2) - } + if (secondsAgo > this.monitorIntervalSeconds) { + if (!this.stopped) { + const states = await this.countStates() + this.setMonitorTime() + this.emit(events.monitorStates, states) + } + } + } catch (err) { + this.emit(events.error, err) + } finally { + if (locker?.locked) { + await locker.unlock() + } - await this.manager.work(queues.MONITOR_STATES, monitorStatesWorkOptions, (job) => this.onMonitorStates(job)) - } + 
monitoring = false + } + }, this.monitorIntervalSeconds * 1000) } - metaMonitor () { - this.metaMonitorInterval = setInterval(async () => { + async supervise () { + let maintaining + + this.maintenanceInterval = setInterval(async () => { + let locker + try { + if (maintaining) { + return + } + + maintaining = true + if (this.config.__test__throw_meta_monitor) { throw new Error(this.config.__test__throw_meta_monitor) } + locker = await this.db.lock({ key: 'maintenance' }) + const { secondsAgo } = await this.getMaintenanceTime() - if (secondsAgo > this.maintenanceIntervalSeconds * 2) { - await this.manager.purgeQueue(queues.MAINTENANCE) - await this.maintenanceAsync() + if (secondsAgo > this.maintenanceIntervalSeconds) { + const result = await this.maintain() + this.emit(events.maintenance, result) } } catch (err) { this.emit(events.error, err) - } - }, this.maintenanceIntervalSeconds * 2 * 1000) - } - - async maintenanceAsync (options = {}) { - const { startAfter } = options - - options = { - startAfter, - retentionSeconds: this.maintenanceIntervalSeconds * 4, - singletonKey: queues.MAINTENANCE - } - - await this.manager.send(queues.MAINTENANCE, null, options) - } - - async monitorStatesAsync (options = {}) { - const { startAfter } = options - - options = { - startAfter, - retentionSeconds: this.monitorIntervalSeconds * 4, - singletonKey: queues.MONITOR_STATES - } + } finally { + if (locker?.locked) { + await locker.unlock() + } - await this.manager.send(queues.MONITOR_STATES, null, options) + maintaining = false + } + }, this.maintenanceIntervalSeconds * 1000) } async maintain () { const started = Date.now() - await this.expire() - await this.archive() - await this.purge() + !this.stopped && await this.expire() + !this.stopped && await this.archive() + !this.stopped && await this.drop() const ended = Date.now() @@ -130,58 +129,18 @@ class Boss extends EventEmitter { return { ms: ended - started } } - async onMaintenance (job) { - try { - if 
(this.config.__test__throw_maint) { - throw new Error(this.config.__test__throw_maint) - } - - const result = await this.maintain() - - if (!this.stopped) { - await this.manager.complete(job.id) // pre-complete to bypass throttling - await this.maintenanceAsync({ startAfter: this.maintenanceIntervalSeconds }) - } - - this.emit(events.maintenance, result) - } catch (err) { - this.emit(events.error, err) - } - } - - async onMonitorStates (job) { - try { - if (this.config.__test__throw_monitor) { - throw new Error(this.config.__test__throw_monitor) - } - - const states = await this.countStates() - - this.emit(events.monitorStates, states) - - if (!this.stopped && this.monitorStates) { - await this.manager.complete(job.id) // pre-complete to bypass throttling - await this.monitorStatesAsync({ startAfter: this.monitorIntervalSeconds }) - } - } catch (err) { - this.emit(events.error, err) - } - } - async stop () { if (this.config.__test__throw_stop) { throw new Error(this.config.__test__throw_stop) } if (!this.stopped) { - if (this.metaMonitorInterval) { - clearInterval(this.metaMonitorInterval) + if (this.maintenanceInterval) { + clearInterval(this.maintenanceInterval) } - await this.manager.offWork(queues.MAINTENANCE) - - if (this.monitorStates) { - await this.manager.offWork(queues.MONITOR_STATES) + if (this.monitorInterval) { + clearInterval(this.monitorInterval) } this.stopped = true @@ -194,7 +153,7 @@ class Boss extends EventEmitter { Object.keys(stateCountDefault) .forEach(key => { stateCountDefault[key] = 0 }) - const counts = await this.executeSql(this.countStatesCommand) + const counts = await this.db.executeSql(this.countStatesCommand) const states = counts.rows.reduce((acc, item) => { if (item.name) { @@ -214,43 +173,44 @@ class Boss extends EventEmitter { } async expire () { - await this.executeSql(this.failJobsByTimeoutCommand) + await this.db.executeSql(this.failJobsByTimeoutCommand) } async archive () { - await this.executeSql(this.archiveCommand) + 
await this.db.executeSql(this.archiveCommand) } - async purge () { - await this.executeSql(this.purgeCommand) + async drop () { + await this.db.executeSql(this.dropCommand) } async setMaintenanceTime () { - await this.executeSql(this.setMaintenanceTimeCommand) + await this.db.executeSql(this.setMaintenanceTimeCommand) } async getMaintenanceTime () { - if (!this.stopped) { - const { rows } = await this.db.executeSql(this.getMaintenanceTimeCommand) + const { rows } = await this.db.executeSql(this.getMaintenanceTimeCommand) - let { maintained_on: maintainedOn, seconds_ago: secondsAgo } = rows[0] + let { maintained_on: maintainedOn, seconds_ago: secondsAgo } = rows[0] - secondsAgo = secondsAgo !== null ? parseFloat(secondsAgo) : this.maintenanceIntervalSeconds * 10 + secondsAgo = secondsAgo !== null ? parseFloat(secondsAgo) : 999_999_999 - return { maintainedOn, secondsAgo } - } + return { maintainedOn, secondsAgo } } - getQueueNames () { - return queues + async setMonitorTime () { + await this.db.executeSql(this.setMonitorTimeCommand) } - async executeSql (sql, params) { - if (!this.stopped) { - return await this.db.executeSql(sql, params) - } + async getMonitorTime () { + const { rows } = await this.db.executeSql(this.getMonitorTimeCommand) + + let { monitored_on: monitoredOn, seconds_ago: secondsAgo } = rows[0] + + secondsAgo = secondsAgo !== null ? 
parseFloat(secondsAgo) : 999_999_999 + + return { monitoredOn, secondsAgo } } } module.exports = Boss -module.exports.QUEUES = queues diff --git a/src/db.js b/src/db.js index 6bf93b71..9298c8a3 100644 --- a/src/db.js +++ b/src/db.js @@ -1,5 +1,6 @@ const EventEmitter = require('events') const pg = require('pg') +const { advisoryLock } = require('./plans') class Db extends EventEmitter { constructor (config) { @@ -39,6 +40,35 @@ class Db extends EventEmitter { } } + async lock ({ timeout = 30, key } = {}) { + // const lockedClient = new pg.Client(this.config) + // await lockedClient.connect() + const lockedClient = await this.pool.connect() + + const query = ` + BEGIN; + SET LOCAL lock_timeout = '${timeout}s'; + SET LOCAL idle_in_transaction_session_timeout = '3600s'; + ${advisoryLock(key)}; + ` + + await lockedClient.query(query) + + const locker = { + locked: true, + unlock: async function () { + try { + await lockedClient.query('COMMIT') + await lockedClient.end() + } finally { + this.locked = false + } + } + } + + return locker + } + static quotePostgresStr (str) { const delimeter = '$sanitize$' if (str.includes(delimeter)) { diff --git a/src/index.js b/src/index.js index 26da290b..cc8f19d3 100644 --- a/src/index.js +++ b/src/index.js @@ -110,6 +110,10 @@ class PgBoss extends EventEmitter { await this.boss.supervise() } + if (this.config.monitorStateIntervalSeconds) { + await this.boss.monitor() + } + if (this.config.schedule) { await this.timekeeper.start() } diff --git a/src/manager.js b/src/manager.js index cec3c84a..bca1c5d7 100644 --- a/src/manager.js +++ b/src/manager.js @@ -11,11 +11,10 @@ const Worker = require('./worker') const plans = require('./plans') const Db = require('./db') -const { QUEUES: BOSS_QUEUES } = require('./boss') const { QUEUES: TIMEKEEPER_QUEUES } = require('./timekeeper') const { QUEUE_POLICY } = plans -const INTERNAL_QUEUES = Object.values(BOSS_QUEUES).concat(Object.values(TIMEKEEPER_QUEUES)).reduce((acc, i) => ({ ...acc, [i]: i }), 
{}) +const INTERNAL_QUEUES = Object.values(TIMEKEEPER_QUEUES).reduce((acc, i) => ({ ...acc, [i]: i }), {}) const WIP_EVENT_INTERVAL = 2000 const WIP_DEBOUNCE_OPTIONS = { leading: true, trailing: true, maxWait: WIP_EVENT_INTERVAL } diff --git a/src/migrationStore.js b/src/migrationStore.js index 6092f7f0..5364c0cf 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -95,6 +95,7 @@ function getAll (schema) { `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= 'active' AND policy = 'stately'`, `CREATE UNIQUE INDEX job_throttle_key ON ${schema}.job (name, singletonKey) WHERE state <= 'completed' AND singletonOn IS NULL`, `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= 'completed' AND singletonOn IS NOT NULL`, + `ALTER TABLE ${schema}.version ADD COLUMN monitored_on timestamp with time zone`, `CREATE TABLE ${schema}.queue ( name text primary key, policy text, @@ -133,7 +134,8 @@ function getAll (schema) { `CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < 'expired'`, `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, `CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < 'active' AND singletonOn IS NULL AND singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, - `DROP TABLE ${schema}.queue` + `DROP TABLE ${schema}.queue`, + `ALTER TABLE ${schema}.version DROP COLUMN monitored_on` ] }, { diff --git a/src/plans.js b/src/plans.js index 9b72ab05..d7257cdf 100644 --- a/src/plans.js +++ b/src/plans.js @@ -43,7 +43,7 @@ module.exports = { unsubscribe, getQueuesForEvent, archive, - purge, + drop, countStates, createQueue, updateQueue, @@ -56,9 +56,12 @@ module.exports = { clearStorage, getMaintenanceTime, setMaintenanceTime, + 
getMonitorTime, + setMonitorTime, getCronTime, setCronTime, locked, + advisoryLock, assertMigration, getArchivedJobById, getJobById, @@ -69,20 +72,6 @@ module.exports = { DEFAULT_SCHEMA } -function locked (schema, query) { - if (Array.isArray(query)) { - query = query.join(';\n') - } - - return ` - BEGIN; - SET LOCAL statement_timeout = '30s'; - ${advisoryLock(schema)}; - ${query}; - COMMIT; - ` -} - function create (schema, version) { const commands = [ createSchema(schema), @@ -128,7 +117,8 @@ function createTableVersion (schema) { CREATE TABLE ${schema}.version ( version int primary key, maintained_on timestamp with time zone, - cron_on timestamp with time zone + cron_on timestamp with time zone, + monitored_on timestamp with time zone ) ` } @@ -235,12 +225,20 @@ function createIndexArchiveName (schema) { return `CREATE INDEX archive_name_idx ON ${schema}.archive(name)` } +function getMaintenanceTime (schema) { + return `SELECT maintained_on, EXTRACT( EPOCH FROM (now() - maintained_on) ) seconds_ago FROM ${schema}.version` +} + function setMaintenanceTime (schema) { return `UPDATE ${schema}.version SET maintained_on = now()` } -function getMaintenanceTime (schema) { - return `SELECT maintained_on, EXTRACT( EPOCH FROM (now() - maintained_on) ) seconds_ago FROM ${schema}.version` +function getMonitorTime (schema) { + return `SELECT monitored_on, EXTRACT( EPOCH FROM (now() - monitored_on) ) seconds_ago FROM ${schema}.version` +} + +function setMonitorTime (schema) { + return `UPDATE ${schema}.version SET monitored_on = now()` } function setCronTime (schema, time) { @@ -659,7 +657,7 @@ function insertJobs (schema) { ` } -function purge (schema, interval) { +function drop (schema, interval) { return ` DELETE FROM ${schema}.archive WHERE archivedOn < (now() - interval '${interval}') @@ -692,9 +690,23 @@ function countStates (schema) { ` } -function advisoryLock (schema) { +function locked (schema, query) { + if (Array.isArray(query)) { + query = query.join(';\n') + } 
+ + return ` + BEGIN; + SET LOCAL lock_timeout = '30s'; + ${advisoryLock(schema)}; + ${query}; + COMMIT; + ` +} + +function advisoryLock (schema, key) { return `SELECT pg_advisory_xact_lock( - ('x' || md5(current_database() || '.pgboss.${schema}'))::bit(64)::bigint + ('x' || md5(current_database() || '.pgboss.${schema}${key || ''}'))::bit(64)::bigint )` } diff --git a/test/deleteQueueTest.js b/test/deleteQueueTest.js deleted file mode 100644 index e8411511..00000000 --- a/test/deleteQueueTest.js +++ /dev/null @@ -1,70 +0,0 @@ -const assert = require('assert') -const helper = require('./testHelper') -const delay = require('delay') - -describe('purgeQueue', function () { - it('should clear a specific queue', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue1 = `${this.test.bossConfig.schema}1` - const queue2 = `${this.test.bossConfig.schema}2` - - await boss.send(queue1) - await boss.send(queue2) - - const q1Count1 = await boss.getQueueSize(queue1) - const q2Count1 = await boss.getQueueSize(queue2) - - assert.strictEqual(1, q1Count1) - assert.strictEqual(1, q2Count1) - - await boss.purgeQueue(queue1) - - const q1Count2 = await boss.getQueueSize(queue1) - const q2Count2 = await boss.getQueueSize(queue2) - - assert.strictEqual(0, q1Count2) - assert.strictEqual(1, q2Count2) - - await boss.purgeQueue(queue2) - - const q2Count3 = await boss.getQueueSize(queue2) - - assert.strictEqual(0, q2Count3) - }) - - it('clearStorage() should empty both job storage tables', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, archiveCompletedAfterSeconds: 1 }) - const queue = this.test.bossConfig.schema - - const jobId = await boss.send(queue) - await boss.fetch(queue) - await boss.complete(jobId) - - await delay(1000) - await boss.maintain() - - await boss.send(queue) - - const db = await helper.getDb() - - const getJobCount = async table => { - const jobCountResult = await 
db.executeSql(`SELECT count(*)::int as job_count FROM ${this.test.bossConfig.schema}.${table}`) - return jobCountResult.rows[0].job_count - } - - const preJobCount = await getJobCount('job') - const preArchiveCount = await getJobCount('archive') - - assert.strictEqual(preJobCount, 1) - assert.strictEqual(preArchiveCount, 1) - - await boss.clearStorage() - - const postJobCount = await getJobCount('job') - const postArchiveCount = await getJobCount('archive') - - assert.strictEqual(postJobCount, 0) - assert.strictEqual(postArchiveCount, 0) - }) -}) diff --git a/test/maintenanceTest.js b/test/maintenanceTest.js index 9eaeaa8f..2939169e 100644 --- a/test/maintenanceTest.js +++ b/test/maintenanceTest.js @@ -4,58 +4,62 @@ const delay = require('delay') const PgBoss = require('../') describe('maintenance', async function () { - it.skip('should send maintenance job if missing during monitoring', async function () { + it('error handling works', async function () { const config = { ...this.test.bossConfig, supervise: true, - maintenanceIntervalSeconds: 1 + maintenanceIntervalSeconds: 1, + __test__throw_meta_monitor: 'monitoring error' } - const db = await helper.getDb() + let errorCount = 0 const boss = this.test.boss = new PgBoss(config) - const queues = boss.boss.getQueueNames() - const countJobs = () => helper.countJobs(config.schema, 'name = $1', [queues.MAINTENANCE]) + boss.once('error', (error) => { + assert.strictEqual(error.message, config.__test__throw_meta_monitor) + errorCount++ + }) await boss.start() - await new Promise(resolve => { - boss.once('maintenance', async () => { - // force timestamp to an older date - await db.executeSql(`UPDATE ${config.schema}.version SET maintained_on = now() - interval '5 minutes'`) - resolve() - }) - }) + await delay(6000) - await delay(4000) + assert.strictEqual(errorCount, 1) + }) - const count = await countJobs() + it('clearStorage() should empty both job storage tables', async function () { + const boss = this.test.boss = 
await helper.start({ ...this.test.bossConfig, archiveCompletedAfterSeconds: 1 }) + const queue = this.test.bossConfig.schema - assert(count > 1) - }) + const jobId = await boss.send(queue) + await boss.fetch(queue) + await boss.complete(jobId) - it('meta monitoring error handling works', async function () { - const config = { - ...this.test.bossConfig, - supervise: true, - maintenanceIntervalSeconds: 1, - __test__throw_meta_monitor: 'meta monitoring error' - } + await delay(1000) + await boss.maintain() - let errorCount = 0 + await boss.send(queue) - const boss = this.test.boss = new PgBoss(config) + const db = await helper.getDb() - boss.once('error', (error) => { - assert.strictEqual(error.message, config.__test__throw_meta_monitor) - errorCount++ - }) + const getJobCount = async table => { + const jobCountResult = await db.executeSql(`SELECT count(*)::int as job_count FROM ${this.test.bossConfig.schema}.${table}`) + return jobCountResult.rows[0].job_count + } - await boss.start() + const preJobCount = await getJobCount('job') + const preArchiveCount = await getJobCount('archive') - await delay(6000) + assert.strictEqual(preJobCount, 1) + assert.strictEqual(preArchiveCount, 1) - assert.strictEqual(errorCount, 1) + await boss.clearStorage() + + const postJobCount = await getJobCount('job') + const postArchiveCount = await getJobCount('archive') + + assert.strictEqual(postJobCount, 0) + assert.strictEqual(postArchiveCount, 0) }) }) diff --git a/test/multiMasterTest.js b/test/multiMasterTest.js index 13442011..c1246a2c 100644 --- a/test/multiMasterTest.js +++ b/test/multiMasterTest.js @@ -1,5 +1,4 @@ const assert = require('assert') -const delay = require('delay') const helper = require('./testHelper') const PgBoss = require('../') const Contractor = require('../src/contractor') @@ -58,39 +57,4 @@ describe('multi-master', function () { await pMap(instances, i => i.stop({ graceful: false })) } }) - - it('should clear maintenance queue before supervising', async 
function () { - const { states } = PgBoss - const jobCount = 5 - - let boss = new PgBoss({ ...this.test.bossConfig, maintenanceIntervalSeconds: 1 }) - - const queues = boss.boss.getQueueNames() - const countJobs = (state) => helper.countJobs(this.test.bossConfig.schema, 'name = $1 AND state = $2', [queues.MAINTENANCE, state]) - - await boss.start() - - // create extra maintenace jobs manually - for (let i = 0; i < jobCount; i++) { - await boss.send(queues.MAINTENANCE) - } - - const beforeCount = await countJobs(states.created) - - assert.strictEqual(beforeCount, jobCount) - - await boss.stop({ graceful: false }) - - boss = new PgBoss({ ...this.test.bossConfig, supervise: true }) - - await boss.start() - - await delay(3000) - - const completedCount = await countJobs(states.completed) - - assert.strictEqual(completedCount, 1) - - await boss.stop({ graceful: false }) - }) }) diff --git a/test/opsTest.js b/test/opsTest.js index 31dbe4c4..63afa03d 100644 --- a/test/opsTest.js +++ b/test/opsTest.js @@ -16,7 +16,7 @@ describe('ops', function () { it('should purge the archive manually', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - await boss.purge() + await boss.drop() }) it('stop should re-emit stoppped if already stopped', async function () { diff --git a/test/queueTest.js b/test/queueTest.js index 916a222a..9f964a86 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -184,4 +184,34 @@ describe('queues', function () { assert.strictEqual(blockedSecondActive, null) }) + + it('should clear a specific queue', async function () { + const boss = this.test.boss = await helper.start(this.test.bossConfig) + + const queue1 = `${this.test.bossConfig.schema}1` + const queue2 = `${this.test.bossConfig.schema}2` + + await boss.send(queue1) + await boss.send(queue2) + + const q1Count1 = await boss.getQueueSize(queue1) + const q2Count1 = await boss.getQueueSize(queue2) + + assert.strictEqual(1, q1Count1) + 
assert.strictEqual(1, q2Count1) + + await boss.purgeQueue(queue1) + + const q1Count2 = await boss.getQueueSize(queue1) + const q2Count2 = await boss.getQueueSize(queue2) + + assert.strictEqual(0, q1Count2) + assert.strictEqual(1, q2Count2) + + await boss.purgeQueue(queue2) + + const q2Count3 = await boss.getQueueSize(queue2) + + assert.strictEqual(0, q2Count3) + }) }) From 6e7a8f981c3ec813953e4873f12d63ef65722334 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Mon, 4 Sep 2023 17:49:47 -0500 Subject: [PATCH 14/36] fixes --- src/boss.js | 26 +++++++----------- src/timekeeper.js | 6 ++-- test/backgroundErrorTest.js | 55 ++++++++++--------------------------- test/maintenanceTest.js | 25 ----------------- test/monitoringTest.js | 6 ++-- test/opsTest.js | 42 ---------------------------- test/scheduleTest.js | 24 ++++++++++++++++ test/speedTest.js | 3 +- 8 files changed, 56 insertions(+), 131 deletions(-) diff --git a/src/boss.js b/src/boss.js index 58aa813f..4affca1e 100644 --- a/src/boss.js +++ b/src/boss.js @@ -16,7 +16,7 @@ class Boss extends EventEmitter { this.manager = config.manager this.maintenanceIntervalSeconds = config.maintenanceIntervalSeconds - this.maintenanceIntervalSeconds = config.monitorStateIntervalSeconds + this.monitorStateIntervalSeconds = config.monitorStateIntervalSeconds this.events = events @@ -51,20 +51,18 @@ class Boss extends EventEmitter { monitoring = true - if (this.config.__test__throw_meta_monitor) { - throw new Error(this.config.__test__throw_meta_monitor) + if (this.config.__test__throw_monitor) { + throw new Error(this.config.__test__throw_monitor) } locker = await this.db.lock({ key: 'monitor' }) const { secondsAgo } = await this.getMonitorTime() - if (secondsAgo > this.monitorIntervalSeconds) { - if (!this.stopped) { - const states = await this.countStates() - this.setMonitorTime() - this.emit(events.monitorStates, states) - } + if (secondsAgo > this.monitorStateIntervalSeconds && !this.stopped) { + const states = await 
this.countStates() + this.setMonitorTime() + this.emit(events.monitorStates, states) } } catch (err) { this.emit(events.error, err) @@ -75,7 +73,7 @@ class Boss extends EventEmitter { monitoring = false } - }, this.monitorIntervalSeconds * 1000) + }, this.monitorStateIntervalSeconds * 1000) } async supervise () { @@ -91,8 +89,8 @@ class Boss extends EventEmitter { maintaining = true - if (this.config.__test__throw_meta_monitor) { - throw new Error(this.config.__test__throw_meta_monitor) + if (this.config.__test__throw_maint) { + throw new Error(this.config.__test__throw_maint) } locker = await this.db.lock({ key: 'maintenance' }) @@ -130,10 +128,6 @@ class Boss extends EventEmitter { } async stop () { - if (this.config.__test__throw_stop) { - throw new Error(this.config.__test__throw_stop) - } - if (!this.stopped) { if (this.maintenanceInterval) { clearInterval(this.maintenanceInterval) diff --git a/src/timekeeper.js b/src/timekeeper.js index a42ffc04..c77499f6 100644 --- a/src/timekeeper.js +++ b/src/timekeeper.js @@ -144,9 +144,9 @@ class Timekeeper extends EventEmitter { if (this.stopped) return try { - if (this.config.__test__throw_clock_monitoring) { - throw new Error(this.config.__test__throw_clock_monitoring) - } + // if (this.config.__test__throw_clock_monitoring) { + // throw new Error(this.config.__test__throw_clock_monitoring) + // } const items = await this.getSchedules() diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index 611cdb5a..56a652c5 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -5,73 +5,48 @@ const delay = require('delay') describe('background processing error handling', function () { it('maintenance error handling works', async function () { const defaults = { - monitorStateIntervalMinutes: 1, maintenanceIntervalSeconds: 1, supervise: true, - __test__throw_maint: true + __test__throw_maint: 'my maintenance error' } const config = { ...this.test.bossConfig, ...defaults } const boss = 
this.test.boss = new PgBoss(config) - return new Promise((resolve) => { - let resolved = false - - boss.on('error', () => { - if (!resolved) { - resolved = true - resolve() - } - }) + let errorCount = 0 - boss.start().then(() => {}) + boss.once('error', (error) => { + assert.strictEqual(error.message, config.__test__throw_maint) + errorCount++ }) + + await boss.start() + + await delay(3000) + + assert.strictEqual(errorCount, 1) }) it('state monitoring error handling works', async function () { const defaults = { - monitorStateIntervalSeconds: 2, + monitorStateIntervalSeconds: 1, supervise: true, - __test__throw_monitor: true + __test__throw_monitor: 'my monitor error' } const config = { ...this.test.bossConfig, ...defaults } const boss = this.test.boss = new PgBoss(config) - return new Promise((resolve) => { - let resolved = false - - boss.on('error', () => { - if (!resolved) { - resolved = true - resolve() - } - }) - - boss.start().then(() => {}) - }) - }) - - it('clock monitoring error handling works', async function () { - const config = { - ...this.test.bossConfig, - schedule: true, - clockMonitorIntervalSeconds: 1, - __test__throw_clock_monitoring: 'pg-boss mock error: clock monitoring' - } - let errorCount = 0 - const boss = this.test.boss = new PgBoss(config) - boss.once('error', (error) => { - assert.strictEqual(error.message, config.__test__throw_clock_monitoring) + assert.strictEqual(error.message, config.__test__throw_monitor) errorCount++ }) await boss.start() - await delay(8000) + await delay(3000) assert.strictEqual(errorCount, 1) }) diff --git a/test/maintenanceTest.js b/test/maintenanceTest.js index 2939169e..7a99f432 100644 --- a/test/maintenanceTest.js +++ b/test/maintenanceTest.js @@ -1,33 +1,8 @@ const assert = require('assert') const helper = require('./testHelper') const delay = require('delay') -const PgBoss = require('../') describe('maintenance', async function () { - it('error handling works', async function () { - const config = { - 
...this.test.bossConfig, - supervise: true, - maintenanceIntervalSeconds: 1, - __test__throw_meta_monitor: 'monitoring error' - } - - let errorCount = 0 - - const boss = this.test.boss = new PgBoss(config) - - boss.once('error', (error) => { - assert.strictEqual(error.message, config.__test__throw_meta_monitor) - errorCount++ - }) - - await boss.start() - - await delay(6000) - - assert.strictEqual(errorCount, 1) - }) - it('clearStorage() should empty both job storage tables', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, archiveCompletedAfterSeconds: 1 }) const queue = this.test.bossConfig.schema diff --git a/test/monitoringTest.js b/test/monitoringTest.js index 6d70c860..ad7c1e90 100644 --- a/test/monitoringTest.js +++ b/test/monitoringTest.js @@ -3,12 +3,12 @@ const helper = require('./testHelper') describe('monitoring', function () { it('should emit state counts', async function () { - const defaults = { - supervise: true, + const config = { + ...this.test.bossConfig, monitorStateIntervalSeconds: 1 } - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start(config) const queue = 'monitorMe' diff --git a/test/opsTest.js b/test/opsTest.js index 63afa03d..760ff844 100644 --- a/test/opsTest.js +++ b/test/opsTest.js @@ -1,7 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') const { v4: uuid } = require('uuid') -const delay = require('delay') describe('ops', function () { it('should expire manually', async function () { @@ -19,22 +18,6 @@ describe('ops', function () { await boss.drop() }) - it('stop should re-emit stoppped if already stopped', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - - const stopPromise1 = new Promise(resolve => boss.once('stopped', resolve)) - - await boss.stop({ timeout: 1 }) - - await stopPromise1 - - const stopPromise2 = new 
Promise(resolve => boss.once('stopped', resolve)) - - await boss.stop({ timeout: 1 }) - - await stopPromise2 - }) - it('should emit error in worker', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, __test__throw_worker: true }) const queue = this.test.bossConfig.schema @@ -74,29 +57,4 @@ describe('ops', function () { assert(boss.db.pool.totalCount === 0) }) - - it('should emit error during graceful stop if worker is busy', async function () { - const boss = await helper.start({ ...this.test.bossConfig, __test__throw_stop: true }) - const queue = this.test.bossConfig.schema - - await boss.send(queue) - await boss.work(queue, () => delay(2000)) - - await delay(500) - - await boss.stop({ timeout: 5000 }) - - await new Promise(resolve => boss.on('error', resolve)) - }) - - it('should throw error during graceful stop if no workers are busy', async function () { - const boss = await helper.start({ ...this.test.bossConfig, __test__throw_stop: true }) - - try { - await boss.stop({ timeout: 1 }) - assert(false) - } catch (err) { - assert(true) - } - }) }) diff --git a/test/scheduleTest.js b/test/scheduleTest.js index faac371e..044fb06d 100644 --- a/test/scheduleTest.js +++ b/test/scheduleTest.js @@ -276,4 +276,28 @@ describe('schedule', function () { assert.strictEqual(errorCount, 1) }) + + it('clock monitoring error handling works', async function () { + const config = { + ...this.test.bossConfig, + schedule: true, + clockMonitorIntervalSeconds: 1, + __test__force_clock_monitoring_error: 'pg-boss mock error: clock monitoring' + } + + let errorCount = 0 + + const boss = this.test.boss = new PgBoss(config) + + boss.once('error', (error) => { + assert.strictEqual(error.message, config.__test__force_clock_monitoring_error) + errorCount++ + }) + + await boss.start() + + await delay(4000) + + assert.strictEqual(errorCount, 1) + }) }) diff --git a/test/speedTest.js b/test/speedTest.js index 11e65907..282d9790 100644 --- 
a/test/speedTest.js +++ b/test/speedTest.js @@ -1,5 +1,4 @@ const helper = require('./testHelper') -const pMap = require('p-map') describe('speed', function () { const expectedSeconds = 2 @@ -15,7 +14,7 @@ describe('speed', function () { beforeEach(async function () { const defaults = { min: 10, max: 10 } boss = await helper.start({ ...this.currentTest.bossConfig, ...defaults }) - await pMap(jobs, job => boss.send(job.name, job.data)) + await boss.insert(jobs) }) afterEach(async function () { await helper.stop(boss) }) From 616343fdae5d63b85c88e2f47504a18ef7cfe1a4 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Mon, 4 Sep 2023 18:39:32 -0500 Subject: [PATCH 15/36] test fixes --- src/attorney.js | 4 +++ src/manager.js | 31 +++++++++++++++++--- test/completeTest.js | 2 +- test/queueTest.js | 68 ++++++++++++++++++++++++++++++++++++++++++-- test/workTest.js | 8 ++---- 5 files changed, 100 insertions(+), 13 deletions(-) diff --git a/src/attorney.js b/src/attorney.js index 5e0ccfde..c6041ae0 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -99,6 +99,10 @@ function checkSendArgs (args, defaults) { assert(!singletonSeconds || singletonSeconds <= defaults.archiveSeconds, `throttling interval ${singletonSeconds}s cannot exceed archive interval ${defaults.archiveSeconds}s`) + if (options.onComplete) { + emitWarning(WARNINGS.ON_COMPLETE_REMOVED) + } + return { name, data, options } } diff --git a/src/manager.js b/src/manager.js index bca1c5d7..096d542b 100644 --- a/src/manager.js +++ b/src/manager.js @@ -84,11 +84,11 @@ class Manager extends EventEmitter { this.sendAfter, this.createQueue, this.updateQueue, - this.getQueueProperties, + this.getQueue, this.deleteQueue, this.purgeQueue, - this.clearStorage, this.getQueueSize, + this.clearStorage, this.getJobById ] @@ -603,13 +603,36 @@ class Manager extends EventEmitter { await this.db.executeSql(sql, params) } - async getQueueProperties (name) { + async getQueue (name) { assert(name, 'Missing queue name argument') const sql 
= plans.getQueueByName(this.config.schema) const result = await this.db.executeSql(sql, [name]) - return result.rows.length ? result.rows[0] : null + if (result.rows.length === 0) { + return null + } + + const { + policy, + retry_limit: retryLimit, + retry_delay: retryDelay, + retry_backoff: retryBackoff, + expire_seconds: expireInSeconds, + retention_minutes: retentionMinutes, + dead_letter: deadLetter + } = result.rows[0] + + return { + name, + policy, + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + } } async deleteQueue (name) { diff --git a/test/completeTest.js b/test/completeTest.js index 3dbd36bc..e61aaeab 100644 --- a/test/completeTest.js +++ b/test/completeTest.js @@ -114,7 +114,7 @@ describe('complete', function () { assert.strictEqual(called, true) }) - it.skip('should warn with an old onComplete option only once', async function () { + it('should warn with an old onComplete option only once', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema diff --git a/test/queueTest.js b/test/queueTest.js index 9f964a86..27e3397c 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -96,12 +96,76 @@ describe('queues', function () { await boss.purgeQueue(queue) }) - it.skip('should update queue properties', async function () { + it('should update queue properties', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + const createProps = { + retryLimit: 1, + retryBackoff: false, + retryDelay: 1, + expireInSeconds: 1, + retentionMinutes: 1, + deadLetter: `${queue}_1` + } + + await boss.createQueue(queue, createProps) + + let queueObj = await boss.getQueue(queue) + + assert.strictEqual(createProps.retryLimit, queueObj.retryLimit) + assert.strictEqual(createProps.retryBackoff, queueObj.retryBackoff) + 
assert.strictEqual(createProps.retryDelay, queueObj.retryDelay) + assert.strictEqual(createProps.expireInSeconds, queueObj.expireInSeconds) + assert.strictEqual(createProps.retentionMinutes, queueObj.retentionMinutes) + assert.strictEqual(createProps.deadLetter, queueObj.deadLetter) + + const updateProps = { + retryLimit: 2, + retryBackoff: true, + retryDelay: 2, + expireInSeconds: 2, + retentionMinutes: 2, + deadLetter: `${queue}_2` + } + + await boss.updateQueue(queue, updateProps) + queueObj = await boss.getQueue(queue) + + assert.strictEqual(updateProps.retryLimit, queueObj.retryLimit) + assert.strictEqual(updateProps.retryBackoff, queueObj.retryBackoff) + assert.strictEqual(updateProps.retryDelay, queueObj.retryDelay) + assert.strictEqual(updateProps.expireInSeconds, queueObj.expireInSeconds) + assert.strictEqual(updateProps.retentionMinutes, queueObj.retentionMinutes) + assert.strictEqual(updateProps.deadLetter, queueObj.deadLetter) }) - it.skip('jobs should inherit properties from queue', async function () { + it('jobs should inherit properties from queue', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + const createProps = { + retryLimit: 1, + retryBackoff: true, + retryDelay: 2, + expireInSeconds: 3, + retentionMinutes: 4, + deadLetter: `${queue}_1` + } + + await boss.createQueue(queue, createProps) + + const jobId = await boss.send(queue) + + const job = await boss.getJobById(jobId) + assert.strictEqual(createProps.retryLimit, job.retrylimit) + assert.strictEqual(createProps.retryBackoff, job.retrybackoff) + assert.strictEqual(createProps.retryDelay, job.retrydelay) + assert.strictEqual(createProps.expireInSeconds, job.expireIn) + assert.strictEqual(createProps.retentionMinutes, job.retentionMinutes) + assert.strictEqual(createProps.deadLetter, job.deadletter) }) it('short policy only allows 1 job in queue', async function () { diff --git a/test/workTest.js 
b/test/workTest.js index 623923da..1647d94c 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -419,9 +419,7 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - boss.stop({ timeout: 1 }) - - await delay(500) + boss.stop({ timeout: 1, wait: true }) try { await boss.work(queue) @@ -435,9 +433,7 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - boss.stop({ timeout: 1 }) - - await delay(500) + boss.stop({ timeout: 1, wait: true }) await boss.send(queue) }) From 6e13cc474e05c8d9b56d75d7f5d2e1c86eeacd4b Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Mon, 4 Sep 2023 22:20:52 -0500 Subject: [PATCH 16/36] tests --- src/attorney.js | 4 -- src/boss.js | 110 ++++++++++++++++++++-------------------- src/db.js | 16 +++--- src/timekeeper.js | 6 +-- test/multiMasterTest.js | 12 +++-- test/queueTest.js | 2 +- test/scheduleTest.js | 45 ++++++++++++++++ test/workTest.js | 4 +- 8 files changed, 122 insertions(+), 77 deletions(-) diff --git a/src/attorney.js b/src/attorney.js index c6041ae0..4300970b 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -252,10 +252,6 @@ function applyRetentionConfig (config, defaults) { } function applyExpirationConfig (config, defaults) { - if ('expireIn' in config) { - emitWarning(WARNINGS.EXPIRE_IN_REMOVED) - } - assert(!('expireInSeconds' in config) || config.expireInSeconds >= 1, 'configuration assert: expireInSeconds must be at least every second') diff --git a/src/boss.js b/src/boss.js index 4affca1e..47747ade 100644 --- a/src/boss.js +++ b/src/boss.js @@ -38,79 +38,79 @@ class Boss extends EventEmitter { ] } - async monitor () { - let monitoring + async supervise () { + this.maintenanceInterval = setInterval(() => this.onSupervise(), this.maintenanceIntervalSeconds * 1000) + } - this.monitorInterval = setInterval(async () => { - let locker + 
async monitor () { + this.monitorInterval = setInterval(() => this.onMonitor(), this.monitorStateIntervalSeconds * 1000) + } - try { - if (monitoring) { - return - } + async onMonitor () { + let locker - monitoring = true + try { + if (this.monitoring) { + return + } - if (this.config.__test__throw_monitor) { - throw new Error(this.config.__test__throw_monitor) - } + this.monitoring = true - locker = await this.db.lock({ key: 'monitor' }) + if (this.config.__test__throw_monitor) { + throw new Error(this.config.__test__throw_monitor) + } - const { secondsAgo } = await this.getMonitorTime() + locker = await this.db.lock({ key: 'monitor' }) - if (secondsAgo > this.monitorStateIntervalSeconds && !this.stopped) { - const states = await this.countStates() - this.setMonitorTime() - this.emit(events.monitorStates, states) - } - } catch (err) { - this.emit(events.error, err) - } finally { - if (locker?.locked) { - await locker.unlock() - } + const { secondsAgo } = await this.getMonitorTime() - monitoring = false + if (secondsAgo > this.monitorStateIntervalSeconds && !this.stopped) { + const states = await this.countStates() + this.setMonitorTime() + this.emit(events.monitorStates, states) + } + } catch (err) { + this.emit(events.error, err) + } finally { + if (locker?.locked) { + await locker.unlock() } - }, this.monitorStateIntervalSeconds * 1000) - } - - async supervise () { - let maintaining - this.maintenanceInterval = setInterval(async () => { - let locker + this.monitoring = false + } + } - try { - if (maintaining) { - return - } + async onSupervise () { + let locker - maintaining = true + try { + if (this.maintaining) { + return + } - if (this.config.__test__throw_maint) { - throw new Error(this.config.__test__throw_maint) - } + this.maintaining = true - locker = await this.db.lock({ key: 'maintenance' }) + if (this.config.__test__throw_maint) { + throw new Error(this.config.__test__throw_maint) + } - const { secondsAgo } = await this.getMaintenanceTime() + locker = 
await this.db.lock({ key: 'maintenance' }) - if (secondsAgo > this.maintenanceIntervalSeconds) { - const result = await this.maintain() - this.emit(events.maintenance, result) - } - } catch (err) { - this.emit(events.error, err) - } finally { - if (locker?.locked) { - await locker.unlock() - } + const { secondsAgo } = await this.getMaintenanceTime() - maintaining = false + if (secondsAgo > this.maintenanceIntervalSeconds) { + const result = await this.maintain() + this.emit(events.maintenance, result) + } + } catch (err) { + this.emit(events.error, err) + } finally { + if (locker?.locked) { + await locker.unlock() } - }, this.maintenanceIntervalSeconds * 1000) + + this.maintaining = false + } } async maintain () { diff --git a/src/db.js b/src/db.js index 9298c8a3..f86e9485 100644 --- a/src/db.js +++ b/src/db.js @@ -26,15 +26,15 @@ class Db extends EventEmitter { async executeSql (text, values) { if (this.opened) { - if (this.config.debug === true) { - console.log(`${new Date().toISOString()}: DEBUG SQL`) - console.log(text) + // if (this.config.debug === true) { + // console.log(`${new Date().toISOString()}: DEBUG SQL`) + // console.log(text) - if (values) { - console.log(`${new Date().toISOString()}: DEBUG VALUES`) - console.log(values) - } - } + // if (values) { + // console.log(`${new Date().toISOString()}: DEBUG VALUES`) + // console.log(values) + // } + // } return await this.pool.query(text, values) } diff --git a/src/timekeeper.js b/src/timekeeper.js index c77499f6..0cd038cb 100644 --- a/src/timekeeper.js +++ b/src/timekeeper.js @@ -144,9 +144,9 @@ class Timekeeper extends EventEmitter { if (this.stopped) return try { - // if (this.config.__test__throw_clock_monitoring) { - // throw new Error(this.config.__test__throw_clock_monitoring) - // } + if (this.config.__test__throw_cron_processing) { + throw new Error(this.config.__test__throw_cron_processing) + } const items = await this.getSchedules() diff --git a/test/multiMasterTest.js b/test/multiMasterTest.js 
index c1246a2c..2364bd69 100644 --- a/test/multiMasterTest.js +++ b/test/multiMasterTest.js @@ -9,7 +9,7 @@ const pMap = require('p-map') describe('multi-master', function () { it('should only allow 1 master to start at a time', async function () { const replicaCount = 20 - const config = { ...this.test.bossConfig, max: 2 } + const config = { ...this.test.bossConfig, supervise: false, max: 2 } const instances = [] for (let i = 0; i < replicaCount; i++) { @@ -26,8 +26,12 @@ describe('multi-master', function () { }) it('should only allow 1 master to migrate to latest at a time', async function () { - const replicaCount = 5 - const config = { ...this.test.bossConfig, supervise: true, max: 2 } + const config = { + ...this.test.bossConfig, + supervise: true, + maintenanceIntervalSeconds: 1, + max: 2 + } const db = await helper.getDb() const contractor = new Contractor(db, config) @@ -45,7 +49,7 @@ describe('multi-master', function () { const instances = [] - for (let i = 0; i < replicaCount; i++) { + for (let i = 0; i < 5; i++) { instances.push(new PgBoss(config)) } diff --git a/test/queueTest.js b/test/queueTest.js index 27e3397c..9bbad320 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -141,7 +141,7 @@ describe('queues', function () { assert.strictEqual(updateProps.deadLetter, queueObj.deadLetter) }) - it('jobs should inherit properties from queue', async function () { + it.skip('jobs should inherit properties from queue', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema diff --git a/test/scheduleTest.js b/test/scheduleTest.js index 044fb06d..c1e738e7 100644 --- a/test/scheduleTest.js +++ b/test/scheduleTest.js @@ -28,6 +28,27 @@ describe('schedule', function () { assert(job) }) + it('should not enable scheduling if archive config is < 60s', async function () { + const config = { + ...this.test.bossConfig, + clockMonitorIntervalSeconds: 1, + cronWorkerIntervalSeconds: 
1, + archiveCompletedAfterSeconds: 1, + schedule: true + } + + const boss = this.test.boss = await helper.start(config) + const queue = this.test.bossConfig.schema + + await boss.schedule(queue, '* * * * *') + + await delay(ASSERT_DELAY) + + const job = await boss.fetch(queue) + + assert.strictEqual(job, null) + }) + it('should accept a custom clock monitoring interval in seconds', async function () { const config = { ...this.test.bossConfig, @@ -277,6 +298,30 @@ describe('schedule', function () { assert.strictEqual(errorCount, 1) }) + it('errors during cron processing should emit', async function () { + const config = { + ...this.test.bossConfig, + cronWorkerIntervalSeconds: 1, + schedule: true, + __test__throw_cron_processing: 'cron processing' + } + + let errorCount = 0 + + const boss = this.test.boss = new PgBoss(config) + + boss.once('error', error => { + assert.strictEqual(error.message, config.__test__throw_cron_processing) + errorCount++ + }) + + await boss.start() + + await delay(2000) + + assert.strictEqual(errorCount, 1) + }) + it('clock monitoring error handling works', async function () { const config = { ...this.test.bossConfig, diff --git a/test/workTest.js b/test/workTest.js index 1647d94c..bb45e474 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -419,13 +419,13 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - boss.stop({ timeout: 1, wait: true }) + await boss.stop({ timeout: 1, wait: true }) try { await boss.work(queue) assert(false) } catch (err) { - assert(err.message.includes('stopping')) + assert(true) } }) From 240c55403add6b953c58f8eb264ddfb5a725d80e Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Tue, 5 Sep 2023 16:24:49 -0500 Subject: [PATCH 17/36] tests --- src/boss.js | 9 ++++ src/index.js | 89 +++++++++++++++++-------------------- src/manager.js | 8 ++-- test/archiveTest.js | 9 +++- test/backgroundErrorTest.js | 83 
++++++++++++++++++++++++++++++++++ test/queueTest.js | 6 +++ test/workTest.js | 6 +-- 7 files changed, 155 insertions(+), 55 deletions(-) diff --git a/src/boss.js b/src/boss.js index 47747ade..c1ce0395 100644 --- a/src/boss.js +++ b/src/boss.js @@ -1,5 +1,6 @@ const EventEmitter = require('events') const plans = require('./plans') +const delay = require('delay') const events = { error: 'error', @@ -56,6 +57,10 @@ class Boss extends EventEmitter { this.monitoring = true + if (this.config.__test__delay_monitor) { + await delay(this.config.__test__delay_monitor) + } + if (this.config.__test__throw_monitor) { throw new Error(this.config.__test__throw_monitor) } @@ -90,6 +95,10 @@ class Boss extends EventEmitter { this.maintaining = true + if (this.config.__test__delay_maintenance) { + await delay(this.config.__test__delay_maintenance) + } + if (this.config.__test__throw_maint) { throw new Error(this.config.__test__throw_maint) } diff --git a/src/index.js b/src/index.js index cc8f19d3..26a86679 100644 --- a/src/index.js +++ b/src/index.js @@ -72,16 +72,7 @@ class PgBoss extends EventEmitter { } function promoteFunction (obj, func) { - this[func.name] = (...args) => { - const shouldRun = !this.started || !(func.name === 'work' && (this.stopped || this.stoppingOn)) - - if (shouldRun) { - return func.apply(obj, args) - } else { - const state = this.stoppingOn ? 'stopping' : this.stopped ? 'stopped' : !this.started ? 
'not started' : 'started' - return Promise.reject(new Error(`pg-boss is ${state}.`)) - } - } + this[func.name] = (...args) => func.apply(obj, args) } function promoteEvent (emitter, event) { @@ -141,52 +132,56 @@ class PgBoss extends EventEmitter { await this.boss.stop() await new Promise((resolve, reject) => { - try { - const shutdown = async () => { - try { - await this.manager.failWip() - - if (this.db.isOurs && this.db.opened && destroy) { - await this.db.close() - } - - this.stopped = true - this.stoppingOn = null - this.started = false - - this.emit(events.stopped) - resolve() - } catch (err) { - this.emit(events.error, err) - reject(err) + const shutdown = async () => { + try { + if (this.config.__test__throw_shutdown) { + throw new Error(this.config.__test__throw_shutdown) } - } - if (!graceful) { - return shutdown() - } + await this.manager.failWip() - if (!wait) { + if (this.db.isOurs && this.db.opened && destroy) { + await this.db.close() + } + + this.stopped = true + this.stoppingOn = null + this.started = false + + this.emit(events.stopped) resolve() + } catch (err) { + this.emit(events.error, err) + reject(err) } + } - const isWip = () => this.manager.getWipData({ includeInternal: false }).length > 0 + if (!graceful) { + return shutdown() + } - setImmediate(async () => { - try { - while ((Date.now() - this.stoppingOn) < timeout && isWip()) { - await delay(500) - } + if (!wait) { + resolve() + } - await shutdown() - } catch (err) { - this.emit(events.error, err) - reject(err) + setImmediate(async () => { + try { + if (this.config.__test__throw_stop_monitor) { + throw new Error(this.config.__test__throw_stop_monitor) } - }) - } catch (err) { - reject(err) - } + + const isWip = () => this.manager.getWipData({ includeInternal: false }).length > 0 + + while ((Date.now() - this.stoppingOn) < timeout && isWip()) { + await delay(500) + } + + await shutdown() + } catch (err) { + reject(err) + this.emit(events.error, err) + } + }) }) } } diff --git 
a/src/manager.js b/src/manager.js index 096d542b..d5fa63b2 100644 --- a/src/manager.js +++ b/src/manager.js @@ -96,11 +96,11 @@ class Manager extends EventEmitter { } start () { - this.stopping = false + this.stopped = false } async stop () { - this.stopping = true + this.stopped = true for (const worker of this.workers.values()) { if (!INTERNAL_QUEUES[worker.name]) { @@ -175,8 +175,8 @@ class Manager extends EventEmitter { } async watch (name, options, callback) { - if (this.stopping) { - throw new Error('Workers are disabled. pg-boss is stopping.') + if (this.stopped) { + throw new Error('Workers are disabled. pg-boss is stopped') } const { diff --git a/test/archiveTest.js b/test/archiveTest.js index 85137974..ee5c00ff 100644 --- a/test/archiveTest.js +++ b/test/archiveTest.js @@ -5,7 +5,8 @@ const { states } = require('../src/plans') describe('archive', function () { const defaults = { - archiveCompletedAfterSeconds: 1 + archiveCompletedAfterSeconds: 1, + supervise: true } it('should archive a completed job', async function () { @@ -42,6 +43,8 @@ describe('archive', function () { await boss.complete(jobId) + await delay(1000) + await boss.maintain() const archivedJob = await boss.getJobById(jobId) @@ -58,6 +61,7 @@ describe('archive', function () { const jobId = await boss.send(queue, null, { retentionSeconds: 1 }) await delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -74,6 +78,7 @@ describe('archive', function () { const jobId = await boss.send(queue) await delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -93,6 +98,7 @@ describe('archive', function () { await boss.fail(jobId, failPayload) await delay(1000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -111,6 +117,7 @@ describe('archive', function () { await boss.fail(jobId, failPayload) await delay(1000) + await boss.maintain() 
const archivedJob = await helper.getArchivedJobById(config.schema, jobId) diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index 56a652c5..fee81123 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -27,6 +27,47 @@ describe('background processing error handling', function () { assert.strictEqual(errorCount, 1) }) + it('slow maintenance will back off loop interval', async function () { + const config = { + ...this.test.bossConfig, + maintenanceIntervalSeconds: 1, + supervise: true, + __test__delay_maintenance: 4000 + } + + const boss = this.test.boss = new PgBoss(config) + + let eventCount = 0 + + boss.on('maintenance', () => eventCount++) + + await boss.start() + + await delay(7000) + + assert.strictEqual(eventCount, 1) + }) + + it('slow monitoring will back off loop interval', async function () { + const config = { + ...this.test.bossConfig, + monitorStateIntervalSeconds: 1, + __test__delay_monitor: 4000 + } + + const boss = this.test.boss = new PgBoss(config) + + let eventCount = 0 + + boss.on('monitor-states', () => eventCount++) + + await boss.start() + + await delay(7000) + + assert.strictEqual(eventCount, 1) + }) + it('state monitoring error handling works', async function () { const defaults = { monitorStateIntervalSeconds: 1, @@ -50,4 +91,46 @@ describe('background processing error handling', function () { assert.strictEqual(errorCount, 1) }) + + it('shutdown monitoring error handling works', async function () { + const config = { + ...this.test.bossConfig, + __test__throw_shutdown: 'shutdown error' + } + + const boss = this.test.boss = new PgBoss(config) + + let errorCount = 0 + + boss.once('error', (error) => { + assert.strictEqual(error.message, config.__test__throw_shutdown) + errorCount++ + }) + + await boss.start() + + await boss.stop() + + await delay(1000) + + assert.strictEqual(errorCount, 1) + }) + + it('shutdown error handling works', async function () { + const config = { + 
...this.test.bossConfig, + __test__throw_stop_monitor: 'monitor error' + } + + const boss = this.test.boss = new PgBoss(config) + + await boss.start() + + try { + await boss.stop() + assert(false) + } catch (err) { + assert(true) + } + }) }) diff --git a/test/queueTest.js b/test/queueTest.js index 9bbad320..9b2caa1b 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -96,6 +96,12 @@ describe('queues', function () { await boss.purgeQueue(queue) }) + it('getQueue() returns null when missing', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = await boss.getQueue(this.test.bossConfig.schema) + assert.strictEqual(queue, null) + }) + it('should update queue properties', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema diff --git a/test/workTest.js b/test/workTest.js index bb45e474..1a998310 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -419,10 +419,10 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - await boss.stop({ timeout: 1, wait: true }) + await boss.stop({ wait: true }) try { - await boss.work(queue) + await boss.work(queue, () => {}) assert(false) } catch (err) { assert(true) @@ -433,7 +433,7 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - boss.stop({ timeout: 1, wait: true }) + boss.stop({ wait: true }) await boss.send(queue) }) From b79008d943c226f70c26a22a82c91691814724f5 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Tue, 5 Sep 2023 22:02:45 -0500 Subject: [PATCH 18/36] allow non-migration use cases --- src/contractor.js | 14 +++++++++++++ src/index.js | 2 ++ test/migrationTest.js | 46 +++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 60 insertions(+), 2 deletions(-) diff --git 
a/src/contractor.js b/src/contractor.js index a77bad9b..7c2dd242 100644 --- a/src/contractor.js +++ b/src/contractor.js @@ -47,6 +47,20 @@ class Contractor { } } + async check () { + const installed = await this.isInstalled() + + if (!installed) { + throw new Error('pg-boss is not installed') + } + + const version = await this.version() + + if (schemaVersion !== version) { + throw new Error('pg-boss database requires migrations') + } + } + async create () { try { const commands = plans.create(this.config.schema, schemaVersion) diff --git a/src/index.js b/src/index.js index 26a86679..e17125b2 100644 --- a/src/index.js +++ b/src/index.js @@ -93,6 +93,8 @@ class PgBoss extends EventEmitter { if (this.config.migrate) { await this.contractor.start() + } else { + await this.contractor.check() } this.manager.start() diff --git a/test/migrationTest.js b/test/migrationTest.js index 759c63eb..2a829044 100644 --- a/test/migrationTest.js +++ b/test/migrationTest.js @@ -157,7 +157,49 @@ describe('migration', function () { await boss2.stop({ graceful: false }) }) - it.skip('should not migrate if migrations option is false', async function () { - assert(false) + it('should not install if migrate option is false', async function () { + const config = { ...this.test.bossConfig, migrate: false } + const boss = this.test.boss = new PgBoss(config) + try { + await boss.start() + assert(false) + } catch (err) { + assert(true) + } + }) + it('should not migrate if migrate option is false', async function () { + await contractor.create() + + await contractor.rollback(currentSchemaVersion) + + const config = { ...this.test.bossConfig, migrate: false } + const boss = this.test.boss = new PgBoss(config) + + try { + await boss.start() + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should still work if migrate option is false', async function () { + await contractor.create() + + const config = { ...this.test.bossConfig, migrate: false } + const queue = 
this.test.bossConfig.schema + + const boss = this.test.boss = new PgBoss(config) + + try { + await boss.start() + await boss.send(queue) + const job = await boss.fetch(queue) + await boss.complete(job.id) + + assert(false) + } catch (err) { + assert(true) + } }) }) From ac23fa4a0acaa165bfc28469d8d14df4801909be Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Wed, 6 Sep 2023 17:35:24 -0500 Subject: [PATCH 19/36] config inheritance wip --- src/attorney.js | 43 +++++++++++--------------------- src/manager.js | 44 ++++++++++++++++++++------------- src/plans.js | 63 +++++++++++++++++++++++------------------------ test/queueTest.js | 2 +- 4 files changed, 73 insertions(+), 79 deletions(-) diff --git a/src/attorney.js b/src/attorney.js index 4300970b..3bd04e69 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -223,7 +223,7 @@ function applyArchiveFailedConfig (config) { } } -function applyRetentionConfig (config, defaults) { +function applyRetentionConfig (config, defaults = {}) { assert(!('retentionSeconds' in config) || config.retentionSeconds >= 1, 'configuration assert: retentionSeconds must be at least every second') @@ -244,14 +244,13 @@ function applyRetentionConfig (config, defaults) { ? `${config.retentionMinutes} minutes` : ('retentionSeconds' in config) ? `${config.retentionSeconds} seconds` - : defaults - ? defaults.keepUntil - : '14 days' + : null config.keepUntil = keepUntil + config.keepUntilDefault = defaults?.keepUntil } -function applyExpirationConfig (config, defaults) { +function applyExpirationConfig (config, defaults = {}) { assert(!('expireInSeconds' in config) || config.expireInSeconds >= 1, 'configuration assert: expireInSeconds must be at least every second') @@ -267,13 +266,12 @@ function applyExpirationConfig (config, defaults) { ? config.expireInMinutes * 60 : ('expireInSeconds' in config) ? config.expireInSeconds - : defaults && defaults.expireIn - ? 
defaults.expireIn - : 15 * 60 + : null - assert(expireIn / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: expiration cannot exceed ${MAX_INTERVAL_HOURS} hours`) + assert(!expireIn || expireIn / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: expiration cannot exceed ${MAX_INTERVAL_HOURS} hours`) config.expireIn = expireIn + config.expireInDefault = defaults?.expireIn } function applyRetryConfig (config, defaults) { @@ -281,36 +279,23 @@ function applyRetryConfig (config, defaults) { assert(!('retryLimit' in config) || (Number.isInteger(config.retryLimit) && config.retryLimit >= 0), 'retryLimit must be an integer >= 0') assert(!('retryBackoff' in config) || (config.retryBackoff === true || config.retryBackoff === false), 'retryBackoff must be either true or false') - if (defaults) { - config.retryDelay = ('retryDelay' in config) ? config.retryDelay : defaults.retryDelay - config.retryLimit = ('retryLimit' in config) ? config.retryLimit : defaults.retryLimit - config.retryBackoff = ('retryBackoff' in config) ? config.retryBackoff : defaults.retryBackoff - } - - config.retryDelay = config.retryDelay || 0 - config.retryLimit = Number.isInteger(config.retryLimit) ? config.retryLimit : 2 - - config.retryBackoff = !!config.retryBackoff - config.retryDelay = (config.retryBackoff && !config.retryDelay) ? 1 : config.retryDelay - config.retryLimit = (config.retryDelay && !config.retryLimit) ? 
1 : config.retryLimit + config.retryDelayDefault = defaults?.retryDelay + config.retryLimitDefault = defaults?.retryLimit + config.retryBackoffDefault = defaults?.retryBackoff } function applyNewJobCheckInterval (config, defaults) { - const second = 1000 - - assert(!('newJobCheckInterval' in config) || config.newJobCheckInterval >= 100, - 'configuration assert: newJobCheckInterval must be at least every 100ms') + assert(!('newJobCheckInterval' in config) || config.newJobCheckInterval >= 500, + 'configuration assert: newJobCheckInterval must be at least every 500ms') assert(!('newJobCheckIntervalSeconds' in config) || config.newJobCheckIntervalSeconds >= 1, 'configuration assert: newJobCheckIntervalSeconds must be at least every second') config.newJobCheckInterval = ('newJobCheckIntervalSeconds' in config) - ? config.newJobCheckIntervalSeconds * second + ? config.newJobCheckIntervalSeconds * 1000 : ('newJobCheckInterval' in config) ? config.newJobCheckInterval - : defaults - ? defaults.newJobCheckInterval - : second * 2 + : defaults?.newJobCheckInterval || 2000 } function applyMaintenanceConfig (config) { diff --git a/src/manager.js b/src/manager.js index d5fa63b2..ed0642cd 100644 --- a/src/manager.js +++ b/src/manager.js @@ -366,37 +366,47 @@ class Manager extends EventEmitter { async createJob (name, data, options, singletonOffset = 0) { const { + id = null, db: wrapper, - expireIn, priority, startAfter, - keepUntil, singletonKey = null, singletonSeconds, - retryBackoff, + deadLetter = null, + expireIn, + expireInDefault, + keepUntil, + keepUntilDefault, retryLimit, + retryLimitDefault, retryDelay, - deadLetter = null + retryDelayDefault, + retryBackoff, + retryBackoffDefault } = options - const id = uuid[this.config.uuid]() - const values = [ id, // 1 name, // 2 - priority, // 3 - retryLimit, // 4 + data, // 3 + priority, // 4 startAfter, // 5 - expireIn, // 6 - data, // 7 - singletonKey, // 8 - singletonSeconds, // 9 - singletonOffset, // 10 - retryDelay, // 11 
- retryBackoff, // 12 - keepUntil, // 13 - deadLetter // 14 + singletonKey, // 6 + singletonSeconds, // 7 + singletonOffset, // 8 + deadLetter, // 9 + expireIn, // 10 + expireInDefault, // 11 + keepUntil, // 12 + keepUntilDefault, // 13 + retryLimit, // 14 + retryLimitDefault, // 15 + retryDelay, // 16 + retryDelayDefault, // 17 + retryBackoff, // 18 + retryBackoffDefault // 19 ] + const db = wrapper || this.db const result = await db.executeSql(this.insertJobCommand, values) diff --git a/src/plans.js b/src/plans.js index d7257cdf..29670079 100644 --- a/src/plans.js +++ b/src/plans.js @@ -542,16 +542,15 @@ function insertJob (schema) { name, data, priority, - state, startAfter, singletonKey, singletonOn, + deadletter, expireIn, + keepUntil, retryLimit, retryDelay, retryBackoff, - deadletter, - keepUntil, policy ) SELECT @@ -559,44 +558,44 @@ function insertJob (schema) { j.name, data, priority, - state, startAfter, singletonKey, singletonOn, - CASE WHEN expireIn IS NOT NULL THEN expireIn + COALESCE(deadLetter, q.dead_letter), + CASE + WHEN expireIn IS NOT NULL THEN CAST(expireIn as interval) WHEN q.expire_seconds IS NOT NULL THEN q.expire_seconds * interval '1s' ELSE interval '15 minutes' - END, + END as expireIn, + CASE + WHEN right(keepUntilValue, 1) = 'Z' THEN CAST(keepUntilValue as timestamp with time zone) + ELSE startAfter + CAST(COALESCE(keepUntilValue, (q.retention_minutes * 60)::text, '14 days') as interval) + END as keepUntil, COALESCE(retryLimit, q.retry_limit, 2), COALESCE(retryDelay, q.retry_delay, 0), COALESCE(retryBackoff, q.retry_backoff, false), - COALESCE(deadLetter, q.dead_letter), - CASE WHEN right(keepUntilValue, 1) = 'Z' THEN CAST(keepUntilValue as timestamp with time zone) - ELSE startAfter + CAST(COALESCE(keepUntilValue, (COALESCE(q.retention_minutes, 0) * 60)::text, '0') as interval) - END as keepUntil, q.policy FROM ( SELECT - $1::uuid as id, - $2::text as name, - $3::int as priority, - '${states.created}'::${schema}.job_state as state, - 
$4::int as retryLimit, + COALESCE($1::uuid, gen_random_uuid()) as id, + $2 as name, + $3::jsonb as data, + COALESCE($4::int, 0) as priority, CASE - WHEN right($5::text, 1) = 'Z' THEN CAST($5::text as timestamp with time zone) - ELSE now() + CAST(COALESCE($5::text,'0') as interval) + WHEN right($5, 1) = 'Z' THEN CAST($5 as timestamp with time zone) + ELSE now() + CAST(COALESCE($5,'0') as interval) END as startAfter, - CAST($6 as interval) as expireIn, - $7::jsonb as data, - $8::text as singletonKey, + $6 as singletonKey, CASE - WHEN $9::integer IS NOT NULL THEN 'epoch'::timestamp + '1 second'::interval * ($9 * floor((date_part('epoch', now()) + $10) / $9)) + WHEN $7::integer IS NOT NULL THEN 'epoch'::timestamp + '1 second'::interval * ($7 * floor((date_part('epoch', now()) + $8) / $7)) ELSE NULL END as singletonOn, - $11::int as retryDelay, - $12::bool as retryBackoff, - $13::text as keepUntilValue, - $14::text as deadletter + $9 as deadletter, + COALESCE($10,$11) as expireIn, + COALESCE($12,$13) as keepUntilValue, + COALESCE($14::int,$15::int) as retryLimit, + COALESCE($16::int,$17::int) as retryDelay, + COALESCE($18::bool,$19::bool) as retryBackoff ) j LEFT JOIN ${schema}.queue q ON j.name = q.name ON CONFLICT DO NOTHING RETURNING id @@ -612,12 +611,12 @@ function insertJobs (schema) { priority, startAfter, singletonKey, + deadletter, expireIn, + keepUntil, retryLimit, retryDelay, retryBackoff, - deadletter, - keepUntil, policy ) SELECT @@ -627,26 +626,26 @@ function insertJobs (schema) { COALESCE(priority, 0), COALESCE("startAfter", now()), "singletonKey", - COALESCE("expireInSeconds", q.expire_seconds, 15 * 60) * interval '1s', - COALESCE("retryLimit", q.retry_limit, 2), - COALESCE("retryDelay", q.retry_delay, 0), - COALESCE("retryBackoff", q.retry_backoff, false), COALESCE("deadLetter", q.dead_letter), + COALESCE("expireInSeconds", q.expire_seconds, 15 * 60) * interval '1s', CASE WHEN "keepUntil" IS NOT NULL THEN "keepUntil" WHEN q.retention_minutes IS NOT NULL 
THEN now() + q.retention_minutes * interval '1 minute' ELSE now() + interval '14 days' END, + COALESCE("retryLimit", q.retry_limit, 2), + COALESCE("retryDelay", q.retry_delay, 0), + COALESCE("retryBackoff", q.retry_backoff, false), q.policy FROM json_to_recordset($1) as j ( id uuid, name text, priority integer, data jsonb, + "startAfter" timestamp with time zone, "retryLimit" integer, "retryDelay" integer, "retryBackoff" boolean, - "startAfter" timestamp with time zone, "singletonKey" text, "expireInSeconds" integer, "keepUntil" timestamp with time zone, diff --git a/test/queueTest.js b/test/queueTest.js index 9b2caa1b..f2ae0ca5 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -147,7 +147,7 @@ describe('queues', function () { assert.strictEqual(updateProps.deadLetter, queueObj.deadLetter) }) - it.skip('jobs should inherit properties from queue', async function () { + it('jobs should inherit properties from queue', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema From 09e8ce887dbc86cada1b3f7a2862e515ae050614 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Thu, 7 Sep 2023 23:56:02 -0500 Subject: [PATCH 20/36] drop uuid and partition migration --- docs/readme.md | 4 ---- package-lock.json | 15 +++------------ package.json | 3 +-- src/attorney.js | 8 +------- src/manager.js | 4 ++-- src/migrationStore.js | 40 +++++++++++++++++++++++++++++++++++++++- src/plans.js | 41 ++++++++++++++++++++++++----------------- test/config.json | 1 - test/insertTest.js | 6 +++--- test/opsTest.js | 4 ++-- types.d.ts | 1 - 11 files changed, 75 insertions(+), 52 deletions(-) diff --git a/docs/readme.md b/docs/readme.md index 30c8ffa6..8ec8b87d 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -321,10 +321,6 @@ The following options can be set as properties in an object for additional confi Queue options contain the following constructor-only settings. 
-* **uuid** - string, defaults to "v4" - - job uuid format used, "v1" or "v4" - * **archiveCompletedAfterSeconds** Specifies how long in seconds completed jobs get archived. Note: a warning will be emitted if set to lower than 60s and cron processing will be disabled. diff --git a/package-lock.json b/package-lock.json index 878a73fd..c2b776c3 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "pg-boss", - "version": "9.0.3", + "version": "10.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "pg-boss", - "version": "9.0.3", + "version": "10.0.0", "license": "MIT", "dependencies": { "cron-parser": "^4.0.0", @@ -14,8 +14,7 @@ "lodash.debounce": "^4.0.8", "p-map": "^4.0.0", "pg": "^8.5.1", - "serialize-error": "^8.1.0", - "uuid": "^9.0.0" + "serialize-error": "^8.1.0" }, "devDependencies": { "@types/node": "^20.3.3", @@ -5014,14 +5013,6 @@ "punycode": "^2.1.0" } }, - "node_modules/uuid": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", - "integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==", - "bin": { - "uuid": "dist/bin/uuid" - } - }, "node_modules/version-guard": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/version-guard/-/version-guard-1.1.1.tgz", diff --git a/package.json b/package.json index 4ab5e08d..6ee6e6d4 100644 --- a/package.json +++ b/package.json @@ -12,8 +12,7 @@ "lodash.debounce": "^4.0.8", "p-map": "^4.0.0", "pg": "^8.5.1", - "serialize-error": "^8.1.0", - "uuid": "^9.0.0" + "serialize-error": "^8.1.0" }, "devDependencies": { "@types/node": "^20.3.3", diff --git a/src/attorney.js b/src/attorney.js index 3bd04e69..5b56d51b 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -172,7 +172,6 @@ function getConfig (value) { applyArchiveFailedConfig(config) applyDeleteConfig(config) applyMonitoringConfig(config) - applyUuidConfig(config) applyNewJobCheckInterval(config) 
applyExpirationConfig(config) @@ -314,7 +313,7 @@ function applyMaintenanceConfig (config) { assert(config.maintenanceIntervalSeconds / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: maintenance interval cannot exceed ${MAX_INTERVAL_HOURS} hours`) config.schedule = ('schedule' in config) ? config.schedule : true - config.maintenance = ('maintenance' in config) ? config.maintenance : true + config.supervise = ('supervise' in config) ? config.supervise : true config.migrate = ('migrate' in config) ? config.migrate : true } @@ -394,11 +393,6 @@ function applyMonitoringConfig (config) { : 4 } -function applyUuidConfig (config) { - assert(!('uuid' in config) || config.uuid === 'v1' || config.uuid === 'v4', 'configuration assert: uuid option only supports v1 or v4') - config.uuid = config.uuid || 'v4' -} - function warnClockSkew (message) { emitWarning(WARNINGS.CLOCK_SKEW, message, { force: true }) } diff --git a/src/manager.js b/src/manager.js index ed0642cd..463fd703 100644 --- a/src/manager.js +++ b/src/manager.js @@ -1,7 +1,7 @@ const assert = require('assert') const EventEmitter = require('events') +const { randomUUID } = require('crypto') const delay = require('delay') -const uuid = require('uuid') const debounce = require('lodash.debounce') const { serializeError: stringify } = require('serialize-error') const pMap = require('p-map') @@ -188,7 +188,7 @@ class Manager extends EventEmitter { includeMetadata = false } = options - const id = uuid.v4() + const id = randomUUID({ disableEntropyCache: true }) let queueSize = 0 diff --git a/src/migrationStore.js b/src/migrationStore.js index 5364c0cf..0f2f7aff 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -74,7 +74,11 @@ function getAll (schema) { `DROP INDEX ${schema}.job_singletonOn`, `DROP INDEX ${schema}.job_singletonKeyOn`, `DROP INDEX ${schema}.job_fetch`, + + `ALTER TABLE ${schema}.job ADD COLUMN deadletter text`, `ALTER TABLE ${schema}.job ADD COLUMN policy text`, + + // update state enum 
`ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, @@ -84,6 +88,36 @@ function getAll (schema) { `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','cancelled','failed')`, `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, + + // set up job partitioning + `ALTER TABLE ${schema}.job RENAME TO job_default`, + + `CREATE TABLE ${schema}.job ( + id uuid not null default gen_random_uuid(), + name text not null, + priority integer not null default(0), + data jsonb, + state ${schema}.job_state not null default('created'), + retryLimit integer not null default(0), + retryCount integer not null default(0), + retryDelay integer not null default(0), + retryBackoff boolean not null default false, + startAfter timestamp with time zone not null default now(), + startedOn timestamp with time zone, + singletonKey text, + singletonOn timestamp without time zone, + expireIn interval not null default interval '15 minutes', + createdOn timestamp with time zone not null default now(), + completedOn timestamp with time zone, + keepUntil timestamp with time zone NOT NULL default now() + interval '14 days', + output jsonb, + deadletter text, + policy text, + CONSTRAINT job_pkey PRIMARY KEY (name, id) + ) PARTITION BY RANGE (name)`, + + `ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_default DEFAULT`, + `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)`, `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (id)`, `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()`, @@ -109,15 +143,19 @@ function getAll (schema) { )` ], uninstall: [ - `DROP TABLE IF EXISTS ${schema}.archive_backup`, `DROP INDEX ${schema}.job_policy_stately`, `DROP 
INDEX ${schema}.job_policy_short`, `DROP INDEX ${schema}.job_policy_singleton`, `DROP INDEX ${schema}.job_throttle_on`, `DROP INDEX ${schema}.job_throttle_key`, `DROP INDEX ${schema}.job_fetch`, + `ALTER TABLE ${schema}.job DETACH PARTITION ${schema}.job_default`, + `DROP TABLE ${schema}.job`, + `ALTER TABLE ${schema}.job_default RENAME TO job`, + `DROP TABLE IF EXISTS ${schema}.archive_backup`, `DROP INDEX ${schema}.archive_archivedon_idx`, `DROP INDEX ${schema}.archive_name_idx`, + `ALTER TABLE ${schema}.job DROP COLUMN deadletter`, `ALTER TABLE ${schema}.job DROP COLUMN policy`, `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, diff --git a/src/plans.js b/src/plans.js index 29670079..fed09167 100644 --- a/src/plans.js +++ b/src/plans.js @@ -78,8 +78,8 @@ function create (schema, version) { createEnumJobState(schema), createTableJob(schema), - createTablePartitionJobDefault(schema), - createPrimaryKeyJob(schema), + createTableJobDefault(schema), + attachPartitionJobDefault(schema), createIndexJobName(schema), createIndexJobFetch(schema), createIndexJobPolicyStately(schema), @@ -160,17 +160,18 @@ function createTableJob (schema) { keepUntil timestamp with time zone NOT NULL default now() + interval '14 days', output jsonb, deadletter text, - policy text + policy text, + CONSTRAINT job_pkey PRIMARY KEY (name, id) ) PARTITION BY RANGE (name) ` } -function createTablePartitionJobDefault (schema) { - return `CREATE TABLE ${schema}.job_default PARTITION OF ${schema}.job DEFAULT` +function createTableJobDefault (schema) { + return `CREATE TABLE ${schema}.job_default (LIKE ${schema}.job INCLUDING DEFAULTS INCLUDING CONSTRAINTS)` } -function createPrimaryKeyJob (schema) { - return `ALTER TABLE ${schema}.job ADD CONSTRAINT job_pkey PRIMARY KEY (name, id)` +function attachPartitionJobDefault (schema) { + return `ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_default DEFAULT` } function 
createPrimaryKeyArchive (schema) { @@ -565,15 +566,16 @@ function insertJob (schema) { CASE WHEN expireIn IS NOT NULL THEN CAST(expireIn as interval) WHEN q.expire_seconds IS NOT NULL THEN q.expire_seconds * interval '1s' + WHEN expireInDefault IS NOT NULL THEN CAST(expireInDefault as interval) ELSE interval '15 minutes' END as expireIn, CASE - WHEN right(keepUntilValue, 1) = 'Z' THEN CAST(keepUntilValue as timestamp with time zone) - ELSE startAfter + CAST(COALESCE(keepUntilValue, (q.retention_minutes * 60)::text, '14 days') as interval) + WHEN right(keepUntil, 1) = 'Z' THEN CAST(keepUntil as timestamp with time zone) + ELSE startAfter + CAST(COALESCE(keepUntil, (q.retention_minutes * 60)::text, keepUntilDefault, '14 days') as interval) END as keepUntil, - COALESCE(retryLimit, q.retry_limit, 2), - COALESCE(retryDelay, q.retry_delay, 0), - COALESCE(retryBackoff, q.retry_backoff, false), + COALESCE(retryLimit, q.retry_limit, retryLimitDefault, 2), + COALESCE(retryDelay, q.retry_delay, retryDelayDefault, 0), + COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false), q.policy FROM ( SELECT @@ -591,11 +593,16 @@ function insertJob (schema) { ELSE NULL END as singletonOn, $9 as deadletter, - COALESCE($10,$11) as expireIn, - COALESCE($12,$13) as keepUntilValue, - COALESCE($14::int,$15::int) as retryLimit, - COALESCE($16::int,$17::int) as retryDelay, - COALESCE($18::bool,$19::bool) as retryBackoff + $10 as expireIn, + $11 as expireInDefault, + $12 as keepUntil, + $13 as keepUntilDefault, + $14::int as retryLimit, + $15::int as retryLimitDefault, + $16::int as retryDelay, + $17::int as retryDelayDefault, + $18::bool as retryBackoff, + $19::bool as retryBackoffDefault ) j LEFT JOIN ${schema}.queue q ON j.name = q.name ON CONFLICT DO NOTHING RETURNING id diff --git a/test/config.json b/test/config.json index 7a99f9cd..b3a0a710 100644 --- a/test/config.json +++ b/test/config.json @@ -4,6 +4,5 @@ "database": "pgboss", "user": "postgres", "password": "postgres", - 
"uuid": "v4", "max": 3 } diff --git a/test/insertTest.js b/test/insertTest.js index c3018e16..9be7e7e9 100644 --- a/test/insertTest.js +++ b/test/insertTest.js @@ -1,5 +1,5 @@ const assert = require('assert') -const { v4: uuid } = require('uuid') +const { randomUUID } = require('crypto') const helper = require('./testHelper') describe('insert', function () { @@ -21,7 +21,7 @@ describe('insert', function () { const queue = this.test.bossConfig.schema const input = { - id: uuid(), + id: randomUUID(), name: queue, priority: 1, data: { some: 'data' }, @@ -58,7 +58,7 @@ describe('insert', function () { const queue = this.test.bossConfig.schema const input = { - id: uuid(), + id: randomUUID(), name: queue, priority: 1, data: { some: 'data' }, diff --git a/test/opsTest.js b/test/opsTest.js index 760ff844..8d56423d 100644 --- a/test/opsTest.js +++ b/test/opsTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const { v4: uuid } = require('uuid') +const { randomUUID } = require('crypto') describe('ops', function () { it('should expire manually', async function () { @@ -31,7 +31,7 @@ describe('ops', function () { it('should return null from getJobById if not found', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - const jobId = await boss.getJobById(uuid()) + const jobId = await boss.getJobById(randomUUID()) assert.strictEqual(jobId, null) }) diff --git a/types.d.ts b/types.d.ts index e8e0cbd4..f8948a58 100644 --- a/types.d.ts +++ b/types.d.ts @@ -20,7 +20,6 @@ declare namespace PgBoss { } interface QueueOptions { - uuid?: "v1" | "v4"; monitorStateIntervalSeconds?: number; monitorStateIntervalMinutes?: number; } From a6a6ad6b7439070c7ea8d4dd1256158b729d2aa6 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 8 Sep 2023 00:06:10 -0500 Subject: [PATCH 21/36] queue cascade config --- src/plans.js | 2 +- test/queueTest.js | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff 
--git a/src/plans.js b/src/plans.js index fed09167..f258b887 100644 --- a/src/plans.js +++ b/src/plans.js @@ -730,5 +730,5 @@ function getArchivedJobById (schema) { } function getJobByTableAndId (schema, table) { - return `SELECT * From ${schema}.${table} WHERE id = $1` + return `SELECT * FROM ${schema}.${table} WHERE id = $1` } diff --git a/test/queueTest.js b/test/queueTest.js index f2ae0ca5..fcf440ec 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -166,11 +166,13 @@ describe('queues', function () { const job = await boss.getJobById(jobId) + const retentionMinutes = (new Date(job.keepuntil) - new Date(job.createdon)) / 1000 / 60 + assert.strictEqual(createProps.retryLimit, job.retrylimit) assert.strictEqual(createProps.retryBackoff, job.retrybackoff) assert.strictEqual(createProps.retryDelay, job.retrydelay) - assert.strictEqual(createProps.expireInSeconds, job.expireIn) - assert.strictEqual(createProps.retentionMinutes, job.retentionMinutes) + assert.strictEqual(createProps.expireInSeconds, job.expirein.seconds) + assert.strictEqual(createProps.retentionMinutes, retentionMinutes) assert.strictEqual(createProps.deadLetter, job.deadletter) }) From c69b56de9bcd2772f8415de916c8e8561af654a6 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 8 Sep 2023 17:28:44 -0500 Subject: [PATCH 22/36] wip --- releasenotesv10.md | 63 +++++++++++++++---------------------- src/boss.js | 18 +++++------ src/db.js | 18 +++++------ src/manager.js | 4 +-- src/plans.js | 33 ++++++++++++------- test/backgroundErrorTest.js | 8 ++--- test/migrationTest.js | 8 ++--- test/monitoringTest.js | 20 +++++------- test/retryTest.js | 15 --------- test/speedTest.js | 24 ++++++-------- test/workTest.js | 62 ++++++++++++++++++------------------ types.d.ts | 4 ++- 12 files changed, 125 insertions(+), 152 deletions(-) diff --git a/releasenotesv10.md b/releasenotesv10.md index e9dd1d15..3372fe31 100644 --- a/releasenotesv10.md +++ b/releasenotesv10.md @@ -1,48 +1,35 @@ -1. 
Replace index semantics for throttling and singleton - -```sql - -- anything with singletonKey means "only 1 job can be queued or active at a time" - -- this doesn't seem very useful, since you lose the ability to queue a job that needs to be run later NUKE - CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) - WHERE state < '${states.completed}' - AND singletonOn IS NULL - AND NOT singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' - - -- "singleton queue" means "only 1 job can be queued at a time" - -- this seems more like what people want when they think "one job at a time" - CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) - WHERE state < '${states.active}' - AND singletonOn IS NULL - AND singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' - - -- anything with singletonOn means "only 1 job within this time period, queued, active or completed" - -- Keeping completed jobs and preventing queueing a new one until after the maintenance runs? Doesn't seem very helpful - -- this is only for job creation throttling, so we probably need to keep it - CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) - WHERE state < '${states.expired}' - AND singletonKey IS NULL - - -- anything with both singletonOn and singletonKey means "only 1 job within this time period with this key, queued, active or completed" - -- Same as previous, but scoped to a filter key - CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) - WHERE state < '${states.expired}' +## v10 -``` - -2. Should we implement message group ids like SQS? This would require a new tracking table for in-flight groups and opt-in filtering - -3. consolidate failed states: expired => failed + +* Create explicit queues. Each queue is partitioned into dedicated storage -4. 
Introduce dead letter queue config +* Introduce dead letter queue config * Removes completion jobs and onComplete config * Allows retries in dlq, since they become just like any other queue -5. Add primary key to archive +* Add primary key to archive * allows replication of database for read-replica and/or HA use cases * Existing archive table will be renamed to archive_backup and kept until the next release of pgboss -6. Allow instances to connect without trying to migrate to latest version (instances that should be able to process jobs, but not have access to schema changes or upgrades) +* Allow instances to connect without trying to migrate to latest version (instances that should be able to process jobs, but not have access to schema changes or upgrades) + +New constructor option +```js + migrate: false +``` +* Update existing constructor options for maintenance and scheduling: +```js + supervise: true, + schedule: true +``` +* consolidate failed states: expired => failed + +* Add manual maintenance API for one-off upgrade API without processing queues +```js + await boss.maintain() +``` -7. Add peek API for running TOP N queries against job tables + ## TODO -8. 
Add manual maintenance API for one-off upgrade API without processing queues +* Add peek API for running TOP N queries against job tables +* cascade configuration for boss.insert([jobs]) \ No newline at end of file diff --git a/src/boss.js b/src/boss.js index c1ce0395..95f90116 100644 --- a/src/boss.js +++ b/src/boss.js @@ -95,14 +95,18 @@ class Boss extends EventEmitter { this.maintaining = true - if (this.config.__test__delay_maintenance) { - await delay(this.config.__test__delay_maintenance) + if (this.config.__test__delay_maintenance && !this.stopped) { + this.__testDelayPromise = await delay(this.config.__test__delay_maintenance) } if (this.config.__test__throw_maint) { throw new Error(this.config.__test__throw_maint) } + if (this.stopped) { + return + } + locker = await this.db.lock({ key: 'maintenance' }) const { secondsAgo } = await this.getMaintenanceTime() @@ -138,13 +142,9 @@ class Boss extends EventEmitter { async stop () { if (!this.stopped) { - if (this.maintenanceInterval) { - clearInterval(this.maintenanceInterval) - } - - if (this.monitorInterval) { - clearInterval(this.monitorInterval) - } + if (this.__testDelayPromise) this.__testDelayPromise.clear() + if (this.maintenanceInterval) clearInterval(this.maintenanceInterval) + if (this.monitorInterval) clearInterval(this.monitorInterval) this.stopped = true } diff --git a/src/db.js b/src/db.js index f86e9485..1139adce 100644 --- a/src/db.js +++ b/src/db.js @@ -26,15 +26,15 @@ class Db extends EventEmitter { async executeSql (text, values) { if (this.opened) { - // if (this.config.debug === true) { - // console.log(`${new Date().toISOString()}: DEBUG SQL`) - // console.log(text) + if (this.config.debug === true) { + console.log(`${new Date().toISOString()}: DEBUG SQL`) + console.log(text) - // if (values) { - // console.log(`${new Date().toISOString()}: DEBUG VALUES`) - // console.log(values) - // } - // } + if (values) { + console.log(`${new Date().toISOString()}: DEBUG VALUES`) + console.log(values) 
+ } + } return await this.pool.query(text, values) } @@ -49,7 +49,7 @@ class Db extends EventEmitter { BEGIN; SET LOCAL lock_timeout = '${timeout}s'; SET LOCAL idle_in_transaction_session_timeout = '3600s'; - ${advisoryLock(key)}; + ${advisoryLock(this.config.schema, key)}; ` await lockedClient.query(query) diff --git a/src/manager.js b/src/manager.js index 463fd703..2b55d516 100644 --- a/src/manager.js +++ b/src/manager.js @@ -566,7 +566,7 @@ class Manager extends EventEmitter { deadLetter } = Attorney.checkQueueArgs(name, options) - const paritionSql = plans.createQueueTablePartition(this.config.schema, name) + const paritionSql = plans.partitionCreateJobName(this.config.schema, name) await this.db.executeSql(paritionSql) @@ -653,7 +653,7 @@ class Manager extends EventEmitter { if (result?.rows?.length) { Attorney.assertPostgresObjectName(name) - const sql = plans.dropQueueTablePartition(this.config.schema, name) + const sql = plans.dropJobTablePartition(this.config.schema, name) await this.db.executeSql(sql) } diff --git a/src/plans.js b/src/plans.js index f258b887..8b393be3 100644 --- a/src/plans.js +++ b/src/plans.js @@ -47,8 +47,8 @@ module.exports = { countStates, createQueue, updateQueue, - createQueueTablePartition, - dropQueueTablePartition, + partitionCreateJobName, + dropJobTablePartition, deleteQueueRecords, getQueueByName, getQueueSize, @@ -162,7 +162,7 @@ function createTableJob (schema) { deadletter text, policy text, CONSTRAINT job_pkey PRIMARY KEY (name, id) - ) PARTITION BY RANGE (name) + ) PARTITION BY LIST (name) ` } @@ -174,6 +174,18 @@ function attachPartitionJobDefault (schema) { return `ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_default DEFAULT` } +function partitionCreateJobName (schema, name) { + return ` + CREATE TABLE ${schema}.job_${name} (LIKE ${schema}.job INCLUDING DEFAULTS INCLUDING CONSTRAINTS); + ALTER TABLE ${schema}.job_${name} ADD CONSTRAINT job_check_${name} CHECK (name='${name}'); + ALTER TABLE ${schema}.job 
ATTACH PARTITION ${schema}.job_${name} FOR VALUES IN ('${name}'); + ` +} + +function dropJobTablePartition (schema, name) { + return `DROP TABLE IF EXISTS ${schema}.job_${name}` +} + function createPrimaryKeyArchive (schema) { return `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (name, id)` } @@ -271,14 +283,6 @@ function updateQueue (schema) { ` } -function createQueueTablePartition (schema, name) { - return `CREATE TABLE ${schema}.job_${name} PARTITION OF ${schema}.job FOR VALUES FROM ('${name}') TO ('${name}__pgboss__')` -} - -function dropQueueTablePartition (schema, name) { - return `DROP TABLE IF EXISTS ${schema}.job_${name}` -} - function getQueueByName (schema) { return `SELECT * FROM ${schema}.queue WHERE name = $1` } @@ -574,7 +578,11 @@ function insertJob (schema) { ELSE startAfter + CAST(COALESCE(keepUntil, (q.retention_minutes * 60)::text, keepUntilDefault, '14 days') as interval) END as keepUntil, COALESCE(retryLimit, q.retry_limit, retryLimitDefault, 2), - COALESCE(retryDelay, q.retry_delay, retryDelayDefault, 0), + CASE + WHEN COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false) + THEN GREATEST(COALESCE(retryDelay, q.retry_delay, retryDelayDefault, 0), 1) + ELSE COALESCE(retryDelay, q.retry_delay, retryDelayDefault, 0) + END, COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false), q.policy FROM @@ -638,6 +646,7 @@ function insertJobs (schema) { CASE WHEN "keepUntil" IS NOT NULL THEN "keepUntil" WHEN q.retention_minutes IS NOT NULL THEN now() + q.retention_minutes * interval '1 minute' + -- todo - add default fallback ELSE now() + interval '14 days' END, COALESCE("retryLimit", q.retry_limit, 2), diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index fee81123..4a5f30c8 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -32,7 +32,7 @@ describe('background processing error handling', function () { ...this.test.bossConfig, maintenanceIntervalSeconds: 1, 
supervise: true, - __test__delay_maintenance: 4000 + __test__delay_maintenance: 2000 } const boss = this.test.boss = new PgBoss(config) @@ -43,7 +43,7 @@ describe('background processing error handling', function () { await boss.start() - await delay(7000) + await delay(5000) assert.strictEqual(eventCount, 1) }) @@ -52,7 +52,7 @@ describe('background processing error handling', function () { const config = { ...this.test.bossConfig, monitorStateIntervalSeconds: 1, - __test__delay_monitor: 4000 + __test__delay_monitor: 2000 } const boss = this.test.boss = new PgBoss(config) @@ -63,7 +63,7 @@ describe('background processing error handling', function () { await boss.start() - await delay(7000) + await delay(4000) assert.strictEqual(eventCount, 1) }) diff --git a/test/migrationTest.js b/test/migrationTest.js index 2a829044..9b69a6b1 100644 --- a/test/migrationTest.js +++ b/test/migrationTest.js @@ -59,10 +59,6 @@ describe('migration', function () { const job = await boss.fetch(queue) await boss.complete(job.id) - // active job - await boss.send(queue) - await boss.fetch(queue) - // created job await boss.send(queue) @@ -85,6 +81,10 @@ describe('migration', function () { const version = await contractor.version() assert.strictEqual(version, currentSchemaVersion) + + await boss.send(queue) + const job2 = await boss.fetch(queue) + await boss.complete(job2.id) }) it('should migrate to latest during start if on previous 2 schema versions', async function () { diff --git a/test/monitoringTest.js b/test/monitoringTest.js index ad7c1e90..9d362e87 100644 --- a/test/monitoringTest.js +++ b/test/monitoringTest.js @@ -9,8 +9,7 @@ describe('monitoring', function () { } const boss = this.test.boss = await helper.start(config) - - const queue = 'monitorMe' + const queue = this.test.bossConfig.schema await boss.send(queue) await boss.send(queue) @@ -43,18 +42,13 @@ describe('monitoring', function () { assert.strictEqual(2, states4.queues[queue].active, 'active count is wrong after 3 
sendes and 3 fetches and 1 complete') assert.strictEqual(1, states4.queues[queue].completed, 'completed count is wrong after 3 sendes and 3 fetches and 1 complete') - return new Promise((resolve) => { - let resolved = false - - boss.on('monitor-states', async states => { - if (!resolved) { - resolved = true - assert.strictEqual(states4.queues[queue].created, states.queues[queue].created, 'created count from monitor-states doesn\'t match') - assert.strictEqual(states4.queues[queue].active, states.queues[queue].active, 'active count from monitor-states doesn\'t match') - assert.strictEqual(states4.queues[queue].completed, states.queues[queue].completed, 'completed count from monitor-states doesn\'t match') + await new Promise((resolve) => { + boss.once('monitor-states', async states => { + assert.strictEqual(states4.queues[queue].created, states.queues[queue].created, 'created count from monitor-states doesn\'t match') + assert.strictEqual(states4.queues[queue].active, states.queues[queue].active, 'active count from monitor-states doesn\'t match') + assert.strictEqual(states4.queues[queue].completed, states.queues[queue].completed, 'completed count from monitor-states doesn\'t match') - resolve() - } + resolve() }) }) }) diff --git a/test/retryTest.js b/test/retryTest.js index 566e9f23..ad3d8f3b 100644 --- a/test/retryTest.js +++ b/test/retryTest.js @@ -86,19 +86,4 @@ describe('retries', function () { assert(processCount < retryLimit) }) - - it('should set the default retry limit to 1 if missing', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - const queue = this.test.bossConfig.schema - - const jobId = await boss.send(queue, null, { retryDelay: 1, retryLimit: 0 }) - await boss.fetch(queue) - await boss.fail(jobId) - - await delay(1000) - - const job1 = await boss.fetch(queue) - - assert(job1) - }) }) diff --git a/test/speedTest.js b/test/speedTest.js index 282d9790..c7552d9c 100644 --- a/test/speedTest.js +++ 
b/test/speedTest.js @@ -1,30 +1,24 @@ const helper = require('./testHelper') +const assert = require('assert') describe('speed', function () { - const expectedSeconds = 2 + const expectedSeconds = 4 const jobCount = 10_000 const queue = 'speedTest' - - const jobs = new Array(jobCount).fill(null).map((item, index) => ({ name: queue, data: { index } })) - + const data = new Array(jobCount).fill(null).map((item, index) => ({ name: queue, data: { index } })) const testTitle = `should be able to fetch and complete ${jobCount} jobs in ${expectedSeconds} seconds` - let boss - - beforeEach(async function () { - const defaults = { min: 10, max: 10 } - boss = await helper.start({ ...this.currentTest.bossConfig, ...defaults }) - await boss.insert(jobs) - }) - - afterEach(async function () { await helper.stop(boss) }) - it(testTitle, async function () { this.timeout(expectedSeconds * 1000) this.slow(0) - this.retries(1) + const config = { ...this.test.bossConfig, min: 10, max: 10 } + const boss = this.test.boss = await helper.start(config) + await boss.insert(data) const jobs = await boss.fetch(queue, jobCount) + + assert.strictEqual(jobCount, jobs.length) + await boss.complete(jobs.map(job => job.id)) }) }) diff --git a/test/workTest.js b/test/workTest.js index 1a998310..48c3ce4f 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -240,30 +240,34 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - this.timeout(1000) + this.timeout(2000) - const teamSize = 4 - const teamConcurrency = 2 + const jobCount = 6 - let processCount = 0 + let workCount = 0 - for (let i = 0; i < 6; i++) { + for (let i = 0; i < jobCount; i++) { await boss.send(queue) } - const newJobCheckInterval = 100 + const options = { + teamSize: 4, + teamConcurrency: 2, + newJobCheckInterval: 500, + teamRefill: true + } + + await new Promise((resolve) => { + boss.work(queue, options, async () => { + workCount++ - 
return new Promise((resolve) => { - boss.work(queue, { teamSize, teamConcurrency, newJobCheckInterval, teamRefill: true }, async () => { - processCount++ - if (processCount === 1) { - // Test would timeout if all were blocked on this first - // process - await new Promise(resolve => setTimeout(resolve, 500)) + if (workCount === 1) { + // Test would timeout if all were blocked on + await new Promise(resolve => setTimeout(resolve, 1000)) return } - if (processCount === 6) { + if (workCount === jobCount) { resolve() } }) @@ -274,32 +278,30 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - const teamSize = 4 - const teamConcurrency = 2 - const newJobCheckInterval = 200 - let processCount = 0 - let remainCount = 0 + const options = { + teamSize: 4, + teamConcurrency: 2, + newJobCheckInterval: 500, + teamRefill: true + } + + let workCount = 0 for (let i = 0; i < 7; i++) { await boss.send(queue) } // This should consume 5 jobs, all will block after the first job - await boss.work(queue, { teamSize, teamConcurrency, newJobCheckInterval, teamRefill: true }, async () => { - processCount++ - if (processCount > 1) await new Promise(resolve => setTimeout(resolve, 1000)) + await boss.work(queue, options, async () => { + workCount++ + if (workCount > 1) await new Promise(resolve => setTimeout(resolve, 2000)) }) - await new Promise(resolve => setTimeout(resolve, 400)) - - // this should pick up the last 2 jobs - await boss.work(queue, { teamSize, teamConcurrency, newJobCheckInterval, teamRefill: true }, async () => { - remainCount++ - }) + await new Promise(resolve => setTimeout(resolve, 1000)) - await new Promise(resolve => setTimeout(resolve, 400)) + const remainingJobs = await boss.fetch(queue, 2) - assert(remainCount === 2) + assert.strictEqual(2, remainingJobs.length) }) it('completion should pass string wrapped in value prop', async function () { diff --git a/types.d.ts 
b/types.d.ts index f8948a58..6ffda71f 100644 --- a/types.d.ts +++ b/types.d.ts @@ -33,6 +33,7 @@ declare namespace PgBoss { interface MaintenanceOptions { supervise?: boolean; + migrate?: boolean; deleteAfterSeconds?: number; deleteAfterMinutes?: number; @@ -190,7 +191,7 @@ declare namespace PgBoss { createdon: Date; completedon: Date | null; keepuntil: Date; - deadletter: boolean, + deadletter: string, output: object } @@ -344,6 +345,7 @@ declare class PgBoss extends EventEmitter { getQueueSize(name: string, options?: object): Promise; getJobById(id: string, options?: PgBoss.ConnectionOptions): Promise; + createQueue(name: string, policy: string): Promise; deleteQueue(name: string): Promise; purgeQueue(name: string): Promise; clearStorage(): Promise; From d904256a2fe534cc0d4bdb1b85bee7df183b47f0 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 8 Sep 2023 23:12:48 -0500 Subject: [PATCH 23/36] wip --- README.md | 6 ++--- src/boss.js | 3 ++- src/db.js | 16 ++++++------ src/manager.js | 13 ++++++++-- src/plans.js | 59 +++++++++++++++++++++++++++----------------- test/priorityTest.js | 14 +++++------ test/queueTest.js | 7 ------ test/testHelper.js | 5 ---- types.d.ts | 2 +- 9 files changed, 68 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index c588b93c..bde97d58 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ This will likely cater the most to teams already familiar with the simplicity of * Backpressure-compatible polling workers * Cron scheduling * Pub/sub API for fan-out queue relationships -* Deferral, retries (with exponential backoff), rate limiting, debouncing +* Priority, deferral, retries (with exponential backoff), rate limiting, debouncing * Direct table access for bulk loads via COPY or INSERT * Multi-master compatible (for example, in a Kubernetes ReplicaSet) * Dead letter queues @@ -50,8 +50,8 @@ This will likely cater the most to teams already familiar with the simplicity of * Automatic maintenance operations to manage 
table growth ## Requirements -* Node 16 or higher -* PostgreSQL 11 or higher +* Node 18 or higher +* PostgreSQL 12 or higher ## Installation diff --git a/src/boss.js b/src/boss.js index 95f90116..cfd770ee 100644 --- a/src/boss.js +++ b/src/boss.js @@ -96,7 +96,8 @@ class Boss extends EventEmitter { this.maintaining = true if (this.config.__test__delay_maintenance && !this.stopped) { - this.__testDelayPromise = await delay(this.config.__test__delay_maintenance) + this.__testDelayPromise = delay(this.config.__test__delay_maintenance) + await this.__testDelayPromise } if (this.config.__test__throw_maint) { diff --git a/src/db.js b/src/db.js index 1139adce..bc19bfe7 100644 --- a/src/db.js +++ b/src/db.js @@ -26,15 +26,15 @@ class Db extends EventEmitter { async executeSql (text, values) { if (this.opened) { - if (this.config.debug === true) { - console.log(`${new Date().toISOString()}: DEBUG SQL`) - console.log(text) + // if (this.config.debug === true) { + // console.log(`${new Date().toISOString()}: DEBUG SQL`) + // console.log(text) - if (values) { - console.log(`${new Date().toISOString()}: DEBUG VALUES`) - console.log(values) - } - } + // if (values) { + // console.log(`${new Date().toISOString()}: DEBUG VALUES`) + // console.log(values) + // } + // } return await this.pool.query(text, values) } diff --git a/src/manager.js b/src/manager.js index 2b55d516..b4d8d79e 100644 --- a/src/manager.js +++ b/src/manager.js @@ -434,7 +434,16 @@ class Manager extends EventEmitter { const db = options.db || this.db - return await db.executeSql(this.insertJobsCommand, [JSON.stringify(jobs)]) + const params = [ + JSON.stringify(jobs), // 1 + this.config.expireIn, // 2 + this.config.keepUntil, // 3 + this.config.retryLimit, // 4 + this.config.retryDelay, // 5 + this.config.retryBackoff // 6 + ] + + return await db.executeSql(this.insertJobsCommand, params) } getDebounceStartAfter (singletonSeconds, clockOffset) { @@ -458,7 +467,7 @@ class Manager extends EventEmitter { const 
patternMatch = Attorney.queueNameHasPatternMatch(name) const values = Attorney.checkFetchArgs(name, batchSize, options) const db = options.db || this.db - const nextJobSql = this.nextJobCommand(options.includeMetadata || false, patternMatch) + const nextJobSql = this.nextJobCommand({ ...options, patternMatch }) const statementValues = [values.name, batchSize || 1] let result diff --git a/src/plans.js b/src/plans.js index 8b393be3..3f158483 100644 --- a/src/plans.js +++ b/src/plans.js @@ -16,7 +16,6 @@ const CREATE_RACE_MESSAGE = 'already exists' const QUEUE_POLICY = { standard: 'standard', short: 'short', - priority: 'priority', singleton: 'singleton', stately: 'stately' } @@ -215,7 +214,7 @@ function createIndexJobName (schema) { } function createIndexJobFetch (schema) { - return `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn) WHERE state < '${states.active}'` + return `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn, id) WHERE state < '${states.active}'` } function createTableArchive (schema) { @@ -423,14 +422,14 @@ function insertVersion (schema, version) { } function fetchNextJob (schema) { - return (includeMetadata, patternMatch) => ` - WITH nextJob as ( + return ({ includeMetadata, patternMatch, priority = true } = {}) => ` + WITH next as ( SELECT id - FROM ${schema}.job j + FROM ${schema}.job WHERE state < '${states.active}' AND name ${patternMatch ? 'LIKE' : '='} $1 AND startAfter < now() - ORDER BY priority desc, createdOn, id + ORDER BY ${priority && 'priority desc, '} createdOn, id LIMIT $2 FOR UPDATE SKIP LOCKED ) @@ -438,8 +437,8 @@ function fetchNextJob (schema) { state = '${states.active}', startedOn = now(), retryCount = CASE WHEN startedOn IS NOT NULL THEN retryCount + 1 ELSE retryCount END - FROM nextJob - WHERE j.id = nextJob.id + FROM next + WHERE j.id = next.id RETURNING ${includeMetadata ? 
'j.*' : 'j.id, name, data'}, EXTRACT(epoch FROM expireIn) as expire_in_seconds ` @@ -566,7 +565,7 @@ function insertJob (schema) { startAfter, singletonKey, singletonOn, - COALESCE(deadLetter, q.dead_letter), + COALESCE(deadLetter, q.dead_letter) as deadletter, CASE WHEN expireIn IS NOT NULL THEN CAST(expireIn as interval) WHEN q.expire_seconds IS NOT NULL THEN q.expire_seconds * interval '1s' @@ -577,13 +576,13 @@ function insertJob (schema) { WHEN right(keepUntil, 1) = 'Z' THEN CAST(keepUntil as timestamp with time zone) ELSE startAfter + CAST(COALESCE(keepUntil, (q.retention_minutes * 60)::text, keepUntilDefault, '14 days') as interval) END as keepUntil, - COALESCE(retryLimit, q.retry_limit, retryLimitDefault, 2), + COALESCE(retryLimit, q.retry_limit, retryLimitDefault, 2) as retryLimit, CASE WHEN COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false) - THEN GREATEST(COALESCE(retryDelay, q.retry_delay, retryDelayDefault, 0), 1) + THEN GREATEST(COALESCE(retryDelay, q.retry_delay, retryDelayDefault), 1) ELSE COALESCE(retryDelay, q.retry_delay, retryDelayDefault, 0) - END, - COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false), + END as retryDelay, + COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false) as retryBackoff, q.policy FROM ( SELECT @@ -619,6 +618,14 @@ function insertJob (schema) { function insertJobs (schema) { return ` + WITH defaults as ( + SELECT + $2 as expireIn, + $3 as keepUntil, + $4::int as retryLimit, + $5::int as retryDelay, + $6::bool as retryBackoff + ) INSERT INTO ${schema}.job ( id, name, @@ -642,16 +649,23 @@ function insertJobs (schema) { COALESCE("startAfter", now()), "singletonKey", COALESCE("deadLetter", q.dead_letter), - COALESCE("expireInSeconds", q.expire_seconds, 15 * 60) * interval '1s', + CASE + WHEN "expireInSeconds" IS NOT NULL THEN "expireInSeconds" * interval '1s' + WHEN q.expire_seconds IS NOT NULL THEN q.expire_seconds * interval '1s' + WHEN defaults.expireIn IS NOT NULL THEN 
CAST(defaults.expireIn as interval) + ELSE interval '15 minutes' + END as expireIn, CASE WHEN "keepUntil" IS NOT NULL THEN "keepUntil" - WHEN q.retention_minutes IS NOT NULL THEN now() + q.retention_minutes * interval '1 minute' - -- todo - add default fallback - ELSE now() + interval '14 days' - END, - COALESCE("retryLimit", q.retry_limit, 2), - COALESCE("retryDelay", q.retry_delay, 0), - COALESCE("retryBackoff", q.retry_backoff, false), + ELSE COALESCE("startAfter", now()) + CAST(COALESCE((q.retention_minutes * 60)::text, defaults.keepUntil, '14 days') as interval) + END as keepUntil, + COALESCE("retryLimit", q.retry_limit, defaults.retryLimit, 2), + CASE + WHEN COALESCE("retryBackoff", q.retry_backoff, defaults.retryBackoff, false) + THEN GREATEST(COALESCE("retryDelay", q.retry_delay, defaults.retryDelay), 1) + ELSE COALESCE("retryDelay", q.retry_delay, defaults.retryDelay, 0) + END as retryDelay, + COALESCE("retryBackoff", q.retry_backoff, defaults.retryBackoff, false) as retryBackoff, q.policy FROM json_to_recordset($1) as j ( id uuid, @@ -667,7 +681,8 @@ function insertJobs (schema) { "keepUntil" timestamp with time zone, "deadLetter" text ) - LEFT JOIN ${schema}.queue q ON j.name = q.name + LEFT JOIN ${schema}.queue q ON j.name = q.name, + defaults ON CONFLICT DO NOTHING ` } diff --git a/test/priorityTest.js b/test/priorityTest.js index 468fab72..abd55b92 100644 --- a/test/priorityTest.js +++ b/test/priorityTest.js @@ -4,22 +4,20 @@ const helper = require('./testHelper') describe('priority', function () { it('higher priority job', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const jobName = 'priority-test' + await boss.send(queue) - await boss.send(jobName) + const high = await boss.send(queue, null, { priority: 1 }) - const high = await boss.send(jobName, null, { priority: 1 }) - - const job = await boss.fetch(jobName) + const job = await boss.fetch(queue) 
assert.strictEqual(job.id, high) }) it('descending priority order', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'multiple-priority-test' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema const low = await boss.send(queue, null, { priority: 1 }) const medium = await boss.send(queue, null, { priority: 5 }) diff --git a/test/queueTest.js b/test/queueTest.js index fcf440ec..5d18072e 100644 --- a/test/queueTest.js +++ b/test/queueTest.js @@ -73,13 +73,6 @@ describe('queues', function () { await boss.createQueue(queue, { policy: 'short' }) }) - it('should create a queue with priority policy', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - const queue = this.test.bossConfig.schema - - await boss.createQueue(queue, { policy: 'priority' }) - }) - it('should delete a queue', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema diff --git a/test/testHelper.js b/test/testHelper.js index f240a108..b4555d9a 100644 --- a/test/testHelper.js +++ b/test/testHelper.js @@ -6,7 +6,6 @@ const sha1 = (value) => crypto.createHash('sha1').update(value).digest('hex') module.exports = { dropSchema, start, - stop, getDb, getArchivedJobById, countJobs, @@ -126,7 +125,3 @@ async function start (options) { } } } - -async function stop (boss, timeout = 1000) { - await boss.stop({ timeout }) -} diff --git a/types.d.ts b/types.d.ts index 6ffda71f..0fe25dae 100644 --- a/types.d.ts +++ b/types.d.ts @@ -345,7 +345,7 @@ declare class PgBoss extends EventEmitter { getQueueSize(name: string, options?: object): Promise; getJobById(id: string, options?: PgBoss.ConnectionOptions): Promise; - createQueue(name: string, policy: string): Promise; + createQueue(name: string, policy: 'standard' | 'short' | 'singleton' | 'stately'): 
Promise; deleteQueue(name: string): Promise; purgeQueue(name: string): Promise; clearStorage(): Promise; From 74af5303d9559d91a80cf260763969f9b7ec328d Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 10 Sep 2023 16:47:08 -0500 Subject: [PATCH 24/36] drop delay dep --- package-lock.json | 1107 ++++++++++++++++++++++++----------- package.json | 1 - releasenotesv10.md | 8 +- src/boss.js | 4 +- src/index.js | 2 +- src/manager.js | 9 +- src/tools.js | 28 + src/worker.js | 6 +- test/archiveTest.js | 2 +- test/backgroundErrorTest.js | 2 +- test/delayTest.js | 2 +- test/expireTest.js | 2 +- test/failureTest.js | 2 +- test/maintenanceTest.js | 2 +- test/managerTest.js | 2 +- test/retryTest.js | 2 +- test/scheduleTest.js | 2 +- test/throttleTest.js | 2 +- test/workTest.js | 13 +- 19 files changed, 828 insertions(+), 370 deletions(-) create mode 100644 src/tools.js diff --git a/package-lock.json b/package-lock.json index c2b776c3..c3db11a6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,6 @@ "license": "MIT", "dependencies": { "cron-parser": "^4.0.0", - "delay": "^5.0.0", "lodash.debounce": "^4.0.8", "p-map": "^4.0.0", "pg": "^8.5.1", @@ -50,47 +49,119 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.5.tgz", - "integrity": "sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dev": true, "dependencies": { - "@babel/highlight": "^7.22.5" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + 
"integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + 
"node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/compat-data": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz", - "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==", + "version": "7.22.9", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz", + "integrity": "sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz", - "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==", + "version": "7.22.17", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.17.tgz", + "integrity": "sha512-2EENLmhpwplDux5PSsZnSbnSkB3tZ6QTksgO25xwEL7pIDcNOMhF5v/s6RzwjMZzZzw9Ofc30gHv5ChCC8pifQ==", "dev": true, "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.22.5", - "@babel/generator": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helpers": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.22.15", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-module-transforms": "^7.22.17", + "@babel/helpers": "^7.22.15", + "@babel/parser": "^7.22.16", + 
"@babel/template": "^7.22.15", + "@babel/traverse": "^7.22.17", + "@babel/types": "^7.22.17", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", - "json5": "^2.2.2", - "semver": "^6.3.0" + "json5": "^2.2.3", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -101,12 +172,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.5.tgz", - "integrity": "sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.15.tgz", + "integrity": "sha512-Zu9oWARBqeVOW0dZOjXc3JObrzuqothQ3y/n1kUtrjCoCPLkXUwMvOo/F/TCfoHMbWIFlWwpZtkZVb9ga4U2pA==", "dev": true, "dependencies": { - "@babel/types": "^7.22.5", + "@babel/types": "^7.22.15", "@jridgewell/gen-mapping": "^0.3.2", "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" @@ -116,22 +187,19 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz", - "integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz", + "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==", "dev": true, "dependencies": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "browserslist": "^4.21.3", + "@babel/compat-data": "^7.22.9", + "@babel/helper-validator-option": "^7.22.15", + "browserslist": "^4.21.9", "lru-cache": "^5.1.1", - "semver": "^6.3.0" + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" } }, 
"node_modules/@babel/helper-environment-visitor": { @@ -169,34 +237,34 @@ } }, "node_modules/@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", "dev": true, "dependencies": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz", - "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==", + "version": "7.22.17", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.17.tgz", + "integrity": "sha512-XouDDhQESrLHTpnBtCKExJdyY4gJCdrvH2Pyv8r8kovX2U8G0dRUOT45T9XlbLtuu9CLXP15eusnkprhoPV5iQ==", "dev": true, "dependencies": { "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-module-imports": "^7.22.15", "@babel/helper-simple-access": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.15" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-simple-access": { @@ -212,9 +280,9 @@ } }, 
"node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz", - "integrity": "sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "dependencies": { "@babel/types": "^7.22.5" @@ -233,45 +301,45 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", - "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.15.tgz", + "integrity": "sha512-4E/F9IIEi8WR94324mbDUMo074YTheJmd7eZF5vITTeYchqAi6sYXRLHUVsmkdmY4QjfKTcB2jB7dVP3NaBElQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz", - "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz", + "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz", - "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.15.tgz", + "integrity": "sha512-7pAjK0aSdxOwR+CcYAqgWOGy5dcfvzsTIfFTb2odQqW47MDfv14UaJDY6eng8ylM2EaeKXdxaSWESbkmaQHTmw==", "dev": true, "dependencies": { - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.5.tgz", - "integrity": "sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.13.tgz", + "integrity": "sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ==", "dev": true, "dependencies": { "@babel/helper-validator-identifier": "^7.22.5", - "chalk": "^2.0.0", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -350,9 +418,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.5.tgz", - "integrity": "sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==", + "version": "7.22.16", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.16.tgz", + "integrity": "sha512-+gPfKv8UWeKKeJTUxe59+OobVcrYHETCsORl61EmSkmgymguYk/X5bp7GuUIXaFsc6y++v8ZxPsLSSuujqDphA==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -362,33 +430,33 @@ } }, "node_modules/@babel/template": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.22.5.tgz", - "integrity": "sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.5.tgz", - "integrity": "sha512-7DuIjPgERaNo6r+PZwItpjCZEa5vyw4eJGufeLxrPdBXBoLcCJCIasvK6pK/9DVNrLZTLFhUGqaC6X/PA007TQ==", + "version": "7.22.17", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.17.tgz", + "integrity": "sha512-xK4Uwm0JnAMvxYZxOVecss85WxTEIbTa7bnGyf/+EgCL5Zt3U7htUpEOWv9detPlamGKuRzCqw74xVglDWpPdg==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.22.5", - "@babel/generator": "^7.22.5", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.22.15", "@babel/helper-environment-visitor": "^7.22.5", "@babel/helper-function-name": "^7.22.5", "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/types": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.22.16", + "@babel/types": "^7.22.17", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -397,13 +465,13 @@ } }, "node_modules/@babel/types": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.5.tgz", - "integrity": "sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==", + "version": 
"7.22.17", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.17.tgz", + "integrity": "sha512-YSQPHLFtQNE5xN9tHuZnzu8vPr61wVTBZdfv1meex1NBosa4iT05k/Jw06ddJugi4bk7The/oSwQGFcksmEJQg==", "dev": true, "dependencies": { "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.15", "to-fast-properties": "^2.0.0" }, "engines": { @@ -426,18 +494,18 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.1.tgz", - "integrity": "sha512-Z5ba73P98O1KUYCCJTUeVpja9RcGoMdncZ6T49FCUl2lN38JtCJ+3WgIDBv0AuY4WChU5PmtJmOCTlN6FZTFKQ==", + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.8.0.tgz", + "integrity": "sha512-JylOEEzDiOryeUnFbQz+oViCXS0KsvR1mvHkoMiu5+UiBvy+RYX7tzlIIIEstF/gVa2tj9AQXk3dgnxv6KxhFg==", "dev": true, "engines": { "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, "node_modules/@eslint/eslintrc": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.0.tgz", - "integrity": "sha512-Lj7DECXqIVCqnqjjHMPna4vn6GJcMgul/wuS0je9OZ9gsL0zzDpKPVtcG1HaDVc+9y+qgXneTeUMbCqXJNpH1A==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.2.tgz", + "integrity": "sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==", "dev": true, "dependencies": { "ajv": "^6.12.4", @@ -468,9 +536,9 @@ } }, "node_modules/@eslint/eslintrc/node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "version": "13.21.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.21.0.tgz", + "integrity": 
"sha512-ybyme3s4yy/t/3s35bewwXKOf7cvzfreG2lH0lZl0JB7I4GxRP2ghxOK/Nb9EkRXdbBXZLfq/p/0W2JUONB/Gg==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -507,18 +575,18 @@ } }, "node_modules/@eslint/js": { - "version": "8.44.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.44.0.tgz", - "integrity": "sha512-Ag+9YM4ocKQx9AarydN0KY2j0ErMHNIocPDrVo8zAE44xLTjEtz81OdR68/cydGtk6m6jDb5Za3r2useMzYmSw==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.49.0.tgz", + "integrity": "sha512-1S8uAY/MTJqVx0SC4epBq+N2yhuwtNwLbJYNZyhL2pO1ZVKn5HFXav5T41Ryzy9K9V7ZId2JB2oy/W4aCd9/2w==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz", - "integrity": "sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==", + "version": "0.11.11", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.11.tgz", + "integrity": "sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA==", "dev": true, "dependencies": { "@humanwhocodes/object-schema": "^1.2.1", @@ -684,9 +752,9 @@ } }, "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", - "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", "dev": true, "engines": { "node": ">=6.0.0" @@ -708,21 +776,15 @@ "dev": true }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.18", - "resolved": 
"https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", - "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "version": "0.3.19", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", + "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", "dev": true, "dependencies": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, - "node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", - "dev": true - }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -765,15 +827,15 @@ "dev": true }, "node_modules/@types/node": { - "version": "20.3.3", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.3.tgz", - "integrity": "sha512-wheIYdr4NYML61AjC8MKj/2jrR/kDQri/CIpVoZwldwhnIrD/j9jIU5bJ8yBKuB2VhpFV7Ab6G2XkBjv9r9Zzw==", + "version": "20.6.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.6.0.tgz", + "integrity": "sha512-najjVq5KN2vsH2U/xyh2opaSEz6cZMR2SetLIlxlj08nOcmPOemJmUK2o4kUzfLqfrWE0PIrNeE16XhYDd3nqg==", "dev": true }, "node_modules/acorn": { - "version": "8.9.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.9.0.tgz", - "integrity": "sha512-jaVNAFBHNLXspO543WnNNPZFRtavh3skAkITqD0/2aeMkKZTN+254PyhwxFYrk3vQ1xfY+2wbesJMs/JC8/PwQ==", + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": 
"sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -903,15 +965,15 @@ } }, "node_modules/array-includes": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", - "integrity": "sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.7.tgz", + "integrity": "sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", "is-string": "^1.0.7" }, "engines": { @@ -921,15 +983,34 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.3.tgz", + "integrity": "sha512-LzLoiOMAxvy+Gd3BAq3B7VeIgPdo+Q8hthvKtXybMvRV0jrXfJM/t8mw7nNlpEcVlVUnCnM2KSX4XU5HmpodOA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0", + "get-intrinsic": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/array.prototype.flat": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz", + "integrity": 
"sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" }, "engines": { @@ -940,14 +1021,14 @@ } }, "node_modules/array.prototype.flatmap": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz", - "integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz", + "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" }, "engines": { @@ -958,16 +1039,46 @@ } }, "node_modules/array.prototype.tosorted": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.1.tgz", - "integrity": "sha512-pZYPXPRl2PqWcsUs6LOMn+1f1532nEoPTYowBtqLwAW+W8vSVhkIGnmOX1t/UQjD6YGI0vcD2B1U7ZFGQH9jnQ==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.2.tgz", + "integrity": "sha512-HuQCHOlk1Weat5jzStICBCd83NxiIMwqDg/dHEsoefabn/hJRj5pVdWcPUSpRrwhwxZOsQassMpgN/xRYFBMIg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0", - "get-intrinsic": "^1.1.3" + "get-intrinsic": "^1.2.1" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.2", + 
"resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.2.tgz", + "integrity": "sha512-yMBKppFur/fbHu9/6USUe03bZ4knMYiwFBcyiaXB8Go0qNehwX6inYPzK9U0NeQvGxKthcmHcaR8P5MStSRBAw==", + "dev": true, + "dependencies": { + "array-buffer-byte-length": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "is-array-buffer": "^3.0.2", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/asynciterator.prototype": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/asynciterator.prototype/-/asynciterator.prototype-1.0.0.tgz", + "integrity": "sha512-wwHYEIS0Q80f5mosx3L/dfG5t5rjEa9Ft51GTaNt862EnpyGHpgz2RkZvLPp1oF5TnAiTohkEKVEu8pQPJI7Vg==", + "dev": true, + "dependencies": { + "has-symbols": "^1.0.3" } }, "node_modules/available-typed-arrays": { @@ -1025,9 +1136,9 @@ "dev": true }, "node_modules/browserslist": { - "version": "4.21.9", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.9.tgz", - "integrity": "sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==", + "version": "4.21.10", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz", + "integrity": "sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==", "dev": true, "funding": [ { @@ -1044,9 +1155,9 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001503", - "electron-to-chromium": "^1.4.431", - "node-releases": "^2.0.12", + "caniuse-lite": "^1.0.30001517", + "electron-to-chromium": "^1.4.477", + "node-releases": "^2.0.13", "update-browserslist-db": "^1.0.11" }, "bin": { @@ -1086,9 +1197,9 @@ } }, "node_modules/builtins/node_modules/semver": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz", - 
"integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -1153,9 +1264,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001510", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001510.tgz", - "integrity": "sha512-z35lD6xjHklPNgjW4P68R30Er5OCIZE0C1bUf8IMXSh34WJYXfIy+GxIEraQXYJ2dvTU8TumjYAeLrPhpMlsuw==", + "version": "1.0.30001532", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001532.tgz", + "integrity": "sha512-FbDFnNat3nMnrROzqrsg314zhqN5LGQ1kyyMk2opcrwGbVGpHRhgCWtAgD5YJUqNAiQ+dklreil/c3Qf1dfCTw==", "dev": true, "funding": [ { @@ -1283,9 +1394,9 @@ "dev": true }, "node_modules/cron-parser": { - "version": "4.8.1", - "resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.8.1.tgz", - "integrity": "sha512-jbokKWGcyU4gl6jAfX97E1gDpY12DJ1cLJZmoDzaAln/shZ+S3KBFBuA2Q6WeUN4gJf/8klnV1EfvhA2lK5IRQ==", + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.9.0.tgz", + "integrity": "sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==", "dependencies": { "luxon": "^3.2.1" }, @@ -1376,17 +1487,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/delay": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/delay/-/delay-5.0.0.tgz", - "integrity": "sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/diff": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", @@ -1409,9 +1509,9 
@@ } }, "node_modules/electron-to-chromium": { - "version": "1.4.447", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.447.tgz", - "integrity": "sha512-sxX0LXh+uL41hSJsujAN86PjhrV/6c79XmpY0TvjZStV6VxIgarf8SRkUoUTuYmFcZQTemsoqo8qXOGw5npWfw==", + "version": "1.4.513", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.513.tgz", + "integrity": "sha512-cOB0xcInjm+E5qIssHeXJ29BaUyWpMyFKT5RB3bsLENDheCja0wMkHJyiPl0NBE/VzDI7JDuNEQWhe6RitEUcw==", "dev": true }, "node_modules/emoji-regex": { @@ -1430,18 +1530,19 @@ } }, "node_modules/es-abstract": { - "version": "1.21.2", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.2.tgz", - "integrity": "sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==", + "version": "1.22.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.1.tgz", + "integrity": "sha512-ioRRcXMO6OFyRpyzV3kE1IIBd4WG5/kltnzdxSCqoP8CMGs/Li+M1uF5o7lOkZVFjDs+NLesthnF66Pg/0q0Lw==", "dev": true, "dependencies": { "array-buffer-byte-length": "^1.0.0", + "arraybuffer.prototype.slice": "^1.0.1", "available-typed-arrays": "^1.0.5", "call-bind": "^1.0.2", "es-set-tostringtag": "^2.0.1", "es-to-primitive": "^1.2.1", "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.2.0", + "get-intrinsic": "^1.2.1", "get-symbol-description": "^1.0.0", "globalthis": "^1.0.3", "gopd": "^1.0.1", @@ -1461,14 +1562,18 @@ "object-inspect": "^1.12.3", "object-keys": "^1.1.1", "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", + "regexp.prototype.flags": "^1.5.0", + "safe-array-concat": "^1.0.0", "safe-regex-test": "^1.0.0", "string.prototype.trim": "^1.2.7", "string.prototype.trimend": "^1.0.6", "string.prototype.trimstart": "^1.0.6", + "typed-array-buffer": "^1.0.0", + "typed-array-byte-length": "^1.0.0", + "typed-array-byte-offset": "^1.0.0", "typed-array-length": "^1.0.4", "unbox-primitive": "^1.0.2", - 
"which-typed-array": "^1.1.9" + "which-typed-array": "^1.1.10" }, "engines": { "node": ">= 0.4" @@ -1477,6 +1582,28 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/es-iterator-helpers": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.14.tgz", + "integrity": "sha512-JgtVnwiuoRuzLvqelrvN3Xu7H9bu2ap/kQ2CrM62iidP8SKuD99rWU3CJy++s7IVL2qb/AjXPGR/E7i9ngd/Cw==", + "dev": true, + "dependencies": { + "asynciterator.prototype": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-set-tostringtag": "^2.0.1", + "function-bind": "^1.1.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.0", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "iterator.prototype": "^1.1.0", + "safe-array-concat": "^1.0.0" + } + }, "node_modules/es-set-tostringtag": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", @@ -1545,27 +1672,27 @@ } }, "node_modules/eslint": { - "version": "8.44.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.44.0.tgz", - "integrity": "sha512-0wpHoUbDUHgNCyvFB5aXLiQVfK9B0at6gUvzy83k4kAsQ/u769TQDX6iKC+aO4upIHO9WSaA3QoXYQDHbNwf1A==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.49.0.tgz", + "integrity": "sha512-jw03ENfm6VJI0jA9U+8H5zfl5b+FvuU3YYvZRdZHOlU2ggJkxrlkJH4HcDrZpj6YwD8kuYqvQM8LyesoazrSOQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.4.0", - "@eslint/eslintrc": "^2.1.0", - "@eslint/js": "8.44.0", - "@humanwhocodes/config-array": "^0.11.10", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.2", + "@eslint/js": "8.49.0", + "@humanwhocodes/config-array": "^0.11.11", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", - "ajv": "^6.10.0", + 
"ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", "debug": "^4.3.2", "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.2.0", - "eslint-visitor-keys": "^3.4.1", - "espree": "^9.6.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", @@ -1575,7 +1702,6 @@ "globals": "^13.19.0", "graphemer": "^1.4.0", "ignore": "^5.2.0", - "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "is-path-inside": "^3.0.3", @@ -1587,7 +1713,6 @@ "natural-compare": "^1.4.0", "optionator": "^0.9.3", "strip-ansi": "^6.0.1", - "strip-json-comments": "^3.1.0", "text-table": "^0.2.0" }, "bin": { @@ -1654,14 +1779,14 @@ } }, "node_modules/eslint-import-resolver-node": { - "version": "0.3.7", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz", - "integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==", + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", "dev": true, "dependencies": { "debug": "^3.2.7", - "is-core-module": "^2.11.0", - "resolve": "^1.22.1" + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" } }, "node_modules/eslint-import-resolver-node/node_modules/debug": { @@ -1743,26 +1868,28 @@ } }, "node_modules/eslint-plugin-import": { - "version": "2.27.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz", - "integrity": "sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==", + "version": "2.28.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.28.1.tgz", + "integrity": 
"sha512-9I9hFlITvOV55alzoKBI+K9q74kv0iKMeY6av5+umsNwayt59fz692daGyjR+oStBQgx6nwR9rXldDev3Clw+A==", "dev": true, "dependencies": { "array-includes": "^3.1.6", + "array.prototype.findlastindex": "^1.2.2", "array.prototype.flat": "^1.3.1", "array.prototype.flatmap": "^1.3.1", "debug": "^3.2.7", "doctrine": "^2.1.0", "eslint-import-resolver-node": "^0.3.7", - "eslint-module-utils": "^2.7.4", + "eslint-module-utils": "^2.8.0", "has": "^1.0.3", - "is-core-module": "^2.11.0", + "is-core-module": "^2.13.0", "is-glob": "^4.0.3", "minimatch": "^3.1.2", + "object.fromentries": "^2.0.6", + "object.groupby": "^1.0.0", "object.values": "^1.1.6", - "resolve": "^1.22.1", - "semver": "^6.3.0", - "tsconfig-paths": "^3.14.1" + "semver": "^6.3.1", + "tsconfig-paths": "^3.14.2" }, "engines": { "node": ">=4" @@ -1874,9 +2001,9 @@ } }, "node_modules/eslint-plugin-n/node_modules/semver": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz", - "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -1907,15 +2034,16 @@ } }, "node_modules/eslint-plugin-react": { - "version": "7.32.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.32.2.tgz", - "integrity": "sha512-t2fBMa+XzonrrNkyVirzKlvn5RXzzPwRHtMvLAtVZrt8oxgnTQaYbU6SXTOO1mwQgp1y5+toMSKInnzGr0Knqg==", + "version": "7.33.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.33.2.tgz", + "integrity": "sha512-73QQMKALArI8/7xGLNI/3LylrEYrlKZSb5C9+q3OtOewTnMQi5cT+aE9E41sLCmli3I9PGGmD1yiZydyo4FEPw==", "dev": true, "dependencies": { "array-includes": "^3.1.6", "array.prototype.flatmap": "^1.3.1", 
"array.prototype.tosorted": "^1.1.1", "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.12", "estraverse": "^5.3.0", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", @@ -1925,7 +2053,7 @@ "object.values": "^1.1.6", "prop-types": "^15.8.1", "resolve": "^2.0.0-next.4", - "semver": "^6.3.0", + "semver": "^6.3.1", "string.prototype.matchall": "^4.0.8" }, "engines": { @@ -1987,9 +2115,9 @@ } }, "node_modules/eslint-scope": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.0.tgz", - "integrity": "sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==", + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, "dependencies": { "esrecurse": "^4.3.0", @@ -2030,9 +2158,9 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "3.4.1", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz", - "integrity": "sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -2064,9 +2192,9 @@ } }, "node_modules/eslint/node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "version": "13.21.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.21.0.tgz", + "integrity": 
"sha512-ybyme3s4yy/t/3s35bewwXKOf7cvzfreG2lH0lZl0JB7I4GxRP2ghxOK/Nb9EkRXdbBXZLfq/p/0W2JUONB/Gg==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -2103,9 +2231,9 @@ } }, "node_modules/espree": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.0.tgz", - "integrity": "sha512-1FH/IiruXZ84tpUlm0aCUEwMl2Ho5ilqVh0VvQXw+byAz/4SAciyHLlfmL5WYqsvD38oymdUwBss0LtK8m4s/A==", + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, "dependencies": { "acorn": "^8.9.0", @@ -2268,16 +2396,17 @@ } }, "node_modules/flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.1.0.tgz", + "integrity": "sha512-OHx4Qwrrt0E4jEIcI5/Xb+f+QmJYNj2rrK8wiIdQOIrB9WrrJL8cjZvXdXuBTkkEwEqLycb5BeZDV1o2i9bTew==", "dev": true, "dependencies": { - "flatted": "^3.1.0", + "flatted": "^3.2.7", + "keyv": "^4.5.3", "rimraf": "^3.0.2" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=12.0.0" } }, "node_modules/flatted": { @@ -2335,9 +2464,9 @@ "dev": true }, "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, "hasInstallScript": true, "optional": true, @@ -2355,15 +2484,15 @@ "dev": true }, "node_modules/function.prototype.name": { - 
"version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", + "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" }, "engines": { "node": ">= 0.4" @@ -2766,6 +2895,21 @@ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", "dev": true }, + "node_modules/is-async-function": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", + "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-bigint": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", @@ -2819,9 +2963,9 @@ } }, "node_modules/is-core-module": { - "version": "2.12.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz", - "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==", + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", + "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", "dev": true, 
"dependencies": { "has": "^1.0.3" @@ -2854,6 +2998,18 @@ "node": ">=0.10.0" } }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz", + "integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -2863,6 +3019,21 @@ "node": ">=8" } }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -2875,6 +3046,15 @@ "node": ">=0.10.0" } }, + "node_modules/is-map": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz", + "integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-negative-zero": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", @@ -2945,6 +3125,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-set": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz", + "integrity": 
"sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-shared-array-buffer": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", @@ -3000,16 +3189,12 @@ } }, "node_modules/is-typed-array": { - "version": "1.1.10", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.12.tgz", + "integrity": "sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg==", "dev": true, "dependencies": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0" + "which-typed-array": "^1.1.11" }, "engines": { "node": ">= 0.4" @@ -3036,6 +3221,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/is-weakmap": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz", + "integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-weakref": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", @@ -3048,6 +3242,19 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-weakset": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz", + "integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", 
+ "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", @@ -3057,6 +3264,12 @@ "node": ">=0.10.0" } }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -3128,27 +3341,60 @@ "node": ">=8" } }, - "node_modules/istanbul-lib-processinfo/node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "dev": true, - "bin": { - "uuid": "dist/bin/uuid" - } - }, "node_modules/istanbul-lib-report": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", "dev": true, "dependencies": { "istanbul-lib-coverage": "^3.0.0", - "make-dir": "^3.0.0", + "make-dir": "^4.0.0", "supports-color": "^7.1.0" }, "engines": { - "node": ">=8" + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, 
+ "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/istanbul-lib-report/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/istanbul-lib-report/node_modules/supports-color": { @@ -3163,6 +3409,12 @@ "node": ">=8" } }, + "node_modules/istanbul-lib-report/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, "node_modules/istanbul-lib-source-maps": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", @@ -3178,9 +3430,9 @@ } }, "node_modules/istanbul-reports": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz", - "integrity": "sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==", + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz", + "integrity": 
"sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==", "dev": true, "dependencies": { "html-escaper": "^2.0.0", @@ -3190,6 +3442,18 @@ "node": ">=8" } }, + "node_modules/iterator.prototype": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.1.tgz", + "integrity": "sha512-9E+nePc8C9cnQldmNl6bgpTY6zI4OPRZd97fhJ/iVZ1GifIUDVV5F6x1nEDqpe8KaMEZGT4xgrwKQDxXnjOIZQ==", + "dev": true, + "dependencies": { + "define-properties": "^1.2.0", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.3" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -3220,6 +3484,12 @@ "node": ">=4" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, "node_modules/json-parse-better-errors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", @@ -3251,9 +3521,9 @@ } }, "node_modules/jsx-ast-utils": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.4.tgz", - "integrity": "sha512-fX2TVdCViod6HwKEtSWGHs57oFhVfCMwieb9PuRDgjDPh5XeqJiHFFFJCHxU5cnTc3Bu/GRL+kPiFmw8XWOfKw==", + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", "dev": true, "dependencies": { "array-includes": "^3.1.6", @@ -3265,6 +3535,15 @@ "node": ">=4.0" } }, + "node_modules/keyv": { + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.3.tgz", + "integrity": 
"sha512-QCiSav9WaX1PgETJ+SpNnx2PRRapJ/oRSXM4VO5OGYGSjrxbKPVFVhB3l2OCbLCk329N8qyAtsJjSjvVBWzEug==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -3382,9 +3661,9 @@ } }, "node_modules/luxon": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.3.0.tgz", - "integrity": "sha512-An0UCfG/rSiqtAIiBPO0Y9/zAnHUZxAMiCpTd5h2smgsj7GGmcenvrvww2cqNA8/4A5ZrD1gJpHN2mIHZQF+Mg==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.4.3.tgz", + "integrity": "sha512-tFWBiv3h7z+T/tDaoxA8rqTxy1CHV6gHS//QdaH4pulbq/JuBSGgQspQQqcgnwdAx6pNI7cmvz5Sv/addzHmUg==", "engines": { "node": ">=12" } @@ -3502,9 +3781,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", - "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==", + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", + "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==", "dev": true }, "node_modules/normalize-path": { @@ -3733,28 +4012,28 @@ } }, "node_modules/object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.7.tgz", + "integrity": "sha512-jCBs/0plmPsOnrKAfFQXRG2NFjlhZgjjcBLSmTnEhU8U6vVTsVe8ANeQJCHTl3gSsI4J+0emOoCgoKlmQPMgmA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, 
"engines": { "node": ">= 0.4" } }, "node_modules/object.fromentries": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.7.tgz", + "integrity": "sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "engines": { "node": ">= 0.4" @@ -3763,28 +4042,40 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/object.groupby": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.1.tgz", + "integrity": "sha512-HqaQtqLnp/8Bn4GL16cj+CUYbnpe1bh0TtEaWvybszDG4tgxCJuRpV8VGuvNaI1fAnI4lUJzDG55MXcOH4JZcQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1" + } + }, "node_modules/object.hasown": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.2.tgz", - "integrity": "sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.3.tgz", + "integrity": "sha512-fFI4VcYpRHvSLXxP7yiZOMAd331cPfd2p7PFDVbgUsYOfCT3tICVqXWngbjr4m49OvsBwUBQ6O2uQoJvy3RexA==", "dev": true, "dependencies": { - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/object.values": { - "version": "1.1.6", - "resolved": 
"https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.7.tgz", + "integrity": "sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "engines": { "node": ">= 0.4" @@ -3951,13 +4242,13 @@ "dev": true }, "node_modules/pg": { - "version": "8.11.1", - "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.1.tgz", - "integrity": "sha512-utdq2obft07MxaDg0zBJI+l/M3mBRfIpEN3iSemsz0G5F2/VXx+XzqF4oxrbIZXQxt2AZzIUzyVg/YM6xOP/WQ==", + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.3.tgz", + "integrity": "sha512-+9iuvG8QfaaUrrph+kpF24cXkH1YOOUeArRNYIxq1viYHZagBxrTno7cecY1Fa44tJeZvaoG+Djpkc3JwehN5g==", "dependencies": { "buffer-writer": "2.0.0", "packet-reader": "1.0.0", - "pg-connection-string": "^2.6.1", + "pg-connection-string": "^2.6.2", "pg-pool": "^3.6.1", "pg-protocol": "^1.6.0", "pg-types": "^2.1.0", @@ -3985,9 +4276,9 @@ "optional": true }, "node_modules/pg-connection-string": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.1.tgz", - "integrity": "sha512-w6ZzNu6oMmIzEAYVw+RLK0+nqHPt8K3ZnknKi+g48Ak2pr3dtljJW3o+D/n2zzCG07Zoe9VOX3aiKpj+BN0pjg==" + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.2.tgz", + "integrity": "sha512-ch6OwaeaPYcova4kKZ15sbJ2hKb/VP48ZD2gE7i1J+L4MspCtBMAx8nMgz7bksc7IojCIIWuEhHibSMFH8m8oA==" }, "node_modules/pg-int8": { "version": "1.0.1", @@ -4321,6 +4612,26 @@ "node": ">=8.10.0" } }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.4", + 
"resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.4.tgz", + "integrity": "sha512-ECkTw8TmJwW60lOTR+ZkODISW6RQ8+2CL3COqtiJKLd6MmB45hN51HprHFziKLGkAuTGQhBb91V8cy+KHlaCjw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/regexp.prototype.flags": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.0.tgz", @@ -4378,12 +4689,12 @@ "dev": true }, "node_modules/resolve": { - "version": "1.22.2", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", - "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", + "version": "1.22.4", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.4.tgz", + "integrity": "sha512-PXNdCiPqDqeUou+w1C2eTQbNfxKSuMxqTCuvlmmMsk1NWHL5fRrhY6Pl0qEYYc6+QqGClco1Qj8XnjPego4wfg==", "dev": true, "dependencies": { - "is-core-module": "^2.11.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -4451,6 +4762,24 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-array-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.0.1.tgz", + "integrity": "sha512-6XbUAseYE2KtOuGueyeobCySj9L4+66Tn6KQMOPQJrAJEowYKW/YR/MGJZl7FdydUdaFu4LYyDZjxf4/Nmo23Q==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -4486,9 +4815,9 @@ } }, "node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "bin": { "semver": "bin/semver.js" @@ -4696,18 +5025,18 @@ } }, "node_modules/string.prototype.matchall": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.8.tgz", - "integrity": "sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==", + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.9.tgz", + "integrity": "sha512-6i5hL3MqG/K2G43mWXWgP+qizFW/QH/7kCNN13JrJS5q48FN5IKksLDscexKP3dnmB6cdm9jlNgAsWNLpSykmA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.4.3", + "internal-slot": "^1.0.5", + "regexp.prototype.flags": "^1.5.0", "side-channel": "^1.0.4" }, "funding": { @@ -4715,14 +5044,14 @@ } }, "node_modules/string.prototype.trim": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz", - "integrity": "sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==", + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz", 
+ "integrity": "sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "engines": { "node": ">= 0.4" @@ -4732,28 +5061,28 @@ } }, "node_modules/string.prototype.trimend": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz", + "integrity": "sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz", + "integrity": "sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -4936,6 +5265,57 @@ "node": ">=8" } }, + "node_modules/typed-array-buffer": { + "version": "1.0.0", + 
"resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.0.tgz", + "integrity": "sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz", + "integrity": "sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz", + "integrity": "sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==", + "dev": true, + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/typed-array-length": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", @@ -5013,6 +5393,15 @@ "punycode": "^2.1.0" } }, + "node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "bin": { + "uuid": "dist/bin/uuid" + } + }, 
"node_modules/version-guard": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/version-guard/-/version-guard-1.1.1.tgz", @@ -5053,6 +5442,47 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/which-builtin-type": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.3.tgz", + "integrity": "sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==", + "dev": true, + "dependencies": { + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz", + "integrity": "sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==", + "dev": true, + "dependencies": { + "is-map": "^2.0.1", + "is-set": "^2.0.1", + "is-weakmap": "^2.0.1", + "is-weakset": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/which-module": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", @@ -5060,17 +5490,16 @@ "dev": true }, "node_modules/which-typed-array": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "version": "1.1.11", + "resolved": 
"https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.11.tgz", + "integrity": "sha512-qe9UWWpkeG5yzZ0tNYxDmd7vo58HDBc39mZ0xWWpolAGADdFOzkfamWLDxkOWcvHQKVmdTyQdLD4NOfjLWTKew==", "dev": true, "dependencies": { "available-typed-arrays": "^1.0.5", "call-bind": "^1.0.2", "for-each": "^0.3.3", "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0", - "is-typed-array": "^1.1.10" + "has-tostringtag": "^1.0.0" }, "engines": { "node": ">= 0.4" diff --git a/package.json b/package.json index 6ee6e6d4..00f6bdb6 100644 --- a/package.json +++ b/package.json @@ -8,7 +8,6 @@ }, "dependencies": { "cron-parser": "^4.0.0", - "delay": "^5.0.0", "lodash.debounce": "^4.0.8", "p-map": "^4.0.0", "pg": "^8.5.1", diff --git a/releasenotesv10.md b/releasenotesv10.md index 3372fe31..0f3796b8 100644 --- a/releasenotesv10.md +++ b/releasenotesv10.md @@ -1,7 +1,10 @@ ## v10 +* Postgres 12 and Node 18 required -* Create explicit queues. Each queue is partitioned into dedicated storage +* Created policy queues. 
Each queue is partitioned into dedicated storage (via postgres declarative partitioning) + +* cascade configuration for send() and insert() from policy queue and then global settings in the constructor * Introduce dead letter queue config * Removes completion jobs and onComplete config @@ -31,5 +34,4 @@ New constructor option ## TODO -* Add peek API for running TOP N queries against job tables -* cascade configuration for boss.insert([jobs]) \ No newline at end of file +* Add peek API for running TOP N queries against job tables \ No newline at end of file diff --git a/src/boss.js b/src/boss.js index cfd770ee..fd058631 100644 --- a/src/boss.js +++ b/src/boss.js @@ -1,6 +1,6 @@ const EventEmitter = require('events') const plans = require('./plans') -const delay = require('delay') +const { delay } = require('./tools') const events = { error: 'error', @@ -143,7 +143,7 @@ class Boss extends EventEmitter { async stop () { if (!this.stopped) { - if (this.__testDelayPromise) this.__testDelayPromise.clear() + if (this.__testDelayPromise) this.__testDelayPromise.abort() if (this.maintenanceInterval) clearInterval(this.maintenanceInterval) if (this.monitorInterval) clearInterval(this.monitorInterval) diff --git a/src/index.js b/src/index.js index e17125b2..c3abd9ab 100644 --- a/src/index.js +++ b/src/index.js @@ -6,7 +6,7 @@ const Manager = require('./manager') const Timekeeper = require('./timekeeper') const Boss = require('./boss') const Db = require('./db') -const delay = require('delay') +const { delay } = require('./tools') const events = { error: 'error', diff --git a/src/manager.js b/src/manager.js index b4d8d79e..c0a7b02e 100644 --- a/src/manager.js +++ b/src/manager.js @@ -1,11 +1,10 @@ const assert = require('assert') const EventEmitter = require('events') const { randomUUID } = require('crypto') -const delay = require('delay') const debounce = require('lodash.debounce') const { serializeError: stringify } = require('serialize-error') const pMap = require('p-map') 
- +const { delay } = require('./tools') const Attorney = require('./attorney') const Worker = require('./worker') const plans = require('./plans') @@ -26,16 +25,14 @@ const events = { const resolveWithinSeconds = async (promise, seconds) => { const timeout = Math.max(1, seconds) * 1000 - const reject = delay.reject(timeout, { value: new Error(`handler execution exceeded ${timeout}ms`) }) + const reject = delay(timeout, `handler execution exceeded ${timeout}ms`) let result try { result = await Promise.race([promise, reject]) } finally { - try { - reject.clear() - } catch {} + reject.abort() } return result diff --git a/src/tools.js b/src/tools.js new file mode 100644 index 00000000..7a04c3dc --- /dev/null +++ b/src/tools.js @@ -0,0 +1,28 @@ +module.exports = { + delay +} + +function delay (ms, error) { + const { setTimeout } = require('timers/promises') + const ac = new AbortController() + + const promise = new Promise((resolve, reject) => { + setTimeout(ms, null, { signal: ac.signal }) + .then(() => { + if (error) { + reject(new Error(error)) + } else { + resolve() + } + }) + .catch(resolve) + }) + + promise.abort = () => { + if (!ac.signal.aborted) { + ac.abort() + } + } + + return promise +} diff --git a/src/worker.js b/src/worker.js index 386dea41..c15409df 100644 --- a/src/worker.js +++ b/src/worker.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('./tools') const WORKER_STATES = { created: 'created', @@ -34,7 +34,7 @@ class Worker { this.beenNotified = true if (this.loopDelayPromise) { - this.loopDelayPromise.clear() + this.loopDelayPromise.abort() } } @@ -91,7 +91,7 @@ class Worker { this.state = WORKER_STATES.stopping if (this.loopDelayPromise) { - this.loopDelayPromise.clear() + this.loopDelayPromise.abort() } } } diff --git a/test/archiveTest.js b/test/archiveTest.js index ee5c00ff..f70407cb 100644 --- a/test/archiveTest.js +++ b/test/archiveTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = 
require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') const { states } = require('../src/plans') describe('archive', function () { diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index 4a5f30c8..41ebbc62 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const PgBoss = require('../') -const delay = require('delay') +const { delay } = require('../src/tools') describe('background processing error handling', function () { it('maintenance error handling works', async function () { diff --git a/test/delayTest.js b/test/delayTest.js index 918d7089..d6e705f8 100644 --- a/test/delayTest.js +++ b/test/delayTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('delayed jobs', function () { it('should wait until after an int (in seconds)', async function () { diff --git a/test/expireTest.js b/test/expireTest.js index 54960cc0..5cd3eff2 100644 --- a/test/expireTest.js +++ b/test/expireTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('expire', function () { it('should expire a job', async function () { diff --git a/test/failureTest.js b/test/failureTest.js index 5d099ffa..328fc66e 100644 --- a/test/failureTest.js +++ b/test/failureTest.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const helper = require('./testHelper') const pMap = require('p-map') diff --git a/test/maintenanceTest.js b/test/maintenanceTest.js index 7a99f432..c84de149 100644 --- a/test/maintenanceTest.js +++ b/test/maintenanceTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = 
require('delay') +const { delay } = require('../src/tools') describe('maintenance', async function () { it('clearStorage() should empty both job storage tables', async function () { diff --git a/test/managerTest.js b/test/managerTest.js index 18569807..15007be6 100644 --- a/test/managerTest.js +++ b/test/managerTest.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const helper = require('./testHelper') diff --git a/test/retryTest.js b/test/retryTest.js index ad3d8f3b..7b512423 100644 --- a/test/retryTest.js +++ b/test/retryTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('retries', function () { it('should retry a job that didn\'t complete', async function () { diff --git a/test/scheduleTest.js b/test/scheduleTest.js index c1e738e7..475107df 100644 --- a/test/scheduleTest.js +++ b/test/scheduleTest.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const { DateTime } = require('luxon') const helper = require('./testHelper') diff --git a/test/throttleTest.js b/test/throttleTest.js index ff77afe2..7a6fc093 100644 --- a/test/throttleTest.js +++ b/test/throttleTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('throttle', function () { it('should only create 1 job for interval with a delay', async function () { diff --git a/test/workTest.js b/test/workTest.js index 48c3ce4f..5a5e79df 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const helper = require('./testHelper') @@ -72,18 +72,21 @@ describe('work', function () { const queue = 
this.test.bossConfig.schema let processCount = 0 - const newJobCheckIntervalSeconds = 5 await boss.send(queue) - const workerId = await boss.work(queue, { newJobCheckIntervalSeconds }, () => processCount++) - await delay(100) + const workerId = await boss.work(queue, { newJobCheckIntervalSeconds: 5 }, () => processCount++) + + await delay(500) + assert.strictEqual(processCount, 1) + await boss.send(queue) boss.notifyWorker(workerId) - await delay(100) + await delay(500) + assert.strictEqual(processCount, 2) }) From b65250c92c1faa2804d4c57e3b6dd86095641ab6 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 10 Sep 2023 16:56:36 -0500 Subject: [PATCH 25/36] wire in fetch() priority option --- docs/readme.md | 6 +++++- src/manager.js | 5 +++-- types.d.ts | 1 + 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/readme.md b/docs/readme.md index 8ec8b87d..258a5da4 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -683,7 +683,11 @@ Typically one would use `work()` for automated polling for new jobs based upon a - `batchSize`: number, # of jobs to fetch - `options`: object - * `includeMetadata`, bool + * `priority`, bool, default: `true` + + If true, allow jobs with a higher priority to be fetched before jobs with lower or no priority + + * `includeMetadata`, bool, default: `false` If `true`, all job metadata will be returned on the job object. The following table shows each property and its type, which is basically all columns from the job table. 
diff --git a/src/manager.js b/src/manager.js index c0a7b02e..1021bfcc 100644 --- a/src/manager.js +++ b/src/manager.js @@ -182,7 +182,8 @@ class Manager extends EventEmitter { teamSize = 1, teamConcurrency = 1, teamRefill: refill = false, - includeMetadata = false + includeMetadata = false, + priority = true } = options const id = randomUUID({ disableEntropyCache: true }) @@ -206,7 +207,7 @@ class Manager extends EventEmitter { createTeamRefillPromise() } - const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata }) + const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata, priority }) const onFetch = async (jobs) => { if (this.config.__test__throw_worker) { diff --git a/types.d.ts b/types.d.ts index 0fe25dae..ac3df340 100644 --- a/types.d.ts +++ b/types.d.ts @@ -104,6 +104,7 @@ declare namespace PgBoss { interface CommonJobFetchOptions { includeMetadata?: boolean; + priority?: boolean; } type JobFetchOptions = CommonJobFetchOptions & { From 49506f9f8934413a0c5fdf7a05a84d539f42b4de Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 10 Sep 2023 17:02:35 -0500 Subject: [PATCH 26/36] update CI node versions --- .github/workflows/ci.yml | 2 +- releasenotesv10.md | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 81de1875..338438e4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: container: node:18 strategy: matrix: - node: [ 16, 18 ] + node: [ 18, 20 ] services: postgres: image: postgres diff --git a/releasenotesv10.md b/releasenotesv10.md index 0f3796b8..45582463 100644 --- a/releasenotesv10.md +++ b/releasenotesv10.md @@ -27,6 +27,10 @@ New constructor option ``` * consolidate failed states: expired => failed + +* Add priority option to work and fetch to bypass priority sorting + + * Add manual maintenance API for one-off upgrade API without processing queues ```js await 
boss.maintain() From b75fe6e246f21b0d53c89a07432801aa1e7b75e4 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Sun, 10 Sep 2023 17:34:46 -0500 Subject: [PATCH 27/36] add oncomplete column to migration --- src/migrationStore.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/migrationStore.js b/src/migrationStore.js index 0f2f7aff..82600c85 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -77,6 +77,7 @@ function getAll (schema) { `ALTER TABLE ${schema}.job ADD COLUMN deadletter text`, `ALTER TABLE ${schema}.job ADD COLUMN policy text`, + `ALTER TABLE ${schema}.job DROP COLUMN onComplete`, // update state enum `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, @@ -164,6 +165,7 @@ function getAll (schema) { `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','expired','cancelled','failed')`, `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, + `ALTER TABLE ${schema}.job ADD COLUMN onComplete bool`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, `ALTER TABLE ${schema}.archive DROP COLUMN policy`, `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, From ea04879032a355cb7bcf3dddb9424d33277a2a68 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Tue, 12 Sep 2023 23:31:52 -0500 Subject: [PATCH 28/36] docs and updates --- package.json | 2 +- releasenotesv10.md | 85 ++++++++++++++++++++++++------------- src/index.js | 2 +- test/backgroundErrorTest.js | 4 +- test/hooks.js | 2 +- test/migrationTest.js | 4 +- test/multiMasterTest.js | 4 +- test/opsTest.js | 6 +-- test/scheduleTest.js | 6 +-- types.d.ts | 3 +- 10 files changed, 73 insertions(+), 45 deletions(-) diff --git a/package.json b/package.json index 00f6bdb6..455463d9 100644 --- a/package.json +++ b/package.json @@ -4,7 +4,7 @@ "description": "Queueing 
jobs in Node.js using PostgreSQL like a boss", "main": "./src/index.js", "engines": { - "node": ">=16" + "node": ">=18" }, "dependencies": { "cron-parser": "^4.0.0", diff --git a/releasenotesv10.md b/releasenotesv10.md index 45582463..6fae34fc 100644 --- a/releasenotesv10.md +++ b/releasenotesv10.md @@ -1,41 +1,68 @@ -## v10 +v10 is the largest semver major release of pg-boss in years. The API changes included below are ordered by significance. -* Postgres 12 and Node 18 required - -* Created policy queues. Each queue is partitioned into dedicated storage (via postgres declarative partitioning) +## Database changes +PostgreSQL 12 is now the minimum supported version. If you upgrade and run `start()`, the database will automatically be upgraded. However, this release requires rebuilding almost all of the job table indexes, which may require a bit of downtime depending on the size of your queues. If this is a concern, you may extract the migration script via `getMigrationPlans()` and run it against a backup to get an estimate on downtime. -* cascade configuration for send() and insert() from policy queue and then global settings in the constructor +If the standard auto-migration isn't desired, consider alternatives, such as running a new v10 schema side by side of a v9 schema until the v9 queues are drained. -* Introduce dead letter queue config - * Removes completion jobs and onComplete config - * Allows retries in dlq, since they become just like any other queue +## API changes -* Add primary key to archive - * allows replication of database for read-replica and/or HA use cases - * Existing archive table will be renamed to archive_backup and kept until the next release of pgboss +* MAJOR: **Job retries are now opt-out instead of opt-in.** The default `retryLimit` is now 2 retries. This will cause an issue for any job handlers that aren't idempotent. Consider setting `retryLimit=0` on these queues if needed. 
-* Allow instances to connect without trying to migrate to latest version (instances that should be able to process jobs, but not have access to schema changes or upgrades) +* MAJOR: **Policy queues.** Queues can now be optionally created using `createQueue()` with a new set of storage policies. Each policy will store jobs in dedicated partition tables (courtesy of Postgres's declarative partitioning). Additionally, these queues can store default retry and retention policies that will be auto-applied to all new jobs (see below). + + * **`standard`** (default): Standard queues are the default queue policy, which supports all existing features. This will provision a dedicated job partition for all jobs with this name. + * **`short`**: Short queues only allow 1 item to be queued (in created state), which replaces the previous `sendSingleton()` and `sendOnce()` functions. + * **`singleton`**: Singleton queues only allow 1 item to be active, which replaces the previous `fetch()` option `enforceSingletonQueueActiveLimit`. + * **`stately`**: Stately queues are a combination of `short` and `singleton`, only allowing 1 job to be queued and 1 job active. -New constructor option -```js - migrate: false -``` -* Update existing constructor options for maintenance and scheduling: -```js - supervise: true, - schedule: true -``` -* consolidate failed states: expired => failed +* MAJOR: **Dead letter queues replace completion jobs.** Failed jobs will be added to optional dead letter queues after exhausting all retries. This is preferred over completion jobs to gain retry support via `work()`. Additionally, dead letter queues only make a copy of the job if it fails, instead of filling up the job table with numerous, mostly unneeded completion jobs. 
+ * `onComplete` option in `send()` and `insert()` has been removed + * `onComplete()`, `offComplete()`, and `fetchCompleted()` have been removed + * `deadLetter` option added to `send()` and `insert()` and `createQueue()` +* MAJOR: Dropped the following API functions in favor of policy queues + * `sendOnce()` + * `sendSingleton()` -* Add priority option to work and fetch to bypass priority sorting +* MAJOR: Postgres 12 is now the minimum required version +* MAJOR: Node 18 is now the minimum required version +* MINOR: `send()` and `insert()` cascade configuration from policy queues (if they exist) and then global settings in the constructor. Use the following table to help identify which settings are inherited and when. + + | Setting | API | Queue | Constructor | + | - | - | - | - | + | `retryLimit` | * | - [x] | - [x] | + | `retryDelay` | * | - [x] | - [x] | + | `retryBackoff` | * | - [x] | - [x] | + | `expireInSeconds` | * | - [x] | - [x] | + | `expireInMinutes` | `send()`, `createQueue()` | - [x] | - [x] | + | `expireInHours` | `send()`, `createQueue()` | - [x] | - [x] | + | `retentionSeconds` | `send()`, `createQueue()` | - [x] | - [x] | + | `retentionMinutes` | `send()`, `createQueue()` | - [x] | - [x] | + | `retentionHours` | `send()`, `createQueue()` | - [x] | - [x] | + | `retentionDays` | `send()`, `createQueue()` | - [x] | - [x] | + | `deadLetter` | * | - [x] | - [ ] | -* Add manual maintenance API for one-off upgrade API without processing queues -```js - await boss.maintain() -``` +* MINOR: Added primary key to job archive to support replication use cases such as read replicas or high availability standbys. + * Existing archive table will be renamed to archive_backup and kept until the next release of pgboss, at which event it will be removed. This is only to make sure the automatic schema migration is fast. If you no longer need the jobs in archive and it's blocking you from replication, you can run the following to drop it. 
+ + ```sql + DROP TABLE archive_backup + ``` - ## TODO +* MINOR: Added a new constructor option, `migrate:false`, to block an instance from attempting to migrate to the latest database schema version. This is useful if the configured credentials don't have schema modification privileges or complete control of when and how migrations are run is required. + +* MINOR: `noSupervisor` and `noScheduling` were renamed to a more intuitive naming convention. + * If using `noSupervisor: true` to disable mainteance, instead use `supervise: false` + * If using `noScheduling: true` to disable scheduled cron jobs, use `schedule: false` -* Add peek API for running TOP N queries against job tables \ No newline at end of file +* MINOR: The `expired` failed state has been consolidated into `failed` for simplicity. + +* MINOR: Added `priority:false` option to `work()` and `fetch()` to opt out of priority sorting during job fetching. If a queue is very large and not using the priority feature, this may help job fetch performance. + +* MINOR: Added a manual maintenance API if desired: `maintain()`. + +* MINOR: `stop()` will now wait for the default graceful stop timeout (30s) before resolving its promise. The `stopped` event will still emit. If you want to the original behavior, set the new `wait` option to `false`. + +* MINOR: Added `id` property as an option to `send()` for pre-assigning the job id. Previously, only `insert()` supported pre-assignment. 
\ No newline at end of file diff --git a/src/index.js b/src/index.js index c3abd9ab..cc8dcc23 100644 --- a/src/index.js +++ b/src/index.js @@ -123,7 +123,7 @@ class PgBoss extends EventEmitter { return } - let { destroy = false, graceful = true, timeout = 30000, wait = false } = options + let { destroy = false, graceful = true, timeout = 30000, wait = true } = options timeout = Math.max(timeout, 1000) diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index 41ebbc62..f17291f0 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -109,7 +109,7 @@ describe('background processing error handling', function () { await boss.start() - await boss.stop() + await boss.stop({ wait: false }) await delay(1000) @@ -127,7 +127,7 @@ describe('background processing error handling', function () { await boss.start() try { - await boss.stop() + await boss.stop({ wait: false }) assert(false) } catch (err) { assert(true) diff --git a/test/hooks.js b/test/hooks.js index 0a67410a..e25e6462 100644 --- a/test/hooks.js +++ b/test/hooks.js @@ -26,7 +26,7 @@ async function afterEach () { const { boss } = this.currentTest if (boss) { - await boss.stop({ wait: true, timeout: 2000 }) + await boss.stop({ timeout: 2000 }) } await helper.dropSchema(config.schema) diff --git a/test/migrationTest.js b/test/migrationTest.js index 9b69a6b1..81350296 100644 --- a/test/migrationTest.js +++ b/test/migrationTest.js @@ -136,7 +136,7 @@ describe('migration', function () { } catch (error) { assert(error.message.includes('wat')) } finally { - await boss1.stop({ graceful: false }) + await boss1.stop({ graceful: false, wait: false }) } const version1 = await contractor.version() @@ -154,7 +154,7 @@ describe('migration', function () { assert.strictEqual(version2, currentSchemaVersion) - await boss2.stop({ graceful: false }) + await boss2.stop({ graceful: false, wait: false }) }) it('should not install if migrate option is false', async function () { diff --git 
a/test/multiMasterTest.js b/test/multiMasterTest.js index 2364bd69..c714fa71 100644 --- a/test/multiMasterTest.js +++ b/test/multiMasterTest.js @@ -21,7 +21,7 @@ describe('multi-master', function () { } catch (err) { assert(false, err.message) } finally { - await pMap(instances, i => i.stop({ graceful: false })) + await pMap(instances, i => i.stop({ graceful: false, wait: false })) } }) @@ -58,7 +58,7 @@ describe('multi-master', function () { } catch (err) { assert(false) } finally { - await pMap(instances, i => i.stop({ graceful: false })) + await pMap(instances, i => i.stop({ graceful: false, wait: false })) } }) }) diff --git a/test/opsTest.js b/test/opsTest.js index 8d56423d..20616e8e 100644 --- a/test/opsTest.js +++ b/test/opsTest.js @@ -38,19 +38,19 @@ describe('ops', function () { it('should force stop', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - await boss.stop({ graceful: false }) + await boss.stop({ graceful: false, wait: false }) }) it('should destroy the connection pool', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - await boss.stop({ destroy: true, graceful: false }) + await boss.stop({ destroy: true, graceful: false, wait: false }) assert(boss.db.pool.totalCount === 0) }) it('should destroy the connection pool gracefully', async function () { const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - await boss.stop({ destroy: true }) + await boss.stop({ destroy: true, wait: false }) await new Promise((resolve) => { boss.on('stopped', () => resolve()) }) diff --git a/test/scheduleTest.js b/test/scheduleTest.js index 475107df..88f99832 100644 --- a/test/scheduleTest.js +++ b/test/scheduleTest.js @@ -109,7 +109,7 @@ describe('schedule', function () { await boss.schedule(queue, '* * * * *') - await boss.stop() + await boss.stop({ wait: false }) boss = await helper.start({ ...this.test.bossConfig, cronWorkerIntervalSeconds: 
1, schedule: true }) @@ -119,7 +119,7 @@ describe('schedule', function () { assert(job) - await boss.stop() + await boss.stop({ wait: false }) }) it('should remove previously scheduled job', async function () { @@ -135,7 +135,7 @@ describe('schedule', function () { await boss.unschedule(queue) - await boss.stop({ graceful: false }) + await boss.stop({ graceful: false, wait: false }) const db = await helper.getDb() await db.executeSql(plans.clearStorage(this.test.bossConfig.schema)) diff --git a/types.d.ts b/types.d.ts index ac3df340..07695b47 100644 --- a/types.d.ts +++ b/types.d.ts @@ -77,6 +77,7 @@ declare namespace PgBoss { } interface JobOptions { + id?: string, priority?: number; startAfter?: number | string | Date; singletonKey?: string; @@ -93,7 +94,7 @@ declare namespace PgBoss { type InsertOptions = ConnectionOptions; - type SendOptions = JobOptions & ExpirationOptions & RetentionOptions & RetryOptions & CompletionOptions & ConnectionOptions; + type SendOptions = JobOptions & ExpirationOptions & RetentionOptions & RetryOptions & ConnectionOptions; type ScheduleOptions = SendOptions & { tz?: string } From 4d772c57f1c3e5d247a0f2f5dba0fab9de5c2d15 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Thu, 14 Sep 2023 19:36:46 -0500 Subject: [PATCH 29/36] moved release notes to github --- releasenotesv10.md | 68 ---------------------------------------------- 1 file changed, 68 deletions(-) delete mode 100644 releasenotesv10.md diff --git a/releasenotesv10.md b/releasenotesv10.md deleted file mode 100644 index 6fae34fc..00000000 --- a/releasenotesv10.md +++ /dev/null @@ -1,68 +0,0 @@ -v10 is the largest semver major release of pg-boss in years. The API changes included below are ordered by significance. - -## Database changes -PostgreSQL 12 is now the minimum supported version. If you upgrade and run `start()`, the database will automatically be upgraded. 
However, this release requires rebuilding almost all of the job table indexes, which may require a bit of downtime depending on the size of your queues. If this is a concern, you may extract the migration script via `getMigrationPlans()` and run it against a backup to get an estimate on downtime. - -If the standard auto-migration isn't desired, consider alternatives, such as running a new v10 schema side by side of a v9 schema until the v9 queues are drained. - -## API changes - -* MAJOR: **Job retries are now opt-out instead of opt-in.** The default `retryLimit` is now 2 retries. This will cause an issue for any job handlers that aren't idempotent. Consider setting `retryLimit=0` on these queues if needed. - -* MAJOR: **Policy queues.** Queues can now be optionally created using `createQueue()` with a new set of storage policies. Each policy will store jobs in dedicated partition tables (courtesy of Postgres's declarative partitioning). Additionally, these queues can store default retry and retention policies that will be auto-applied to all new jobs (see below). - - * **`standard`** (default): Standard queues are the default queue policy, which supports all existing features. This will provision a dedicated job partition for all jobs with this name. - * **`short`**: Short queues only allow 1 item to be queued (in created state), which replaces the previous `sendSingleton()` and `sendOnce()` functions. - * **`singleton`**: Singleton queues only allow 1 item to be active, which replaces the previous `fetch()` option `enforceSingletonQueueActiveLimit`. - * **`stately`**: Stately queues are a combination of `short` and `singleton`, only allowing 1 job to be queued and 1 job active. - -* MAJOR: **Dead letter queues replace completion jobs.** Failed jobs will be added to optional dead letter queues after exhausting all retries. This is preferred over completion jobs to gain retry support via `work()`. 
Additionally, dead letter queues only make a copy of the job if it fails, instead of filling up the job table with numerous, mostly unneeded completion jobs. - * `onComplete` option in `send()` and `insert()` has been removed - * `onComplete()`, `offComplete()`, and `fetchCompleted()` have been removed - * `deadLetter` option added to `send()` and `insert()` and `createQueue()` - -* MAJOR: Dropped the following API functions in favor of policy queues - * `sendOnce()` - * `sendSingleton()` - -* MAJOR: Postgres 12 is now the minimum required version -* MAJOR: Node 18 is now the minimum required version - -* MINOR: `send()` and `insert()` cascade configuration from policy queues (if they exist) and then global settings in the constructor. Use the following table to help identify which settings are inherited and when. - - | Setting | API | Queue | Constructor | - | - | - | - | - | - | `retryLimit` | * | - [x] | - [x] | - | `retryDelay` | * | - [x] | - [x] | - | `retryBackoff` | * | - [x] | - [x] | - | `expireInSeconds` | * | - [x] | - [x] | - | `expireInMinutes` | `send()`, `createQueue()` | - [x] | - [x] | - | `expireInHours` | `send()`, `createQueue()` | - [x] | - [x] | - | `retentionSeconds` | `send()`, `createQueue()` | - [x] | - [x] | - | `retentionMinutes` | `send()`, `createQueue()` | - [x] | - [x] | - | `retentionHours` | `send()`, `createQueue()` | - [x] | - [x] | - | `retentionDays` | `send()`, `createQueue()` | - [x] | - [x] | - | `deadLetter` | * | - [x] | - [ ] | - -* MINOR: Added primary key to job archive to support replication use cases such as read replicas or high availability standbys. - * Existing archive table will be renamed to archive_backup and kept until the next release of pgboss, at which event it will be removed. This is only to make sure the automatic schema migration is fast. If you no longer need the jobs in archive and it's blocking you from replication, you can run the following to drop it. 
- - ```sql - DROP TABLE archive_backup - ``` - -* MINOR: Added a new constructor option, `migrate:false`, to block an instance from attempting to migrate to the latest database schema version. This is useful if the configured credentials don't have schema modification privileges or complete control of when and how migrations are run is required. - -* MINOR: `noSupervisor` and `noScheduling` were renamed to a more intuitive naming convention. - * If using `noSupervisor: true` to disable mainteance, instead use `supervise: false` - * If using `noScheduling: true` to disable scheduled cron jobs, use `schedule: false` - -* MINOR: The `expired` failed state has been consolidated into `failed` for simplicity. - -* MINOR: Added `priority:false` option to `work()` and `fetch()` to opt out of priority sorting during job fetching. If a queue is very large and not using the priority feature, this may help job fetch performance. - -* MINOR: Added a manual maintenance API if desired: `maintain()`. - -* MINOR: `stop()` will now wait for the default graceful stop timeout (30s) before resolving its promise. The `stopped` event will still emit. If you want to the original behavior, set the new `wait` option to `false`. - -* MINOR: Added `id` property as an option to `send()` for pre-assigning the job id. Previously, only `insert()` supported pre-assignment. 
\ No newline at end of file From 59b5ba56432e4561e99ed71a520c5ece6c3de521 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Thu, 14 Sep 2023 19:40:57 -0500 Subject: [PATCH 30/36] update types --- types.d.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/types.d.ts b/types.d.ts index 07695b47..6f372042 100644 --- a/types.d.ts +++ b/types.d.ts @@ -230,7 +230,7 @@ declare namespace PgBoss { id: string, name: string, options: WorkOptions, - state: 'created' | 'retry' | 'active' | 'completed' | 'cancelled' | 'failed', + state: 'created' | 'active' | 'stopping' | 'stopped' count: number, createdOn: Date, lastFetchedOn: Date, @@ -244,7 +244,8 @@ declare namespace PgBoss { interface StopOptions { destroy?: boolean, graceful?: boolean, - timeout?: number + timeout?: number, + wait?: boolean } interface OffWorkOptions { @@ -355,6 +356,7 @@ declare class PgBoss extends EventEmitter { archive(): Promise; purge(): Promise; expire(): Promise; + maintain(): Promise; schedule(name: string, cron: string, data?: object, options?: PgBoss.ScheduleOptions): Promise; unschedule(name: string): Promise; From aa04ae82a3168a2324f815612a35e9016a699813 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 15 Sep 2023 20:43:46 -0500 Subject: [PATCH 31/36] update error message [skip ci] --- src/attorney.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/attorney.js b/src/attorney.js index 5b56d51b..43cb5ac4 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -191,7 +191,7 @@ function applySchemaConfig (config) { function assertPostgresObjectName (name) { assert(typeof name === 'string', 'Name must be a string') assert(name.length <= 50, 'Name cannot exceed 50 characters') - assert(!/\W/.test(name), 'Name can only contain alphanumeric characters and underscores are allowed') + assert(!/\W/.test(name), 'Name can only contain alphanumeric characters and underscores') assert(!/^d/.test(name), 'Name cannot start with a number') } From 
6e1f01d2d9e94a566a938aa00f11dc6d37fb0104 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 15 Sep 2023 21:08:41 -0500 Subject: [PATCH 32/36] drop pg version badge [skip ci] --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index bde97d58..2d6651e2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ Queueing jobs in Node.js using PostgreSQL like a boss. -[![PostgreSql Version](https://img.shields.io/badge/PostgreSQL-11+-blue.svg?maxAge=2592000)](http://www.postgresql.org) [![npm version](https://badge.fury.io/js/pg-boss.svg)](https://badge.fury.io/js/pg-boss) [![Build](https://github.com/timgit/pg-boss/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/timgit/pg-boss/actions/workflows/ci.yml) [![Coverage Status](https://coveralls.io/repos/github/timgit/pg-boss/badge.svg?branch=master)](https://coveralls.io/github/timgit/pg-boss?branch=master) From 8f99512df3f6acbfd8fc8fa321f8495ace86cc8b Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Thu, 5 Oct 2023 14:03:02 -0500 Subject: [PATCH 33/36] updated readme [skip ci] --- package.json | 3 --- test/readme.js | 6 +++++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/package.json b/package.json index 455463d9..0ebe9863 100644 --- a/package.json +++ b/package.json @@ -23,9 +23,6 @@ "scripts": { "test": "standard && mocha", "cover": "nyc npm test", - "export-schema": "node ./scripts/construct.js", - "export-migration": "node ./scripts/migrate.js", - "export-rollback": "node ./scripts/rollback.js", "tsc": "tsc --noEmit types.d.ts", "readme": "node ./test/readme.js" }, diff --git a/test/readme.js b/test/readme.js index e60e064e..fc2f28f2 100644 --- a/test/readme.js +++ b/test/readme.js @@ -8,7 +8,11 @@ async function readme () { await boss.start() - const queue = 'some-queue' + const queue = 'some_queue' + + try { + await boss.createQueue(queue) + } catch{} await boss.schedule(queue, '* * * * *') From 7b8bd3cf8ff11b81040acb426e6503ca961e8fe5 Mon Sep 17 
00:00:00 2001 From: Tim Jones Date: Thu, 5 Oct 2023 16:19:40 -0500 Subject: [PATCH 34/36] update migration --- src/migrationStore.js | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/migrationStore.js b/src/migrationStore.js index 82600c85..d38bb801 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -77,21 +77,28 @@ function getAll (schema) { `ALTER TABLE ${schema}.job ADD COLUMN deadletter text`, `ALTER TABLE ${schema}.job ADD COLUMN policy text`, - `ALTER TABLE ${schema}.job DROP COLUMN onComplete`, + `ALTER TABLE ${schema}.job DROP COLUMN on_complete`, // update state enum `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, + `DROP TABLE IF EXISTS ${schema}.archive_backup`, `ALTER TABLE ${schema}.archive RENAME to archive_backup`, + `ALTER INDEX ${schema}.archive_archivedon_idx RENAME to archive_backup_archivedon_idx`, + `DROP TYPE ${schema}.job_state`, `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','cancelled','failed')`, + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, + `DELETE FROM ${schema}.job WHERE name LIKE '__pgboss__%'`, + // set up job partitioning `ALTER TABLE ${schema}.job RENAME TO job_default`, + `ALTER TABLE ${schema}.job_default DROP CONSTRAINT job_pkey`, `CREATE TABLE ${schema}.job ( id uuid not null default gen_random_uuid(), @@ -115,22 +122,26 @@ function getAll (schema) { deadletter text, policy text, CONSTRAINT job_pkey PRIMARY KEY (name, id) - ) PARTITION BY RANGE (name)`, + ) PARTITION BY RANGE (name)`, `ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_default DEFAULT`, `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)`, - `ALTER TABLE ${schema}.archive ADD 
CONSTRAINT archive_pkey PRIMARY KEY (id)`, + `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (name, id)`, `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()`, `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)`, `CREATE INDEX archive_name_idx ON ${schema}.archive(name)`, + `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn) WHERE state < 'active'`, + `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)` `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state = 'created' AND policy = 'short'`, `CREATE UNIQUE INDEX job_policy_singleton ON ${schema}.job (name) WHERE state = 'active' AND policy = 'singleton'`, `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= 'active' AND policy = 'stately'`, `CREATE UNIQUE INDEX job_throttle_key ON ${schema}.job (name, singletonKey) WHERE state <= 'completed' AND singletonOn IS NULL`, `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= 'completed' AND singletonOn IS NOT NULL`, + `ALTER TABLE ${schema}.version ADD COLUMN monitored_on timestamp with time zone`, + `CREATE TABLE ${schema}.queue ( name text primary key, policy text, @@ -150,6 +161,7 @@ function getAll (schema) { `DROP INDEX ${schema}.job_throttle_on`, `DROP INDEX ${schema}.job_throttle_key`, `DROP INDEX ${schema}.job_fetch`, + `DROP INDEX ${schema}.job_name`, `ALTER TABLE ${schema}.job DETACH PARTITION ${schema}.job_default`, `DROP TABLE ${schema}.job`, `ALTER TABLE ${schema}.job_default RENAME TO job`, @@ -165,7 +177,7 @@ function getAll (schema) { `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','expired','cancelled','failed')`, `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, `ALTER TABLE ${schema}.job ALTER COLUMN state SET 
DEFAULT 'created'::${schema}.job_state`, - `ALTER TABLE ${schema}.job ADD COLUMN onComplete bool`, + `ALTER TABLE ${schema}.job ADD COLUMN on_complete bool NOT NULL DEFAULT false`, `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, `ALTER TABLE ${schema}.archive DROP COLUMN policy`, `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, From 0e56699e9fc9bb706e04a87a0c0fd56e8e0bac4b Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 6 Oct 2023 12:56:20 -0500 Subject: [PATCH 35/36] linting --- test/readme.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/readme.js b/test/readme.js index fc2f28f2..7574af7e 100644 --- a/test/readme.js +++ b/test/readme.js @@ -12,7 +12,7 @@ async function readme () { try { await boss.createQueue(queue) - } catch{} + } catch {} await boss.schedule(queue, '* * * * *') From 4846edfb46f7f6c1bc542d683cd5d8e5662dc316 Mon Sep 17 00:00:00 2001 From: Tim Jones Date: Fri, 6 Oct 2023 15:54:10 -0500 Subject: [PATCH 36/36] migration updates --- src/migrationStore.js | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/migrationStore.js b/src/migrationStore.js index d38bb801..22d192c1 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -74,6 +74,7 @@ function getAll (schema) { `DROP INDEX ${schema}.job_singletonOn`, `DROP INDEX ${schema}.job_singletonKeyOn`, `DROP INDEX ${schema}.job_fetch`, + `DROP INDEX ${schema}.job_name`, `ALTER TABLE ${schema}.job ADD COLUMN deadletter text`, `ALTER TABLE ${schema}.job ADD COLUMN policy text`, @@ -98,7 +99,7 @@ function getAll (schema) { // set up job partitioning `ALTER TABLE ${schema}.job RENAME TO job_default`, - `ALTER TABLE ${schema}.job_default DROP CONSTRAINT job_pkey`, + `ALTER TABLE ${schema}.job_default DROP CONSTRAINT IF EXISTS job_pkey`, `CREATE TABLE ${schema}.job ( id uuid not null default gen_random_uuid(), @@ -122,7 +123,7 @@ function getAll (schema) { 
deadletter text, policy text, CONSTRAINT job_pkey PRIMARY KEY (name, id) - ) PARTITION BY RANGE (name)`, + ) PARTITION BY LIST (name)`, `ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_default DEFAULT`, @@ -132,16 +133,16 @@ function getAll (schema) { `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)`, `CREATE INDEX archive_name_idx ON ${schema}.archive(name)`, - `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn) WHERE state < 'active'`, - `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)` + `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn, id) WHERE state < 'active'`, + `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)`, `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state = 'created' AND policy = 'short'`, `CREATE UNIQUE INDEX job_policy_singleton ON ${schema}.job (name) WHERE state = 'active' AND policy = 'singleton'`, `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= 'active' AND policy = 'stately'`, `CREATE UNIQUE INDEX job_throttle_key ON ${schema}.job (name, singletonKey) WHERE state <= 'completed' AND singletonOn IS NULL`, `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= 'completed' AND singletonOn IS NOT NULL`, - + `ALTER TABLE ${schema}.version ADD COLUMN monitored_on timestamp with time zone`, - + `CREATE TABLE ${schema}.queue ( name text primary key, policy text, @@ -166,7 +167,6 @@ function getAll (schema) { `DROP TABLE ${schema}.job`, `ALTER TABLE ${schema}.job_default RENAME TO job`, `DROP TABLE IF EXISTS ${schema}.archive_backup`, - `DROP INDEX ${schema}.archive_archivedon_idx`, `DROP INDEX ${schema}.archive_name_idx`, `ALTER TABLE ${schema}.job DROP COLUMN deadletter`, `ALTER TABLE ${schema}.job DROP COLUMN policy`, @@ -182,6 +182,7 @@ function getAll 
(schema) { `ALTER TABLE ${schema}.archive DROP COLUMN policy`, `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, + `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)`, `CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) WHERE state < 'expired' AND singletonKey IS NULL`, `CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < 'expired'`, `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`,