diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 81de1875..338438e4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: container: node:18 strategy: matrix: - node: [ 16, 18 ] + node: [ 18, 20 ] services: postgres: image: postgres diff --git a/README.md b/README.md index 51506c6a..2d6651e2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,5 @@ Queueing jobs in Node.js using PostgreSQL like a boss. -[![PostgreSql Version](https://img.shields.io/badge/PostgreSQL-11+-blue.svg?maxAge=2592000)](http://www.postgresql.org) [![npm version](https://badge.fury.io/js/pg-boss.svg)](https://badge.fury.io/js/pg-boss) [![Build](https://github.com/timgit/pg-boss/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/timgit/pg-boss/actions/workflows/ci.yml) [![Coverage Status](https://coveralls.io/repos/github/timgit/pg-boss/badge.svg?branch=master)](https://coveralls.io/github/timgit/pg-boss?branch=master) @@ -42,16 +41,16 @@ This will likely cater the most to teams already familiar with the simplicity of * Backpressure-compatible polling workers * Cron scheduling * Pub/sub API for fan-out queue relationships -* Deferral, retries (with exponential backoff), rate limiting, debouncing -* Completion jobs for orchestrations/sagas +* Priority, deferral, retries (with exponential backoff), rate limiting, debouncing * Direct table access for bulk loads via COPY or INSERT * Multi-master compatible (for example, in a Kubernetes ReplicaSet) +* Dead letter queues * Automatic creation and migration of storage tables * Automatic maintenance operations to manage table growth ## Requirements -* Node 16 or higher -* PostgreSQL 11 or higher +* Node 18 or higher +* PostgreSQL 12 or higher ## Installation diff --git a/docs/readme.md b/docs/readme.md index b7ceb607..258a5da4 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -26,20 +26,15 @@ - [`send(name, data, options)`](#sendname-data-options) - 
[`send(request)`](#sendrequest) - [`sendAfter(name, data, options, seconds | ISO date string | Date)`](#sendaftername-data-options-seconds--iso-date-string--date) - - [`sendOnce(name, data, options, key)`](#sendoncename-data-options-key) - - [`sendSingleton(name, data, options)`](#sendsingletonname-data-options) - [`sendThrottled(name, data, options, seconds [, key])`](#sendthrottledname-data-options-seconds--key) - [`sendDebounced(name, data, options, seconds [, key])`](#senddebouncedname-data-options-seconds--key) - [`insert([jobs])`](#insertjobs) - [`fetch()`](#fetch) - [`fetch(name)`](#fetchname) - [`fetch(name, batchSize, [, options])`](#fetchname-batchsize--options) - - [`fetchCompleted(name [, batchSize] [, options])`](#fetchcompletedname--batchsize--options) - [`work()`](#work) - [`work(name [, options], handler)`](#workname--options-handler) - - [`onComplete(name [, options], handler)`](#oncompletename--options-handler) - [`offWork(value)`](#offworkvalue) - - [`offComplete(value)`](#offcompletevalue) - [`publish(event, data, options)`](#publishevent-data-options) - [`subscribe(event, name)`](#subscribeevent-name) - [`unsubscribe(event, name)`](#unsubscribeevent-name) @@ -58,8 +53,8 @@ - [`notifyWorker(id)`](#notifyworkerid) - [`getQueueSize(name [, options])`](#getqueuesizename--options) - [`getJobById(id, options)`](#getjobbyidid-options) + - [`createQueue(name, type)`](#createqueuename-type) - [`deleteQueue(name)`](#deletequeuename) - - [`deleteAllQueues()`](#deleteallqueues) - [`clearStorage()`](#clearstorage) @@ -71,18 +66,14 @@ You may use as many instances as needed to connect to the same Postgres database If you require multiple installations in the same database, such as for large volume queues, you may wish to specify a separate schema per install to achieve partitioning. -Architecturally, pg-boss is somewhat similar to queue products such as AWS SQS, which primarily acts as a store of jobs that are "pulled", not "pushed" from the server. 
If at least one pg-boss instance is running, internal maintenance jobs will be periodically run to make sure fetched jobs that are never completed are marked as expired or retried (if configured). If and when this happens, think of a job with a retry configuration to act just like the SQS message visibility timeout. In regards to job delivery, Postgres [SKIP LOCKED](http://blog.2ndquadrant.com/what-is-select-skip-locked-for-in-postgresql-9-5) will guarantee exactly-once, which is only available in SQS via FIFO queues (and its throughput limitations). However, even if you have exactly-once delivery, this is not a guarantee that a job will never be processed more than once if you opt into retries, so keep the general recommendation for idempotency with queueing systems in mind. +Architecturally, pg-boss is somewhat similar to queue products such as AWS SQS, which primarily acts as a store of jobs that are "pulled", not "pushed" from the server. If at least one pg-boss instance is running, internal maintenance jobs will be periodically run to make sure fetched jobs that are never completed are moved into the retry or failed state (this is somewhat similar to the SQS message visibility timeout). [SKIP LOCKED](https://www.2ndquadrant.com/en/blog/what-is-select-skip-locked-for-in-postgresql-9-5) guarantees exactly-once delivery, which is only available in SQS via FIFO queues (with the caveat of their throughput limitations). Keep in mind that exactly-once delivery is not a guarantee that a job will never be processed more than once because of retries, so keep the general recommendation for idempotency with queueing systems in mind. ## Job states -All jobs start out in the `created` state and become `active` when picked up for work. If job processing completes successfully, jobs will go to `completed`. If a job fails, it will typcially enter the `failed` state. 
However, if a job has retry options configured, it will enter the `retry` state on failure instead and have a chance to re-enter `active` state. It's also possible for `active` jobs to become `expired`, which happens when job processing takes too long. Jobs can also enter `cancelled` state via [`cancel(id)`](#cancelid) or [`cancel([ids])`](#cancelids). +All jobs start out in the `created` state and become `active` when picked up for work. If job processing completes successfully, jobs will go to `completed`. If a job fails, it will typically enter the `failed` state. However, if a job has retry options configured, it will enter the `retry` state on failure instead and have a chance to re-enter `active` state. Jobs can also enter `cancelled` state via [`cancel(id)`](#cancelid) or [`cancel([ids])`](#cancelids). All jobs that are `completed`, `expired`, `cancelled` or `failed` become eligible for archiving (i.e. they will transition into the `archive` state) after the configured `archiveCompletedAfterSeconds` time. Once `archive`d, jobs will be automatically deleted by pg-boss after the configured deletion period. -Here's a state diagram that shows the possible states and their transitions: - -![job state diagram](./images/job-states.png) - # Database install pg-boss can be installed into any database. When started, it will detect if it is installed and automatically create the required schema for all queue operations if needed. If the database doesn't already have the pgcrypto extension installed, you will need to have a superuser add it before pg-boss can create its schema. 
@@ -189,7 +180,6 @@ The payload of the event is an object with a key per queue and state, such as th "retry": 40, "active": 26, "completed": 3400, - "expired": 4, "cancelled": 0, "failed": 49, "all": 4049 @@ -199,7 +189,6 @@ The payload of the event is an object with a key per queue and state, such as th "retry": 0, "active": 0, "completed": 645, - "expired": 0, "cancelled": 0, "failed": 0, "all": 645 @@ -209,7 +198,6 @@ The payload of the event is an object with a key per queue and state, such as th "retry": 40, "active": 26, "completed": 4045, - "expired": 4, "cancelled": 0, "failed": 4, "all": 4694 @@ -333,10 +321,6 @@ The following options can be set as properties in an object for additional confi Queue options contain the following constructor-only settings. -* **uuid** - string, defaults to "v4" - - job uuid format used, "v1" or "v4" - * **archiveCompletedAfterSeconds** Specifies how long in seconds completed jobs get archived. Note: a warning will be emitted if set to lower than 60s and cron processing will be disabled. @@ -366,13 +350,17 @@ Queue options contain the following constructor-only settings. Maintenance operations include checking active jobs for expiration, archiving completed jobs from the primary job table, and deleting archived jobs from the archive table. -* **noSupervisor**, bool, default false +* **supervise**, bool, default true - If this is set to true, maintenance and monitoring operations will not be started during a `start()` after the schema is created. This is an advanced use case, as bypassing maintenance operations is not something you would want to do under normal circumstances. + If this is set to false, maintenance and monitoring operations will be disabled on this instance. This is an advanced use case, as bypassing maintenance operations is not something you would want to do under normal circumstances. 
-* **noScheduling**, bool, default false +* **schedule**, bool, default true - If this is set to true, this instance will not monitor scheduled jobs during `start()`. However, this instance can still use the scheduling api. This is an advanced use case you may want to do for testing or if the clock of the server is skewed and you would like to disable the skew warnings. + If this is set to false, this instance will not monitor or create scheduled jobs during `start()`. This is an advanced use case you may want to do for testing or if the clock of the server is skewed and you would like to disable the skew warnings. + +* **migrate**, bool, default true + + If this is set to false, this instance will skip attempts to run schema migrations during `start()`. If schema migrations exist, `start()` will throw an error and block usage. This is an advanced use case when the configured user account does not have schema mutation privileges. **Archive options** @@ -566,19 +554,6 @@ Available in constructor as a default, or overridden in send. boss.send('my-job', {}, {singletonKey: '123'}) // resolves a null jobId until first job completed ``` - This can be used in conjunction with throttling explained below. - - * **useSingletonQueue** boolean - - When used in conjunction with singletonKey, allows a max of 1 job to be queued. - - >By default, there is no limit on the number of these jobs that may be active. However, this behavior may be modified by passing the [enforceSingletonQueueActiveLimit](#fetch) option. 
- - ```js - boss.send('my-job', {}, {singletonKey: '123', useSingletonQueue: true}) // resolves a jobId - boss.send('my-job', {}, {singletonKey: '123', useSingletonQueue: true}) // resolves a null jobId until first job becomes active - ``` - **Throttled jobs** * **singletonSeconds**, int @@ -594,12 +569,11 @@ For example, if you set the `singletonMinutes` to 1, then submit 2 jobs within a Setting `singletonNextSlot` to true will cause the job to be scheduled to run after the current time slot if and when a job is throttled. This option is set to true, for example, when calling the convenience function `sendDebounced()`. -**Completion jobs** +**Dead Letter Queues** -* **onComplete**, bool (Default: false) - -When a job completes, a completion job will be created in the queue, copying the same retention policy as the job, for the purpose of `onComplete()` or `fetchCompleted()`. If completion jobs are not used, they will be archived according to the retention policy. If the queue in question has a very high volume, this can be set to `false` to bypass creating the completion job. This can also be set in the constructor as a default for all calls to `send()`. +* **deadLetter**, string +When a job fails after all retries, if a `deadLetter` property exists, the job's payload will be copied into that queue, copying the same retention and retry configuration as the original job. ```js @@ -649,17 +623,6 @@ Send a job that should start after a number of seconds from now, or after a spec This is a convenience version of `send()` with the `startAfter` option assigned. -### `sendOnce(name, data, options, key)` - -Send a job with a unique key to only allow 1 job to be in created, retry, or active state at a time. - -This is a convenience version of `send()` with the `singletonKey` option assigned. - -### `sendSingleton(name, data, options)` - -Send a job but only allow 1 job to be in created or retry state at at time. 
- -This is a convenience version of `send()` with the `singletonKey` option assigned. ### `sendThrottled(name, data, options, seconds [, key])` @@ -694,7 +657,7 @@ interface JobInsert { singletonKey?: string; expireInSeconds?: number; keepUntil?: Date | string; - onComplete?: boolean + deadLetter?: string; } ``` @@ -720,7 +683,11 @@ Typically one would use `work()` for automated polling for new jobs based upon a - `batchSize`: number, # of jobs to fetch - `options`: object - * `includeMetadata`, bool + * `priority`, bool, default: `true` + + If true, allow jobs with a higher priority to be fetched before jobs with lower or no priority + + * `includeMetadata`, bool, default: `false` If `true`, all job metadata will be returned on the job object. The following table shows each property and its type, which is basically all columns from the job table. @@ -743,14 +710,9 @@ Typically one would use `work()` for automated polling for new jobs based upon a | createdon | string, timestamp | | completedon | string, timestamp | | keepuntil | string, timestamp | - | oncomplete | bool | + | deadletter | string | | output | object | - * `enforceSingletonQueueActiveLimit`, bool - - If `true`, modifies the behavior of the `useSingletonQueue` flag to allow a max of 1 job to be queued plus a max of 1 job to be active. - >Note that use of this option can impact performance on instances with large numbers of jobs. - **Resolves** - `[job]`: array of job objects, `null` if none found @@ -784,9 +746,6 @@ for (let i = 0; i < jobs.length; i++) { } ``` -### `fetchCompleted(name [, batchSize] [, options])` - -Same as `fetch()`, but retrieves any completed jobs. See [`onComplete()`](#oncompletename--options-handler) for more information. ## `work()` Adds a new polling worker for a queue and executes the provided callback function when jobs are found. Multiple workers can be added if needed. @@ -826,13 +785,10 @@ The default concurrency for `work()` is 1 job every 2 seconds. 
Both the interval Same as in [`fetch()`](#fetch) -* **enforceSingletonQueueActiveLimit**, bool - - Same as in [`fetch()`](#fetch) **Polling options** -How often workers will poll the queue table for jobs. Available in the constructor as a default or per worker in `work()` and `onComplete()`. +How often workers will poll the queue table for jobs. Available in the constructor as a default or per worker in `work()`. * **newJobCheckInterval**, int @@ -878,57 +834,6 @@ await boss.work('email-welcome', { batchSize: 5 }, ) ``` -### `onComplete(name [, options], handler)` - -Sometimes when a job completes, expires or fails, it's important enough to trigger other things that should react to it. `onComplete` works identically to `work()` and was created to facilitate the creation of orchestrations or sagas between jobs that may or may not know about each other. This common messaging pattern allows you to keep multi-job flow logic out of the individual job handlers so you can manage things in a more centralized fashion while not losing your mind. As you most likely already know, asynchronous jobs are complicated enough already. Internally, these jobs have a special prefix of `__state__completed__`. - -The callback for `onComplete()` returns a job containing the original job and completion details. `request` will be the original job as submitted with `id`, `name` and `data`. `response` may or may not have a value based on arguments in [complete()](#completeid--data) or [fail()](#failid--data). - -Here's an example from the test suite showing this in action. 
- -```js -const jobName = 'onCompleteFtw' -const requestPayload = { token:'trivial' } -const responsePayload = { message: 'so verbose', code: '1234' } - -boss.onComplete(jobName, job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual(job.data.request.data.token, requestPayload.token) - assert.strictEqual(job.data.response.message, responsePayload.message) - assert.strictEqual(job.data.response.code, responsePayload.code) - - finished() // test suite completion callback -}) - -const jobId = await boss.send(jobName, requestPayload) -const job = await boss.fetch(jobName) -await boss.complete(job.id, responsePayload) -``` - -The following is an example data object from the job retrieved in onComplete() above. - -```js -{ - "request": { - "id": "26a608d0-79bf-11e8-8391-653981c16efd", - "name": "onCompleteFtw", - "data": { - "token": "trivial" - } - }, - "response": { - "message": "so verbose", - "code": "1234" - }, - "failed": false, - "state": "completed", - "createdOn": "2018-06-26T23:04:12.9392-05:00", - "startedOn": "2018-06-26T23:04:12.945533-05:00", - "completedOn": "2018-06-26T23:04:12.949092-05:00", - "retryCount": 0 -} -``` - ## `offWork(value)` Removes a worker by name or id and stops polling. @@ -938,10 +843,6 @@ Removes a worker by name or id and stops polling. If a string, removes all workers found matching the name. If an object, only the worker with a matching `id` will be removed. -### `offComplete(value)` - -Similar to `offWork()`, but removes an `onComplete()` worker. - ## `publish(event, data, options)` Publish an event with optional data and options (Same as `send()` args). Looks up all subscriptions for the event and sends jobs to all those queues. Returns an array of job ids. @@ -956,7 +857,7 @@ Remove the subscription of queue `name` to `event`. ## Scheduling -Jobs may be sent automatically based on a cron expression. As with other cron-based systems, at least one instance needs to be running for scheduling to work. 
In order to reduce the amount of evaluations, schedules are checked every 30 seconds, which means the 6-placeholder format should be discouraged in favor of the minute-level precision 5-placeholder format. +Jobs may be created automatically based on a cron expression. As with other cron-based systems, at least one instance needs to be running for scheduling to work. In order to reduce the amount of evaluations, schedules are checked every 30 seconds, which means the 6-placeholder format should be discouraged in favor of the minute-level precision 5-placeholder format. For example, use this format, which implies "any second during 3:30 am every day" @@ -976,7 +877,7 @@ If needed, the default clock monitoring interval can be adjusted using `clockMon ```js { - noScheduling: true + schedule: false } ``` @@ -1033,7 +934,7 @@ Resumes a set of cancelled jobs. ## `complete(id [, data, options])` -Completes an active job. This would likely only be used with `fetch()`. Accepts an optional `data` argument for usage with [`onComplete()`](#oncompletename--options-handler) state-based workers or `fetchCompleted()`. +Completes an active job. This would likely only be used with `fetch()`. Accepts an optional `data` argument. The promise will resolve on a successful completion, or reject if the job could not be completed. @@ -1085,13 +986,21 @@ As an example, the following options object include active jobs along with creat Retrieves a job with all metadata by id in either the primary or archive storage. -## `deleteQueue(name)` +## `createQueue(name, type)` -Deletes all pending jobs in the specified queue from the active job table. All jobs in the archive table are retained. +Creates a typed queue. This is an optional step in order to use unique constraints to limit how many jobs can exist in each state. 
-## `deleteAllQueues()` +Allowed type values: + +| type | description | +| - | - | +| debounced | Allows only 1 job to be queued, unlimited active | +| singleton | Allows only 1 job to be active, unlimited queued | +| stately | Combination of the above: Allow 1 job to be queued. Allow 1 job to be active | + +## `deleteQueue(name)` -Deletes all pending jobs from all queues in the active job table. All jobs in the archive table are retained. +Deletes a queue and all jobs from the active job table. All jobs in the archive table are retained. ## `clearStorage()` diff --git a/package-lock.json b/package-lock.json index 878a73fd..c3db11a6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,21 +1,19 @@ { "name": "pg-boss", - "version": "9.0.3", + "version": "10.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "pg-boss", - "version": "9.0.3", + "version": "10.0.0", "license": "MIT", "dependencies": { "cron-parser": "^4.0.0", - "delay": "^5.0.0", "lodash.debounce": "^4.0.8", "p-map": "^4.0.0", "pg": "^8.5.1", - "serialize-error": "^8.1.0", - "uuid": "^9.0.0" + "serialize-error": "^8.1.0" }, "devDependencies": { "@types/node": "^20.3.3", @@ -51,47 +49,119 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.5.tgz", - "integrity": "sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dev": true, "dependencies": { - "@babel/highlight": "^7.22.5" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": 
"sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/compat-data": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz", - "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==", + "version": "7.22.9", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.9.tgz", + "integrity": "sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz", - "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==", + "version": "7.22.17", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.17.tgz", + "integrity": "sha512-2EENLmhpwplDux5PSsZnSbnSkB3tZ6QTksgO25xwEL7pIDcNOMhF5v/s6RzwjMZzZzw9Ofc30gHv5ChCC8pifQ==", "dev": true, "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.22.5", - "@babel/generator": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.5", - "@babel/helper-module-transforms": "^7.22.5", - "@babel/helpers": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.22.15", + 
"@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-module-transforms": "^7.22.17", + "@babel/helpers": "^7.22.15", + "@babel/parser": "^7.22.16", + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.22.17", + "@babel/types": "^7.22.17", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", - "json5": "^2.2.2", - "semver": "^6.3.0" + "json5": "^2.2.3", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -102,12 +172,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.5.tgz", - "integrity": "sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.15.tgz", + "integrity": "sha512-Zu9oWARBqeVOW0dZOjXc3JObrzuqothQ3y/n1kUtrjCoCPLkXUwMvOo/F/TCfoHMbWIFlWwpZtkZVb9ga4U2pA==", "dev": true, "dependencies": { - "@babel/types": "^7.22.5", + "@babel/types": "^7.22.15", "@jridgewell/gen-mapping": "^0.3.2", "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" @@ -117,22 +187,19 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz", - "integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz", + "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==", "dev": true, "dependencies": { - "@babel/compat-data": "^7.22.5", - "@babel/helper-validator-option": "^7.22.5", - "browserslist": "^4.21.3", + "@babel/compat-data": "^7.22.9", + "@babel/helper-validator-option": "^7.22.15", + "browserslist": "^4.21.9", 
"lru-cache": "^5.1.1", - "semver": "^6.3.0" + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-environment-visitor": { @@ -170,34 +237,34 @@ } }, "node_modules/@babel/helper-module-imports": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz", - "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", "dev": true, "dependencies": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz", - "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==", + "version": "7.22.17", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.17.tgz", + "integrity": "sha512-XouDDhQESrLHTpnBtCKExJdyY4gJCdrvH2Pyv8r8kovX2U8G0dRUOT45T9XlbLtuu9CLXP15eusnkprhoPV5iQ==", "dev": true, "dependencies": { "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-module-imports": "^7.22.15", "@babel/helper-simple-access": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.15" }, "engines": { "node": 
">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-simple-access": { @@ -213,9 +280,9 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz", - "integrity": "sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "dependencies": { "@babel/types": "^7.22.5" @@ -234,45 +301,45 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", - "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.15.tgz", + "integrity": "sha512-4E/F9IIEi8WR94324mbDUMo074YTheJmd7eZF5vITTeYchqAi6sYXRLHUVsmkdmY4QjfKTcB2jB7dVP3NaBElQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz", - "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz", + "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==", "dev": true, 
"engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz", - "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.15.tgz", + "integrity": "sha512-7pAjK0aSdxOwR+CcYAqgWOGy5dcfvzsTIfFTb2odQqW47MDfv14UaJDY6eng8ylM2EaeKXdxaSWESbkmaQHTmw==", "dev": true, "dependencies": { - "@babel/template": "^7.22.5", - "@babel/traverse": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.5.tgz", - "integrity": "sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.13.tgz", + "integrity": "sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ==", "dev": true, "dependencies": { "@babel/helper-validator-identifier": "^7.22.5", - "chalk": "^2.0.0", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -351,9 +418,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.5.tgz", - "integrity": "sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==", + "version": "7.22.16", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.16.tgz", + "integrity": "sha512-+gPfKv8UWeKKeJTUxe59+OobVcrYHETCsORl61EmSkmgymguYk/X5bp7GuUIXaFsc6y++v8ZxPsLSSuujqDphA==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -363,33 +430,33 @@ } }, 
"node_modules/@babel/template": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.5.tgz", - "integrity": "sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.5.tgz", - "integrity": "sha512-7DuIjPgERaNo6r+PZwItpjCZEa5vyw4eJGufeLxrPdBXBoLcCJCIasvK6pK/9DVNrLZTLFhUGqaC6X/PA007TQ==", + "version": "7.22.17", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.17.tgz", + "integrity": "sha512-xK4Uwm0JnAMvxYZxOVecss85WxTEIbTa7bnGyf/+EgCL5Zt3U7htUpEOWv9detPlamGKuRzCqw74xVglDWpPdg==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.22.5", - "@babel/generator": "^7.22.5", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.22.15", "@babel/helper-environment-visitor": "^7.22.5", "@babel/helper-function-name": "^7.22.5", "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/types": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.22.16", + "@babel/types": "^7.22.17", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -398,13 +465,13 @@ } }, "node_modules/@babel/types": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.5.tgz", - "integrity": 
"sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==", + "version": "7.22.17", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.17.tgz", + "integrity": "sha512-YSQPHLFtQNE5xN9tHuZnzu8vPr61wVTBZdfv1meex1NBosa4iT05k/Jw06ddJugi4bk7The/oSwQGFcksmEJQg==", "dev": true, "dependencies": { "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.15", "to-fast-properties": "^2.0.0" }, "engines": { @@ -427,18 +494,18 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.1.tgz", - "integrity": "sha512-Z5ba73P98O1KUYCCJTUeVpja9RcGoMdncZ6T49FCUl2lN38JtCJ+3WgIDBv0AuY4WChU5PmtJmOCTlN6FZTFKQ==", + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.8.0.tgz", + "integrity": "sha512-JylOEEzDiOryeUnFbQz+oViCXS0KsvR1mvHkoMiu5+UiBvy+RYX7tzlIIIEstF/gVa2tj9AQXk3dgnxv6KxhFg==", "dev": true, "engines": { "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, "node_modules/@eslint/eslintrc": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.0.tgz", - "integrity": "sha512-Lj7DECXqIVCqnqjjHMPna4vn6GJcMgul/wuS0je9OZ9gsL0zzDpKPVtcG1HaDVc+9y+qgXneTeUMbCqXJNpH1A==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.2.tgz", + "integrity": "sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==", "dev": true, "dependencies": { "ajv": "^6.12.4", @@ -469,9 +536,9 @@ } }, "node_modules/@eslint/eslintrc/node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "version": "13.21.0", + "resolved": 
"https://registry.npmjs.org/globals/-/globals-13.21.0.tgz", + "integrity": "sha512-ybyme3s4yy/t/3s35bewwXKOf7cvzfreG2lH0lZl0JB7I4GxRP2ghxOK/Nb9EkRXdbBXZLfq/p/0W2JUONB/Gg==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -508,18 +575,18 @@ } }, "node_modules/@eslint/js": { - "version": "8.44.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.44.0.tgz", - "integrity": "sha512-Ag+9YM4ocKQx9AarydN0KY2j0ErMHNIocPDrVo8zAE44xLTjEtz81OdR68/cydGtk6m6jDb5Za3r2useMzYmSw==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.49.0.tgz", + "integrity": "sha512-1S8uAY/MTJqVx0SC4epBq+N2yhuwtNwLbJYNZyhL2pO1ZVKn5HFXav5T41Ryzy9K9V7ZId2JB2oy/W4aCd9/2w==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.10", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz", - "integrity": "sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==", + "version": "0.11.11", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.11.tgz", + "integrity": "sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA==", "dev": true, "dependencies": { "@humanwhocodes/object-schema": "^1.2.1", @@ -685,9 +752,9 @@ } }, "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", - "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", "dev": true, "engines": { "node": ">=6.0.0" @@ -709,21 +776,15 @@ "dev": true }, 
"node_modules/@jridgewell/trace-mapping": { - "version": "0.3.18", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", - "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", + "version": "0.3.19", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", + "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", "dev": true, "dependencies": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, - "node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", - "dev": true - }, "node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", @@ -766,15 +827,15 @@ "dev": true }, "node_modules/@types/node": { - "version": "20.3.3", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.3.3.tgz", - "integrity": "sha512-wheIYdr4NYML61AjC8MKj/2jrR/kDQri/CIpVoZwldwhnIrD/j9jIU5bJ8yBKuB2VhpFV7Ab6G2XkBjv9r9Zzw==", + "version": "20.6.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.6.0.tgz", + "integrity": "sha512-najjVq5KN2vsH2U/xyh2opaSEz6cZMR2SetLIlxlj08nOcmPOemJmUK2o4kUzfLqfrWE0PIrNeE16XhYDd3nqg==", "dev": true }, "node_modules/acorn": { - "version": "8.9.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.9.0.tgz", - "integrity": "sha512-jaVNAFBHNLXspO543WnNNPZFRtavh3skAkITqD0/2aeMkKZTN+254PyhwxFYrk3vQ1xfY+2wbesJMs/JC8/PwQ==", + "version": "8.10.0", + "resolved": 
"https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -904,15 +965,15 @@ } }, "node_modules/array-includes": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", - "integrity": "sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.7.tgz", + "integrity": "sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", "is-string": "^1.0.7" }, "engines": { @@ -922,15 +983,34 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.3.tgz", + "integrity": "sha512-LzLoiOMAxvy+Gd3BAq3B7VeIgPdo+Q8hthvKtXybMvRV0jrXfJM/t8mw7nNlpEcVlVUnCnM2KSX4XU5HmpodOA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0", + "get-intrinsic": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/array.prototype.flat": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", + "version": "1.3.2", + "resolved": 
"https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz", + "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" }, "engines": { @@ -941,14 +1021,14 @@ } }, "node_modules/array.prototype.flatmap": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz", - "integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz", + "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" }, "engines": { @@ -959,16 +1039,46 @@ } }, "node_modules/array.prototype.tosorted": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.1.tgz", - "integrity": "sha512-pZYPXPRl2PqWcsUs6LOMn+1f1532nEoPTYowBtqLwAW+W8vSVhkIGnmOX1t/UQjD6YGI0vcD2B1U7ZFGQH9jnQ==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.2.tgz", + "integrity": "sha512-HuQCHOlk1Weat5jzStICBCd83NxiIMwqDg/dHEsoefabn/hJRj5pVdWcPUSpRrwhwxZOsQassMpgN/xRYFBMIg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0", - "get-intrinsic": "^1.1.3" + 
"get-intrinsic": "^1.2.1" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.2.tgz", + "integrity": "sha512-yMBKppFur/fbHu9/6USUe03bZ4knMYiwFBcyiaXB8Go0qNehwX6inYPzK9U0NeQvGxKthcmHcaR8P5MStSRBAw==", + "dev": true, + "dependencies": { + "array-buffer-byte-length": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "is-array-buffer": "^3.0.2", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/asynciterator.prototype": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/asynciterator.prototype/-/asynciterator.prototype-1.0.0.tgz", + "integrity": "sha512-wwHYEIS0Q80f5mosx3L/dfG5t5rjEa9Ft51GTaNt862EnpyGHpgz2RkZvLPp1oF5TnAiTohkEKVEu8pQPJI7Vg==", + "dev": true, + "dependencies": { + "has-symbols": "^1.0.3" } }, "node_modules/available-typed-arrays": { @@ -1026,9 +1136,9 @@ "dev": true }, "node_modules/browserslist": { - "version": "4.21.9", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.9.tgz", - "integrity": "sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==", + "version": "4.21.10", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz", + "integrity": "sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==", "dev": true, "funding": [ { @@ -1045,9 +1155,9 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001503", - "electron-to-chromium": "^1.4.431", - "node-releases": "^2.0.12", + "caniuse-lite": "^1.0.30001517", + "electron-to-chromium": "^1.4.477", + "node-releases": "^2.0.13", "update-browserslist-db": "^1.0.11" }, "bin": { @@ -1087,9 +1197,9 @@ } }, 
"node_modules/builtins/node_modules/semver": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz", - "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -1154,9 +1264,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001510", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001510.tgz", - "integrity": "sha512-z35lD6xjHklPNgjW4P68R30Er5OCIZE0C1bUf8IMXSh34WJYXfIy+GxIEraQXYJ2dvTU8TumjYAeLrPhpMlsuw==", + "version": "1.0.30001532", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001532.tgz", + "integrity": "sha512-FbDFnNat3nMnrROzqrsg314zhqN5LGQ1kyyMk2opcrwGbVGpHRhgCWtAgD5YJUqNAiQ+dklreil/c3Qf1dfCTw==", "dev": true, "funding": [ { @@ -1284,9 +1394,9 @@ "dev": true }, "node_modules/cron-parser": { - "version": "4.8.1", - "resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.8.1.tgz", - "integrity": "sha512-jbokKWGcyU4gl6jAfX97E1gDpY12DJ1cLJZmoDzaAln/shZ+S3KBFBuA2Q6WeUN4gJf/8klnV1EfvhA2lK5IRQ==", + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.9.0.tgz", + "integrity": "sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==", "dependencies": { "luxon": "^3.2.1" }, @@ -1377,17 +1487,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/delay": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/delay/-/delay-5.0.0.tgz", - "integrity": "sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, "node_modules/diff": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", @@ -1410,9 +1509,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.4.447", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.447.tgz", - "integrity": "sha512-sxX0LXh+uL41hSJsujAN86PjhrV/6c79XmpY0TvjZStV6VxIgarf8SRkUoUTuYmFcZQTemsoqo8qXOGw5npWfw==", + "version": "1.4.513", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.513.tgz", + "integrity": "sha512-cOB0xcInjm+E5qIssHeXJ29BaUyWpMyFKT5RB3bsLENDheCja0wMkHJyiPl0NBE/VzDI7JDuNEQWhe6RitEUcw==", "dev": true }, "node_modules/emoji-regex": { @@ -1431,18 +1530,19 @@ } }, "node_modules/es-abstract": { - "version": "1.21.2", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.2.tgz", - "integrity": "sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==", + "version": "1.22.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.1.tgz", + "integrity": "sha512-ioRRcXMO6OFyRpyzV3kE1IIBd4WG5/kltnzdxSCqoP8CMGs/Li+M1uF5o7lOkZVFjDs+NLesthnF66Pg/0q0Lw==", "dev": true, "dependencies": { "array-buffer-byte-length": "^1.0.0", + "arraybuffer.prototype.slice": "^1.0.1", "available-typed-arrays": "^1.0.5", "call-bind": "^1.0.2", "es-set-tostringtag": "^2.0.1", "es-to-primitive": "^1.2.1", "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.2.0", + "get-intrinsic": "^1.2.1", "get-symbol-description": "^1.0.0", "globalthis": "^1.0.3", "gopd": "^1.0.1", @@ -1462,14 +1562,18 @@ "object-inspect": "^1.12.3", "object-keys": "^1.1.1", "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", + "regexp.prototype.flags": "^1.5.0", + "safe-array-concat": "^1.0.0", "safe-regex-test": "^1.0.0", "string.prototype.trim": "^1.2.7", "string.prototype.trimend": "^1.0.6", "string.prototype.trimstart": 
"^1.0.6", + "typed-array-buffer": "^1.0.0", + "typed-array-byte-length": "^1.0.0", + "typed-array-byte-offset": "^1.0.0", "typed-array-length": "^1.0.4", "unbox-primitive": "^1.0.2", - "which-typed-array": "^1.1.9" + "which-typed-array": "^1.1.10" }, "engines": { "node": ">= 0.4" @@ -1478,6 +1582,28 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/es-iterator-helpers": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.14.tgz", + "integrity": "sha512-JgtVnwiuoRuzLvqelrvN3Xu7H9bu2ap/kQ2CrM62iidP8SKuD99rWU3CJy++s7IVL2qb/AjXPGR/E7i9ngd/Cw==", + "dev": true, + "dependencies": { + "asynciterator.prototype": "^1.0.0", + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-set-tostringtag": "^2.0.1", + "function-bind": "^1.1.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.0", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.5", + "iterator.prototype": "^1.1.0", + "safe-array-concat": "^1.0.0" + } + }, "node_modules/es-set-tostringtag": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", @@ -1546,27 +1672,27 @@ } }, "node_modules/eslint": { - "version": "8.44.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.44.0.tgz", - "integrity": "sha512-0wpHoUbDUHgNCyvFB5aXLiQVfK9B0at6gUvzy83k4kAsQ/u769TQDX6iKC+aO4upIHO9WSaA3QoXYQDHbNwf1A==", + "version": "8.49.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.49.0.tgz", + "integrity": "sha512-jw03ENfm6VJI0jA9U+8H5zfl5b+FvuU3YYvZRdZHOlU2ggJkxrlkJH4HcDrZpj6YwD8kuYqvQM8LyesoazrSOQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.4.0", - "@eslint/eslintrc": "^2.1.0", - "@eslint/js": "8.44.0", - "@humanwhocodes/config-array": "^0.11.10", + "@eslint-community/regexpp": "^4.6.1", + 
"@eslint/eslintrc": "^2.1.2", + "@eslint/js": "8.49.0", + "@humanwhocodes/config-array": "^0.11.11", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", - "ajv": "^6.10.0", + "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", "debug": "^4.3.2", "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.2.0", - "eslint-visitor-keys": "^3.4.1", - "espree": "^9.6.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", @@ -1576,7 +1702,6 @@ "globals": "^13.19.0", "graphemer": "^1.4.0", "ignore": "^5.2.0", - "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "is-path-inside": "^3.0.3", @@ -1588,7 +1713,6 @@ "natural-compare": "^1.4.0", "optionator": "^0.9.3", "strip-ansi": "^6.0.1", - "strip-json-comments": "^3.1.0", "text-table": "^0.2.0" }, "bin": { @@ -1655,14 +1779,14 @@ } }, "node_modules/eslint-import-resolver-node": { - "version": "0.3.7", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz", - "integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==", + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", "dev": true, "dependencies": { "debug": "^3.2.7", - "is-core-module": "^2.11.0", - "resolve": "^1.22.1" + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" } }, "node_modules/eslint-import-resolver-node/node_modules/debug": { @@ -1744,26 +1868,28 @@ } }, "node_modules/eslint-plugin-import": { - "version": "2.27.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz", - "integrity": 
"sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==", + "version": "2.28.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.28.1.tgz", + "integrity": "sha512-9I9hFlITvOV55alzoKBI+K9q74kv0iKMeY6av5+umsNwayt59fz692daGyjR+oStBQgx6nwR9rXldDev3Clw+A==", "dev": true, "dependencies": { "array-includes": "^3.1.6", + "array.prototype.findlastindex": "^1.2.2", "array.prototype.flat": "^1.3.1", "array.prototype.flatmap": "^1.3.1", "debug": "^3.2.7", "doctrine": "^2.1.0", "eslint-import-resolver-node": "^0.3.7", - "eslint-module-utils": "^2.7.4", + "eslint-module-utils": "^2.8.0", "has": "^1.0.3", - "is-core-module": "^2.11.0", + "is-core-module": "^2.13.0", "is-glob": "^4.0.3", "minimatch": "^3.1.2", + "object.fromentries": "^2.0.6", + "object.groupby": "^1.0.0", "object.values": "^1.1.6", - "resolve": "^1.22.1", - "semver": "^6.3.0", - "tsconfig-paths": "^3.14.1" + "semver": "^6.3.1", + "tsconfig-paths": "^3.14.2" }, "engines": { "node": ">=4" @@ -1875,9 +2001,9 @@ } }, "node_modules/eslint-plugin-n/node_modules/semver": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.3.tgz", - "integrity": "sha512-QBlUtyVk/5EeHbi7X0fw6liDZc7BBmEaSYn01fMU1OUYbf6GPsbTtd8WmnqbI20SeycoHSeiybkE/q1Q+qlThQ==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -1908,15 +2034,16 @@ } }, "node_modules/eslint-plugin-react": { - "version": "7.32.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.32.2.tgz", - "integrity": "sha512-t2fBMa+XzonrrNkyVirzKlvn5RXzzPwRHtMvLAtVZrt8oxgnTQaYbU6SXTOO1mwQgp1y5+toMSKInnzGr0Knqg==", + "version": "7.33.2", + "resolved": 
"https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.33.2.tgz", + "integrity": "sha512-73QQMKALArI8/7xGLNI/3LylrEYrlKZSb5C9+q3OtOewTnMQi5cT+aE9E41sLCmli3I9PGGmD1yiZydyo4FEPw==", "dev": true, "dependencies": { "array-includes": "^3.1.6", "array.prototype.flatmap": "^1.3.1", "array.prototype.tosorted": "^1.1.1", "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.12", "estraverse": "^5.3.0", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", @@ -1926,7 +2053,7 @@ "object.values": "^1.1.6", "prop-types": "^15.8.1", "resolve": "^2.0.0-next.4", - "semver": "^6.3.0", + "semver": "^6.3.1", "string.prototype.matchall": "^4.0.8" }, "engines": { @@ -1988,9 +2115,9 @@ } }, "node_modules/eslint-scope": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.0.tgz", - "integrity": "sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==", + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, "dependencies": { "esrecurse": "^4.3.0", @@ -2031,9 +2158,9 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "3.4.1", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz", - "integrity": "sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -2065,9 +2192,9 @@ } }, "node_modules/eslint/node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - 
"integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "version": "13.21.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.21.0.tgz", + "integrity": "sha512-ybyme3s4yy/t/3s35bewwXKOf7cvzfreG2lH0lZl0JB7I4GxRP2ghxOK/Nb9EkRXdbBXZLfq/p/0W2JUONB/Gg==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -2104,9 +2231,9 @@ } }, "node_modules/espree": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.0.tgz", - "integrity": "sha512-1FH/IiruXZ84tpUlm0aCUEwMl2Ho5ilqVh0VvQXw+byAz/4SAciyHLlfmL5WYqsvD38oymdUwBss0LtK8m4s/A==", + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, "dependencies": { "acorn": "^8.9.0", @@ -2269,16 +2396,17 @@ } }, "node_modules/flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.1.0.tgz", + "integrity": "sha512-OHx4Qwrrt0E4jEIcI5/Xb+f+QmJYNj2rrK8wiIdQOIrB9WrrJL8cjZvXdXuBTkkEwEqLycb5BeZDV1o2i9bTew==", "dev": true, "dependencies": { - "flatted": "^3.1.0", + "flatted": "^3.2.7", + "keyv": "^4.5.3", "rimraf": "^3.0.2" }, "engines": { - "node": "^10.12.0 || >=12.0.0" + "node": ">=12.0.0" } }, "node_modules/flatted": { @@ -2336,9 +2464,9 @@ "dev": true }, "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": 
"sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "dev": true, "hasInstallScript": true, "optional": true, @@ -2356,15 +2484,15 @@ "dev": true }, "node_modules/function.prototype.name": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", + "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" }, "engines": { "node": ">= 0.4" @@ -2767,6 +2895,21 @@ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", "dev": true }, + "node_modules/is-async-function": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", + "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-bigint": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", @@ -2820,9 +2963,9 @@ } }, "node_modules/is-core-module": { - "version": "2.12.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz", - "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==", + 
"version": "2.13.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", + "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", "dev": true, "dependencies": { "has": "^1.0.3" @@ -2855,6 +2998,18 @@ "node": ">=0.10.0" } }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz", + "integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -2864,6 +3019,21 @@ "node": ">=8" } }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -2876,6 +3046,15 @@ "node": ">=0.10.0" } }, + "node_modules/is-map": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz", + "integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-negative-zero": { "version": "2.0.2", "resolved": 
"https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", @@ -2946,6 +3125,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-set": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz", + "integrity": "sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-shared-array-buffer": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", @@ -3001,16 +3189,12 @@ } }, "node_modules/is-typed-array": { - "version": "1.1.10", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.12.tgz", + "integrity": "sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg==", "dev": true, "dependencies": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0" + "which-typed-array": "^1.1.11" }, "engines": { "node": ">= 0.4" @@ -3037,6 +3221,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/is-weakmap": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz", + "integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-weakref": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", @@ -3049,6 +3242,19 @@ "url": "https://github.com/sponsors/ljharb" } }, + 
"node_modules/is-weakset": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz", + "integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-windows": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", @@ -3058,6 +3264,12 @@ "node": ">=0.10.0" } }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -3129,27 +3341,60 @@ "node": ">=8" } }, - "node_modules/istanbul-lib-processinfo/node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", - "dev": true, - "bin": { - "uuid": "dist/bin/uuid" - } - }, "node_modules/istanbul-lib-report": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", "dev": true, "dependencies": { "istanbul-lib-coverage": "^3.0.0", - "make-dir": "^3.0.0", + "make-dir": "^4.0.0", "supports-color": "^7.1.0" }, "engines": { - "node": ">=8" + "node": 
">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/istanbul-lib-report/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/istanbul-lib-report/node_modules/supports-color": { @@ -3164,6 +3409,12 @@ "node": ">=8" } }, + "node_modules/istanbul-lib-report/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, "node_modules/istanbul-lib-source-maps": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", @@ -3179,9 +3430,9 @@ } }, "node_modules/istanbul-reports": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.5.tgz", - 
"integrity": "sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==", + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz", + "integrity": "sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==", "dev": true, "dependencies": { "html-escaper": "^2.0.0", @@ -3191,6 +3442,18 @@ "node": ">=8" } }, + "node_modules/iterator.prototype": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.1.tgz", + "integrity": "sha512-9E+nePc8C9cnQldmNl6bgpTY6zI4OPRZd97fhJ/iVZ1GifIUDVV5F6x1nEDqpe8KaMEZGT4xgrwKQDxXnjOIZQ==", + "dev": true, + "dependencies": { + "define-properties": "^1.2.0", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.3" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -3221,6 +3484,12 @@ "node": ">=4" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, "node_modules/json-parse-better-errors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", @@ -3252,9 +3521,9 @@ } }, "node_modules/jsx-ast-utils": { - "version": "3.3.4", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.4.tgz", - "integrity": "sha512-fX2TVdCViod6HwKEtSWGHs57oFhVfCMwieb9PuRDgjDPh5XeqJiHFFFJCHxU5cnTc3Bu/GRL+kPiFmw8XWOfKw==", + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", "dev": true, 
"dependencies": { "array-includes": "^3.1.6", @@ -3266,6 +3535,15 @@ "node": ">=4.0" } }, + "node_modules/keyv": { + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.3.tgz", + "integrity": "sha512-QCiSav9WaX1PgETJ+SpNnx2PRRapJ/oRSXM4VO5OGYGSjrxbKPVFVhB3l2OCbLCk329N8qyAtsJjSjvVBWzEug==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -3383,9 +3661,9 @@ } }, "node_modules/luxon": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.3.0.tgz", - "integrity": "sha512-An0UCfG/rSiqtAIiBPO0Y9/zAnHUZxAMiCpTd5h2smgsj7GGmcenvrvww2cqNA8/4A5ZrD1gJpHN2mIHZQF+Mg==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.4.3.tgz", + "integrity": "sha512-tFWBiv3h7z+T/tDaoxA8rqTxy1CHV6gHS//QdaH4pulbq/JuBSGgQspQQqcgnwdAx6pNI7cmvz5Sv/addzHmUg==", "engines": { "node": ">=12" } @@ -3503,9 +3781,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.12", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", - "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==", + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", + "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==", "dev": true }, "node_modules/normalize-path": { @@ -3734,28 +4012,28 @@ } }, "node_modules/object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.7.tgz", + "integrity": 
"sha512-jCBs/0plmPsOnrKAfFQXRG2NFjlhZgjjcBLSmTnEhU8U6vVTsVe8ANeQJCHTl3gSsI4J+0emOoCgoKlmQPMgmA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "engines": { "node": ">= 0.4" } }, "node_modules/object.fromentries": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.7.tgz", + "integrity": "sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "engines": { "node": ">= 0.4" @@ -3764,28 +4042,40 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/object.groupby": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.1.tgz", + "integrity": "sha512-HqaQtqLnp/8Bn4GL16cj+CUYbnpe1bh0TtEaWvybszDG4tgxCJuRpV8VGuvNaI1fAnI4lUJzDG55MXcOH4JZcQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1" + } + }, "node_modules/object.hasown": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.2.tgz", - "integrity": "sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.3.tgz", + "integrity": "sha512-fFI4VcYpRHvSLXxP7yiZOMAd331cPfd2p7PFDVbgUsYOfCT3tICVqXWngbjr4m49OvsBwUBQ6O2uQoJvy3RexA==", "dev": true, 
"dependencies": { - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/object.values": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.7.tgz", + "integrity": "sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "engines": { "node": ">= 0.4" @@ -3952,13 +4242,13 @@ "dev": true }, "node_modules/pg": { - "version": "8.11.1", - "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.1.tgz", - "integrity": "sha512-utdq2obft07MxaDg0zBJI+l/M3mBRfIpEN3iSemsz0G5F2/VXx+XzqF4oxrbIZXQxt2AZzIUzyVg/YM6xOP/WQ==", + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.3.tgz", + "integrity": "sha512-+9iuvG8QfaaUrrph+kpF24cXkH1YOOUeArRNYIxq1viYHZagBxrTno7cecY1Fa44tJeZvaoG+Djpkc3JwehN5g==", "dependencies": { "buffer-writer": "2.0.0", "packet-reader": "1.0.0", - "pg-connection-string": "^2.6.1", + "pg-connection-string": "^2.6.2", "pg-pool": "^3.6.1", "pg-protocol": "^1.6.0", "pg-types": "^2.1.0", @@ -3986,9 +4276,9 @@ "optional": true }, "node_modules/pg-connection-string": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.1.tgz", - "integrity": "sha512-w6ZzNu6oMmIzEAYVw+RLK0+nqHPt8K3ZnknKi+g48Ak2pr3dtljJW3o+D/n2zzCG07Zoe9VOX3aiKpj+BN0pjg==" + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.2.tgz", + 
"integrity": "sha512-ch6OwaeaPYcova4kKZ15sbJ2hKb/VP48ZD2gE7i1J+L4MspCtBMAx8nMgz7bksc7IojCIIWuEhHibSMFH8m8oA==" }, "node_modules/pg-int8": { "version": "1.0.1", @@ -4322,6 +4612,26 @@ "node": ">=8.10.0" } }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.4.tgz", + "integrity": "sha512-ECkTw8TmJwW60lOTR+ZkODISW6RQ8+2CL3COqtiJKLd6MmB45hN51HprHFziKLGkAuTGQhBb91V8cy+KHlaCjw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/regexp.prototype.flags": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.0.tgz", @@ -4379,12 +4689,12 @@ "dev": true }, "node_modules/resolve": { - "version": "1.22.2", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", - "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", + "version": "1.22.4", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.4.tgz", + "integrity": "sha512-PXNdCiPqDqeUou+w1C2eTQbNfxKSuMxqTCuvlmmMsk1NWHL5fRrhY6Pl0qEYYc6+QqGClco1Qj8XnjPego4wfg==", "dev": true, "dependencies": { - "is-core-module": "^2.11.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -4452,6 +4762,24 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-array-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.0.1.tgz", + "integrity": "sha512-6XbUAseYE2KtOuGueyeobCySj9L4+66Tn6KQMOPQJrAJEowYKW/YR/MGJZl7FdydUdaFu4LYyDZjxf4/Nmo23Q==", + "dev": true, + "dependencies": { + "call-bind": 
"^1.0.2", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -4487,9 +4815,9 @@ } }, "node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "bin": { "semver": "bin/semver.js" @@ -4697,18 +5025,18 @@ } }, "node_modules/string.prototype.matchall": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.8.tgz", - "integrity": "sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==", + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.9.tgz", + "integrity": "sha512-6i5hL3MqG/K2G43mWXWgP+qizFW/QH/7kCNN13JrJS5q48FN5IKksLDscexKP3dnmB6cdm9jlNgAsWNLpSykmA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "get-intrinsic": "^1.2.1", "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.4.3", + "internal-slot": "^1.0.5", + "regexp.prototype.flags": "^1.5.0", "side-channel": "^1.0.4" }, "funding": { @@ -4716,14 +5044,14 @@ } }, "node_modules/string.prototype.trim": { - "version": "1.2.7", - "resolved": 
"https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz", - "integrity": "sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==", + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz", + "integrity": "sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "engines": { "node": ">= 0.4" @@ -4733,28 +5061,28 @@ } }, "node_modules/string.prototype.trimend": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz", + "integrity": "sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz", + "integrity": "sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==", "dev": true, 
"dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -4937,6 +5265,57 @@ "node": ">=8" } }, + "node_modules/typed-array-buffer": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.0.tgz", + "integrity": "sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz", + "integrity": "sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz", + "integrity": "sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==", + "dev": true, + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "has-proto": "^1.0.1", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/typed-array-length": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", @@ -5015,9 +5394,10 @@ } }, 
"node_modules/uuid": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", - "integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==", + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, "bin": { "uuid": "dist/bin/uuid" } @@ -5062,6 +5442,47 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/which-builtin-type": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.3.tgz", + "integrity": "sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==", + "dev": true, + "dependencies": { + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz", + "integrity": "sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==", + "dev": true, + "dependencies": { + "is-map": "^2.0.1", + "is-set": "^2.0.1", + "is-weakmap": "^2.0.1", + "is-weakset": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/which-module": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.1.tgz", @@ -5069,17 +5490,16 @@ "dev": true }, 
"node_modules/which-typed-array": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.11.tgz", + "integrity": "sha512-qe9UWWpkeG5yzZ0tNYxDmd7vo58HDBc39mZ0xWWpolAGADdFOzkfamWLDxkOWcvHQKVmdTyQdLD4NOfjLWTKew==", "dev": true, "dependencies": { "available-typed-arrays": "^1.0.5", "call-bind": "^1.0.2", "for-each": "^0.3.3", "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0", - "is-typed-array": "^1.1.10" + "has-tostringtag": "^1.0.0" }, "engines": { "node": ">= 0.4" diff --git a/package.json b/package.json index f75611ed..0ebe9863 100644 --- a/package.json +++ b/package.json @@ -1,19 +1,17 @@ { "name": "pg-boss", - "version": "9.0.3", + "version": "10.0.0", "description": "Queueing jobs in Node.js using PostgreSQL like a boss", "main": "./src/index.js", "engines": { - "node": ">=16" + "node": ">=18" }, "dependencies": { "cron-parser": "^4.0.0", - "delay": "^5.0.0", "lodash.debounce": "^4.0.8", "p-map": "^4.0.0", "pg": "^8.5.1", - "serialize-error": "^8.1.0", - "uuid": "^9.0.0" + "serialize-error": "^8.1.0" }, "devDependencies": { "@types/node": "^20.3.3", @@ -25,9 +23,6 @@ "scripts": { "test": "standard && mocha", "cover": "nyc npm test", - "export-schema": "node ./scripts/construct.js", - "export-migration": "node ./scripts/migrate.js", - "export-rollback": "node ./scripts/rollback.js", "tsc": "tsc --noEmit types.d.ts", "readme": "node ./test/readme.js" }, diff --git a/src/attorney.js b/src/attorney.js index 2a6fae4d..43cb5ac4 100644 --- a/src/attorney.js +++ b/src/attorney.js @@ -1,20 +1,20 @@ const assert = require('assert') -const { DEFAULT_SCHEMA, SINGLETON_QUEUE_KEY } = require('./plans') +const { DEFAULT_SCHEMA } = require('./plans') module.exports = { getConfig, checkSendArgs, - 
checkInsertArgs, + checkQueueArgs, checkWorkArgs, checkFetchArgs, - warnClockSkew + warnClockSkew, + queueNameHasPatternMatch, + assertPostgresObjectName } +const MAX_INTERVAL_HOURS = 24 + const WARNINGS = { - EXPIRE_IN_REMOVED: { - message: '\'expireIn\' option detected. This option has been removed. Use expireInSeconds, expireInMinutes or expireInHours.', - code: 'pg-boss-w01' - }, CLOCK_SKEW: { message: 'Timekeeper detected clock skew between this instance and the database server. This will not affect scheduling operations, but this warning is shown any time the skew exceeds 60 seconds.', code: 'pg-boss-w02' @@ -22,9 +22,25 @@ const WARNINGS = { CRON_DISABLED: { message: 'Archive interval is set less than 60s. Cron processing is disabled.', code: 'pg-boss-w03' + }, + ON_COMPLETE_REMOVED: { + message: '\'onComplete\' option detected. This option has been removed. Consider deadLetter if needed.', + code: 'pg-boss-w04' } } +function checkQueueArgs (name, options = {}) { + assertPostgresObjectName(name) + + assert(!('deadLetter' in options) || (typeof options.deadLetter === 'string'), 'deadLetter must be a string') + + applyRetryConfig(options) + applyExpirationConfig(options) + applyRetentionConfig(options) + + return options +} + function checkSendArgs (args, defaults) { let name, data, options @@ -57,11 +73,11 @@ function checkSendArgs (args, defaults) { assert(!('priority' in options) || (Number.isInteger(options.priority)), 'priority must be an integer') options.priority = options.priority || 0 + assert(!('deadLetter' in options) || (typeof options.deadLetter === 'string'), 'deadLetter must be a string') + applyRetryConfig(options, defaults) applyExpirationConfig(options, defaults) applyRetentionConfig(options, defaults) - applyCompletionConfig(options, defaults) - applySingletonKeyConfig(options) const { startAfter, singletonSeconds, singletonMinutes, singletonHours } = options @@ -83,23 +99,11 @@ function checkSendArgs (args, defaults) { 
assert(!singletonSeconds || singletonSeconds <= defaults.archiveSeconds, `throttling interval ${singletonSeconds}s cannot exceed archive interval ${defaults.archiveSeconds}s`) - return { name, data, options } -} - -function checkInsertArgs (jobs) { - assert(Array.isArray(jobs), `jobs argument should be an array. Received '${typeof jobs}'`) - return jobs.map(job => { - job = { ...job } - applySingletonKeyConfig(job) - return job - }) -} - -function applySingletonKeyConfig (options) { - if (options.singletonKey && options.useSingletonQueue && options.singletonKey !== SINGLETON_QUEUE_KEY) { - options.singletonKey = SINGLETON_QUEUE_KEY + options.singletonKey + if (options.onComplete) { + emitWarning(WARNINGS.ON_COMPLETE_REMOVED) } - delete options.useSingletonQueue + + return { name, data, options } } function checkWorkArgs (name, args, defaults) { @@ -129,7 +133,6 @@ function checkWorkArgs (name, args, defaults) { assert(!('teamSize' in options) || (Number.isInteger(options.teamSize) && options.teamSize >= 1), 'teamSize must be an integer > 0') assert(!('batchSize' in options) || (Number.isInteger(options.batchSize) && options.batchSize >= 1), 'batchSize must be an integer > 0') assert(!('includeMetadata' in options) || typeof options.includeMetadata === 'boolean', 'includeMetadata must be a boolean') - assert(!('enforceSingletonQueueActiveLimit' in options) || typeof options.enforceSingletonQueueActiveLimit === 'boolean', 'enforceSingletonQueueActiveLimit must be a boolean') return { options, callback } } @@ -137,11 +140,12 @@ function checkWorkArgs (name, args, defaults) { function checkFetchArgs (name, batchSize, options) { assert(name, 'missing queue name') - name = sanitizeQueueNameForFetch(name) + if (queueNameHasPatternMatch(name)) { + name = sanitizeQueueNameForFetch(name) + } assert(!batchSize || (Number.isInteger(batchSize) && batchSize >= 1), 'batchSize must be an integer > 0') assert(!('includeMetadata' in options) || typeof options.includeMetadata === 
'boolean', 'includeMetadata must be a boolean') - assert(!('enforceSingletonQueueActiveLimit' in options) || typeof options.enforceSingletonQueueActiveLimit === 'boolean', 'enforceSingletonQueueActiveLimit must be a boolean') return { name } } @@ -150,6 +154,10 @@ function sanitizeQueueNameForFetch (name) { return name.replace(/[%_*]/g, match => match === '*' ? '%' : '\\' + match) } +function queueNameHasPatternMatch (name) { + return name.includes('*') +} + function getConfig (value) { assert(value && (typeof value === 'object' || typeof value === 'string'), 'configuration assert: string or config object is required to connect to postgres') @@ -158,32 +166,35 @@ function getConfig (value) { ? { connectionString: value } : { ...value } - applyDatabaseConfig(config) + applySchemaConfig(config) applyMaintenanceConfig(config) applyArchiveConfig(config) applyArchiveFailedConfig(config) applyDeleteConfig(config) applyMonitoringConfig(config) - applyUuidConfig(config) applyNewJobCheckInterval(config) applyExpirationConfig(config) applyRetentionConfig(config) - applyCompletionConfig(config) return config } -function applyDatabaseConfig (config) { +function applySchemaConfig (config) { if (config.schema) { - assert(typeof config.schema === 'string', 'configuration assert: schema must be a string') - assert(config.schema.length <= 50, 'configuration assert: schema name cannot exceed 50 characters') - assert(!/\W/.test(config.schema), `configuration assert: ${config.schema} cannot be used as a schema. 
Only alphanumeric characters and underscores are allowed`) + assertPostgresObjectName(config.schema) } config.schema = config.schema || DEFAULT_SCHEMA } +function assertPostgresObjectName (name) { + assert(typeof name === 'string', 'Name must be a string') + assert(name.length <= 50, 'Name cannot exceed 50 characters') + assert(!/\W/.test(name), 'Name can only contain alphanumeric characters and underscores') + assert(!/^d/.test(name), 'Name cannot start with a number') +} + function applyArchiveConfig (config) { const ARCHIVE_DEFAULT = 60 * 60 * 12 @@ -211,18 +222,7 @@ function applyArchiveFailedConfig (config) { } } -function applyCompletionConfig (config, defaults) { - assert(!('onComplete' in config) || config.onComplete === true || config.onComplete === false, - 'configuration assert: onComplete must be either true or false') - - if (!('onComplete' in config)) { - config.onComplete = defaults - ? defaults.onComplete - : false - } -} - -function applyRetentionConfig (config, defaults) { +function applyRetentionConfig (config, defaults = {}) { assert(!('retentionSeconds' in config) || config.retentionSeconds >= 1, 'configuration assert: retentionSeconds must be at least every second') @@ -243,18 +243,13 @@ function applyRetentionConfig (config, defaults) { ? `${config.retentionMinutes} minutes` : ('retentionSeconds' in config) ? `${config.retentionSeconds} seconds` - : defaults - ? 
defaults.keepUntil - : '14 days' + : null config.keepUntil = keepUntil + config.keepUntilDefault = defaults?.keepUntil } -function applyExpirationConfig (config, defaults) { - if ('expireIn' in config) { - emitWarning(WARNINGS.EXPIRE_IN_REMOVED) - } - +function applyExpirationConfig (config, defaults = {}) { assert(!('expireInSeconds' in config) || config.expireInSeconds >= 1, 'configuration assert: expireInSeconds must be at least every second') @@ -265,16 +260,17 @@ function applyExpirationConfig (config, defaults) { 'configuration assert: expireInHours must be at least every hour') const expireIn = ('expireInHours' in config) - ? `${config.expireInHours} hours` + ? config.expireInHours * 60 * 60 : ('expireInMinutes' in config) - ? `${config.expireInMinutes} minutes` + ? config.expireInMinutes * 60 : ('expireInSeconds' in config) - ? `${config.expireInSeconds} seconds` - : defaults - ? defaults.expireIn - : '15 minutes' + ? config.expireInSeconds + : null + + assert(!expireIn || expireIn / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: expiration cannot exceed ${MAX_INTERVAL_HOURS} hours`) config.expireIn = expireIn + config.expireInDefault = defaults?.expireIn } function applyRetryConfig (config, defaults) { @@ -282,35 +278,23 @@ function applyRetryConfig (config, defaults) { assert(!('retryLimit' in config) || (Number.isInteger(config.retryLimit) && config.retryLimit >= 0), 'retryLimit must be an integer >= 0') assert(!('retryBackoff' in config) || (config.retryBackoff === true || config.retryBackoff === false), 'retryBackoff must be either true or false') - if (defaults) { - config.retryDelay = config.retryDelay || defaults.retryDelay - config.retryLimit = config.retryLimit || defaults.retryLimit - config.retryBackoff = config.retryBackoff || defaults.retryBackoff - } - - config.retryDelay = config.retryDelay || 0 - config.retryLimit = config.retryLimit || 0 - config.retryBackoff = !!config.retryBackoff - config.retryDelay = (config.retryBackoff && 
!config.retryDelay) ? 1 : config.retryDelay - config.retryLimit = (config.retryDelay && !config.retryLimit) ? 1 : config.retryLimit + config.retryDelayDefault = defaults?.retryDelay + config.retryLimitDefault = defaults?.retryLimit + config.retryBackoffDefault = defaults?.retryBackoff } function applyNewJobCheckInterval (config, defaults) { - const second = 1000 - - assert(!('newJobCheckInterval' in config) || config.newJobCheckInterval >= 100, - 'configuration assert: newJobCheckInterval must be at least every 100ms') + assert(!('newJobCheckInterval' in config) || config.newJobCheckInterval >= 500, + 'configuration assert: newJobCheckInterval must be at least every 500ms') assert(!('newJobCheckIntervalSeconds' in config) || config.newJobCheckIntervalSeconds >= 1, 'configuration assert: newJobCheckIntervalSeconds must be at least every second') config.newJobCheckInterval = ('newJobCheckIntervalSeconds' in config) - ? config.newJobCheckIntervalSeconds * second + ? config.newJobCheckIntervalSeconds * 1000 : ('newJobCheckInterval' in config) ? config.newJobCheckInterval - : defaults - ? defaults.newJobCheckInterval - : second * 2 + : defaults?.newJobCheckInterval || 2000 } function applyMaintenanceConfig (config) { @@ -325,6 +309,12 @@ function applyMaintenanceConfig (config) { : ('maintenanceIntervalSeconds' in config) ? config.maintenanceIntervalSeconds : 120 + + assert(config.maintenanceIntervalSeconds / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: maintenance interval cannot exceed ${MAX_INTERVAL_HOURS} hours`) + + config.schedule = ('schedule' in config) ? config.schedule : true + config.supervise = ('supervise' in config) ? config.supervise : true + config.migrate = ('migrate' in config) ? config.migrate : true } function applyDeleteConfig (config) { @@ -367,6 +357,10 @@ function applyMonitoringConfig (config) { ? 
config.monitorStateIntervalSeconds : null + if (config.monitorStateIntervalSeconds) { + assert(config.monitorStateIntervalSeconds / 60 / 60 < MAX_INTERVAL_HOURS, `configuration assert: state monitoring interval cannot exceed ${MAX_INTERVAL_HOURS} hours`) + } + const TEN_MINUTES_IN_SECONDS = 600 assert(!('clockMonitorIntervalSeconds' in config) || (config.clockMonitorIntervalSeconds >= 1 && config.clockMonitorIntervalSeconds <= TEN_MINUTES_IN_SECONDS), @@ -399,11 +393,6 @@ function applyMonitoringConfig (config) { : 4 } -function applyUuidConfig (config) { - assert(!('uuid' in config) || config.uuid === 'v1' || config.uuid === 'v4', 'configuration assert: uuid option only supports v1 or v4') - config.uuid = config.uuid || 'v4' -} - function warnClockSkew (message) { emitWarning(WARNINGS.CLOCK_SKEW, message, { force: true }) } diff --git a/src/boss.js b/src/boss.js index 01a47c91..fd058631 100644 --- a/src/boss.js +++ b/src/boss.js @@ -1,12 +1,6 @@ const EventEmitter = require('events') const plans = require('./plans') -const { states } = require('./plans') -const { COMPLETION_JOB_PREFIX } = plans - -const queues = { - MAINTENANCE: '__pgboss__maintenance', - MONITOR_STATES: '__pgboss__monitor-states' -} +const { delay } = require('./tools') const events = { error: 'error', @@ -23,165 +17,135 @@ class Boss extends EventEmitter { this.manager = config.manager this.maintenanceIntervalSeconds = config.maintenanceIntervalSeconds - - this.monitorStates = config.monitorStateIntervalSeconds !== null - - if (this.monitorStates) { - this.monitorIntervalSeconds = config.monitorStateIntervalSeconds - } + this.monitorStateIntervalSeconds = config.monitorStateIntervalSeconds this.events = events - this.expireCommand = plans.locked(config.schema, plans.expire(config.schema)) + this.failJobsByTimeoutCommand = plans.locked(config.schema, plans.failJobsByTimeout(config.schema)) this.archiveCommand = plans.locked(config.schema, plans.archive(config.schema, config.archiveInterval, 
config.archiveFailedInterval)) - this.purgeCommand = plans.locked(config.schema, plans.purge(config.schema, config.deleteAfter)) + this.dropCommand = plans.locked(config.schema, plans.drop(config.schema, config.deleteAfter)) this.getMaintenanceTimeCommand = plans.getMaintenanceTime(config.schema) this.setMaintenanceTimeCommand = plans.setMaintenanceTime(config.schema) + this.getMonitorTimeCommand = plans.getMonitorTime(config.schema) + this.setMonitorTimeCommand = plans.setMonitorTime(config.schema) this.countStatesCommand = plans.countStates(config.schema) this.functions = [ this.expire, this.archive, - this.purge, + this.drop, this.countStates, - this.getQueueNames + this.maintain ] } async supervise () { - this.metaMonitor() + this.maintenanceInterval = setInterval(() => this.onSupervise(), this.maintenanceIntervalSeconds * 1000) + } - await this.manager.deleteQueue(COMPLETION_JOB_PREFIX + queues.MAINTENANCE) - await this.manager.deleteQueue(queues.MAINTENANCE) + async monitor () { + this.monitorInterval = setInterval(() => this.onMonitor(), this.monitorStateIntervalSeconds * 1000) + } - await this.maintenanceAsync() + async onMonitor () { + let locker - const maintenanceWorkOptions = { - newJobCheckIntervalSeconds: Math.max(1, this.maintenanceIntervalSeconds / 2) - } - - await this.manager.work(queues.MAINTENANCE, maintenanceWorkOptions, (job) => this.onMaintenance(job)) + try { + if (this.monitoring) { + return + } - if (this.monitorStates) { - await this.manager.deleteQueue(COMPLETION_JOB_PREFIX + queues.MONITOR_STATES) - await this.manager.deleteQueue(queues.MONITOR_STATES) + this.monitoring = true - await this.monitorStatesAsync() + if (this.config.__test__delay_monitor) { + await delay(this.config.__test__delay_monitor) + } - const monitorStatesWorkOptions = { - newJobCheckIntervalSeconds: Math.max(1, this.monitorIntervalSeconds / 2) + if (this.config.__test__throw_monitor) { + throw new Error(this.config.__test__throw_monitor) } - await 
this.manager.work(queues.MONITOR_STATES, monitorStatesWorkOptions, (job) => this.onMonitorStates(job)) - } - } + locker = await this.db.lock({ key: 'monitor' }) - metaMonitor () { - this.metaMonitorInterval = setInterval(async () => { - try { - if (this.config.__test__throw_meta_monitor) { - throw new Error(this.config.__test__throw_meta_monitor) - } - - const { secondsAgo } = await this.getMaintenanceTime() - - if (secondsAgo > this.maintenanceIntervalSeconds * 2) { - await this.manager.deleteQueue(queues.MAINTENANCE, { before: states.completed }) - await this.maintenanceAsync() - } - } catch (err) { - this.emit(events.error, err) - } - }, this.maintenanceIntervalSeconds * 2 * 1000) - } + const { secondsAgo } = await this.getMonitorTime() - async maintenanceAsync (options = {}) { - const { startAfter } = options + if (secondsAgo > this.monitorStateIntervalSeconds && !this.stopped) { + const states = await this.countStates() + this.setMonitorTime() + this.emit(events.monitorStates, states) + } + } catch (err) { + this.emit(events.error, err) + } finally { + if (locker?.locked) { + await locker.unlock() + } - options = { - startAfter, - retentionSeconds: this.maintenanceIntervalSeconds * 4, - singletonKey: queues.MAINTENANCE, - onComplete: false + this.monitoring = false } - - await this.manager.send(queues.MAINTENANCE, null, options) } - async monitorStatesAsync (options = {}) { - const { startAfter } = options + async onSupervise () { + let locker - options = { - startAfter, - retentionSeconds: this.monitorIntervalSeconds * 4, - singletonKey: queues.MONITOR_STATES, - onComplete: false - } + try { + if (this.maintaining) { + return + } - await this.manager.send(queues.MONITOR_STATES, null, options) - } + this.maintaining = true + + if (this.config.__test__delay_maintenance && !this.stopped) { + this.__testDelayPromise = delay(this.config.__test__delay_maintenance) + await this.__testDelayPromise + } - async onMaintenance (job) { - try { if 
(this.config.__test__throw_maint) { throw new Error(this.config.__test__throw_maint) } - const started = Date.now() - - await this.expire() - await this.archive() - await this.purge() - - const ended = Date.now() + if (this.stopped) { + return + } - await this.setMaintenanceTime() + locker = await this.db.lock({ key: 'maintenance' }) - this.emit('maintenance', { ms: ended - started }) + const { secondsAgo } = await this.getMaintenanceTime() - if (!this.stopped) { - await this.manager.complete(job.id) // pre-complete to bypass throttling - await this.maintenanceAsync({ startAfter: this.maintenanceIntervalSeconds }) + if (secondsAgo > this.maintenanceIntervalSeconds) { + const result = await this.maintain() + this.emit(events.maintenance, result) } } catch (err) { this.emit(events.error, err) + } finally { + if (locker?.locked) { + await locker.unlock() + } + + this.maintaining = false } } - async onMonitorStates (job) { - try { - if (this.config.__test__throw_monitor) { - throw new Error(this.config.__test__throw_monitor) - } + async maintain () { + const started = Date.now() - const states = await this.countStates() + !this.stopped && await this.expire() + !this.stopped && await this.archive() + !this.stopped && await this.drop() - this.emit(events.monitorStates, states) + const ended = Date.now() - if (!this.stopped && this.monitorStates) { - await this.manager.complete(job.id) // pre-complete to bypass throttling - await this.monitorStatesAsync({ startAfter: this.monitorIntervalSeconds }) - } - } catch (err) { - this.emit(events.error, err) - } + await this.setMaintenanceTime() + + return { ms: ended - started } } async stop () { - if (this.config.__test__throw_stop) { - throw new Error(this.config.__test__throw_stop) - } - if (!this.stopped) { - if (this.metaMonitorInterval) { - clearInterval(this.metaMonitorInterval) - } - - await this.manager.offWork(queues.MAINTENANCE) - - if (this.monitorStates) { - await this.manager.offWork(queues.MONITOR_STATES) - } + if 
(this.__testDelayPromise) this.__testDelayPromise.abort() + if (this.maintenanceInterval) clearInterval(this.maintenanceInterval) + if (this.monitorInterval) clearInterval(this.monitorInterval) this.stopped = true } @@ -193,7 +157,7 @@ class Boss extends EventEmitter { Object.keys(stateCountDefault) .forEach(key => { stateCountDefault[key] = 0 }) - const counts = await this.executeSql(this.countStatesCommand) + const counts = await this.db.executeSql(this.countStatesCommand) const states = counts.rows.reduce((acc, item) => { if (item.name) { @@ -213,43 +177,44 @@ class Boss extends EventEmitter { } async expire () { - await this.executeSql(this.expireCommand) + await this.db.executeSql(this.failJobsByTimeoutCommand) } async archive () { - await this.executeSql(this.archiveCommand) + await this.db.executeSql(this.archiveCommand) } - async purge () { - await this.executeSql(this.purgeCommand) + async drop () { + await this.db.executeSql(this.dropCommand) } async setMaintenanceTime () { - await this.executeSql(this.setMaintenanceTimeCommand) + await this.db.executeSql(this.setMaintenanceTimeCommand) } async getMaintenanceTime () { - if (!this.stopped) { - const { rows } = await this.db.executeSql(this.getMaintenanceTimeCommand) + const { rows } = await this.db.executeSql(this.getMaintenanceTimeCommand) - let { maintained_on: maintainedOn, seconds_ago: secondsAgo } = rows[0] + let { maintained_on: maintainedOn, seconds_ago: secondsAgo } = rows[0] - secondsAgo = secondsAgo !== null ? parseFloat(secondsAgo) : this.maintenanceIntervalSeconds * 10 + secondsAgo = secondsAgo !== null ? 
parseFloat(secondsAgo) : 999_999_999 - return { maintainedOn, secondsAgo } - } + return { maintainedOn, secondsAgo } } - getQueueNames () { - return queues + async setMonitorTime () { + await this.db.executeSql(this.setMonitorTimeCommand) } - async executeSql (sql, params) { - if (!this.stopped) { - return await this.db.executeSql(sql, params) - } + async getMonitorTime () { + const { rows } = await this.db.executeSql(this.getMonitorTimeCommand) + + let { monitored_on: monitoredOn, seconds_ago: secondsAgo } = rows[0] + + secondsAgo = secondsAgo !== null ? parseFloat(secondsAgo) : 999_999_999 + + return { monitoredOn, secondsAgo } } } module.exports = Boss -module.exports.QUEUES = queues diff --git a/src/contractor.js b/src/contractor.js index a77bad9b..7c2dd242 100644 --- a/src/contractor.js +++ b/src/contractor.js @@ -47,6 +47,20 @@ class Contractor { } } + async check () { + const installed = await this.isInstalled() + + if (!installed) { + throw new Error('pg-boss is not installed') + } + + const version = await this.version() + + if (schemaVersion !== version) { + throw new Error('pg-boss database requires migrations') + } + } + async create () { try { const commands = plans.create(this.config.schema, schemaVersion) diff --git a/src/db.js b/src/db.js index 97a6261f..bc19bfe7 100644 --- a/src/db.js +++ b/src/db.js @@ -1,5 +1,6 @@ const EventEmitter = require('events') const pg = require('pg') +const { advisoryLock } = require('./plans') class Db extends EventEmitter { constructor (config) { @@ -25,10 +26,49 @@ class Db extends EventEmitter { async executeSql (text, values) { if (this.opened) { + // if (this.config.debug === true) { + // console.log(`${new Date().toISOString()}: DEBUG SQL`) + // console.log(text) + + // if (values) { + // console.log(`${new Date().toISOString()}: DEBUG VALUES`) + // console.log(values) + // } + // } + return await this.pool.query(text, values) } } + async lock ({ timeout = 30, key } = {}) { + // const lockedClient = new 
pg.Client(this.config) + // await lockedClient.connect() + const lockedClient = await this.pool.connect() + + const query = ` + BEGIN; + SET LOCAL lock_timeout = '${timeout}s'; + SET LOCAL idle_in_transaction_session_timeout = '3600s'; + ${advisoryLock(this.config.schema, key)}; + ` + + await lockedClient.query(query) + + const locker = { + locked: true, + unlock: async function () { + try { + await lockedClient.query('COMMIT') + await lockedClient.end() + } finally { + this.locked = false + } + } + } + + return locker + } + static quotePostgresStr (str) { const delimeter = '$sanitize$' if (str.includes(delimeter)) { diff --git a/src/index.js b/src/index.js index 775bc345..cc8dcc23 100644 --- a/src/index.js +++ b/src/index.js @@ -6,7 +6,7 @@ const Manager = require('./manager') const Timekeeper = require('./timekeeper') const Boss = require('./boss') const Db = require('./db') -const delay = require('delay') +const { delay } = require('./tools') const events = { error: 'error', @@ -72,16 +72,7 @@ class PgBoss extends EventEmitter { } function promoteFunction (obj, func) { - this[func.name] = (...args) => { - const shouldRun = !this.started || !((func.name === 'work' || func.name === 'onComplete') && (this.stopped || this.stoppingOn)) - - if (shouldRun) { - return func.apply(obj, args) - } else { - const state = this.stoppingOn ? 'stopping' : this.stopped ? 'stopped' : !this.started ? 
'not started' : 'started' - return Promise.reject(new Error(`pg-boss is ${state}.`)) - } - } + this[func.name] = (...args) => func.apply(obj, args) } function promoteEvent (emitter, event) { @@ -90,42 +81,49 @@ class PgBoss extends EventEmitter { } async start () { - if (!this.stopped) { - return this + if (this.starting || this.started) { + return } + this.starting = true + if (this.db.isOurs && !this.db.opened) { await this.db.open() } - await this.contractor.start() - - this.stopped = false - this.started = true + if (this.config.migrate) { + await this.contractor.start() + } else { + await this.contractor.check() + } this.manager.start() - if (!this.config.noSupervisor) { + if (this.config.supervise) { await this.boss.supervise() } - if (!this.config.noScheduling) { + if (this.config.monitorStateIntervalSeconds) { + await this.boss.monitor() + } + + if (this.config.schedule) { await this.timekeeper.start() } + this.starting = false + this.started = true + this.stopped = false + return this } async stop (options = {}) { - if (this.stoppingOn) { + if (this.stoppingOn || this.stopped) { return } - if (this.stopped) { - this.emit(events.stopped) - } - - let { destroy = false, graceful = true, timeout = 30000 } = options + let { destroy = false, graceful = true, timeout = 30000, wait = true } = options timeout = Math.max(timeout, 1000) @@ -133,47 +131,59 @@ class PgBoss extends EventEmitter { await this.manager.stop() await this.timekeeper.stop() + await this.boss.stop() - const shutdown = async () => { - this.stopped = true - this.stoppingOn = null + await new Promise((resolve, reject) => { + const shutdown = async () => { + try { + if (this.config.__test__throw_shutdown) { + throw new Error(this.config.__test__throw_shutdown) + } - if (this.db.isOurs && this.db.opened && destroy) { - await this.db.close() - } + await this.manager.failWip() - this.emit(events.stopped) - } + if (this.db.isOurs && this.db.opened && destroy) { + await this.db.close() + } - if 
(!graceful) { - await this.boss.stop() - await shutdown() - return - } + this.stopped = true + this.stoppingOn = null + this.started = false - setImmediate(async () => { - let closing = false + this.emit(events.stopped) + resolve() + } catch (err) { + this.emit(events.error, err) + reject(err) + } + } - try { - while (Date.now() - this.stoppingOn < timeout) { - if (this.manager.getWipData({ includeInternal: closing }).length === 0) { - if (closing) { - break - } + if (!graceful) { + return shutdown() + } - closing = true + if (!wait) { + resolve() + } - await this.boss.stop() + setImmediate(async () => { + try { + if (this.config.__test__throw_stop_monitor) { + throw new Error(this.config.__test__throw_stop_monitor) } - await delay(1000) - } + const isWip = () => this.manager.getWipData({ includeInternal: false }).length > 0 - await this.boss.stop() - await shutdown() - } catch (err) { - this.emit(events.error, err) - } + while ((Date.now() - this.stoppingOn) < timeout && isWip()) { + await delay(500) + } + + await shutdown() + } catch (err) { + reject(err) + this.emit(events.error, err) + } + }) }) } } diff --git a/src/manager.js b/src/manager.js index 29727f03..1021bfcc 100644 --- a/src/manager.js +++ b/src/manager.js @@ -1,21 +1,19 @@ const assert = require('assert') const EventEmitter = require('events') -const delay = require('delay') -const uuid = require('uuid') +const { randomUUID } = require('crypto') const debounce = require('lodash.debounce') const { serializeError: stringify } = require('serialize-error') +const pMap = require('p-map') +const { delay } = require('./tools') const Attorney = require('./attorney') const Worker = require('./worker') +const plans = require('./plans') const Db = require('./db') -const pMap = require('p-map') -const { QUEUES: BOSS_QUEUES } = require('./boss') const { QUEUES: TIMEKEEPER_QUEUES } = require('./timekeeper') +const { QUEUE_POLICY } = plans -const INTERNAL_QUEUES = 
Object.values(BOSS_QUEUES).concat(Object.values(TIMEKEEPER_QUEUES)).reduce((acc, i) => ({ ...acc, [i]: i }), {}) - -const plans = require('./plans') -const { COMPLETION_JOB_PREFIX, SINGLETON_QUEUE_KEY } = plans +const INTERNAL_QUEUES = Object.values(TIMEKEEPER_QUEUES).reduce((acc, i) => ({ ...acc, [i]: i }), {}) const WIP_EVENT_INTERVAL = 2000 const WIP_DEBOUNCE_OPTIONS = { leading: true, trailing: true, maxWait: WIP_EVENT_INTERVAL } @@ -27,16 +25,14 @@ const events = { const resolveWithinSeconds = async (promise, seconds) => { const timeout = Math.max(1, seconds) * 1000 - const reject = delay.reject(timeout, { value: new Error(`handler execution exceeded ${timeout}ms`) }) + const reject = delay(timeout, `handler execution exceeded ${timeout}ms`) let result try { result = await Promise.race([promise, reject]) } finally { - try { - reject.clear() - } catch {} + reject.abort() } return result @@ -58,7 +54,7 @@ class Manager extends EventEmitter { this.completeJobsCommand = plans.completeJobs(config.schema) this.cancelJobsCommand = plans.cancelJobs(config.schema) this.resumeJobsCommand = plans.resumeJobs(config.schema) - this.failJobsCommand = plans.failJobs(config.schema) + this.failJobsByIdCommand = plans.failJobsById(config.schema) this.getJobByIdCommand = plans.getJobById(config.schema) this.getArchivedJobByIdCommand = plans.getArchivedJobById(config.schema) this.subscribeCommand = plans.subscribe(config.schema) @@ -72,12 +68,9 @@ class Manager extends EventEmitter { this.resume, this.fail, this.fetch, - this.fetchCompleted, this.work, this.offWork, this.notifyWorker, - this.onComplete, - this.offComplete, this.publish, this.subscribe, this.unsubscribe, @@ -85,13 +78,14 @@ class Manager extends EventEmitter { this.send, this.sendDebounced, this.sendThrottled, - this.sendOnce, this.sendAfter, - this.sendSingleton, + this.createQueue, + this.updateQueue, + this.getQueue, this.deleteQueue, - this.deleteAllQueues, - this.clearStorage, + this.purgeQueue, 
this.getQueueSize, + this.clearStorage, this.getJobById ] @@ -99,27 +93,30 @@ class Manager extends EventEmitter { } start () { - this.stopping = false + this.stopped = false } async stop () { - this.stopping = true + this.stopped = true - for (const sub of this.workers.values()) { - if (!INTERNAL_QUEUES[sub.name]) { - await this.offWork(sub.name) + for (const worker of this.workers.values()) { + if (!INTERNAL_QUEUES[worker.name]) { + await this.offWork(worker.name) } } } - async work (name, ...args) { - const { options, callback } = Attorney.checkWorkArgs(name, args, this.config) - return await this.watch(name, options, callback) + async failWip () { + const jobIds = Array.from(this.workers.values()).flatMap(w => w.jobs.map(j => j.id)) + + if (jobIds.length) { + await this.fail(jobIds, 'pg-boss shut down while active') + } } - async onComplete (name, ...args) { + async work (name, ...args) { const { options, callback } = Attorney.checkWorkArgs(name, args, this.config) - return await this.watch(COMPLETION_JOB_PREFIX + name, options, callback) + return await this.watch(name, options, callback) } addWorker (worker) { @@ -175,8 +172,8 @@ class Manager extends EventEmitter { } async watch (name, options, callback) { - if (this.stopping) { - throw new Error('Workers are disabled. pg-boss is stopping.') + if (this.stopped) { + throw new Error('Workers are disabled. 
pg-boss is stopped') } const { @@ -186,10 +183,10 @@ class Manager extends EventEmitter { teamConcurrency = 1, teamRefill: refill = false, includeMetadata = false, - enforceSingletonQueueActiveLimit = false + priority = true } = options - const id = uuid.v4() + const id = randomUUID({ disableEntropyCache: true }) let queueSize = 0 @@ -210,7 +207,7 @@ class Manager extends EventEmitter { createTeamRefillPromise() } - const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata, enforceSingletonQueueActiveLimit }) + const fetch = () => this.fetch(name, batchSize || (teamSize - queueSize), { includeMetadata, priority }) const onFetch = async (jobs) => { if (this.config.__test__throw_worker) { @@ -329,39 +326,11 @@ class Manager extends EventEmitter { return await Promise.all(result.rows.map(({ name }) => this.send(name, ...args))) } - async offComplete (value) { - if (typeof value === 'string') { - value = COMPLETION_JOB_PREFIX + value - } - - return await this.offWork(value) - } - async send (...args) { const { name, data, options } = Attorney.checkSendArgs(args, this.config) return await this.createJob(name, data, options) } - async sendOnce (name, data, options, key) { - options = options ? { ...options } : {} - - options.singletonKey = key || name - - const result = Attorney.checkSendArgs([name, data, options], this.config) - - return await this.createJob(result.name, result.data, result.options) - } - - async sendSingleton (name, data, options) { - options = options ? { ...options } : {} - - options.singletonKey = SINGLETON_QUEUE_KEY - - const result = Attorney.checkSendArgs([name, data, options], this.config) - - return await this.createJob(result.name, result.data, result.options) - } - async sendAfter (name, data, options, after) { options = options ? 
{ ...options } : {} options.startAfter = after @@ -395,37 +364,47 @@ class Manager extends EventEmitter { async createJob (name, data, options, singletonOffset = 0) { const { + id = null, db: wrapper, - expireIn, priority, startAfter, - keepUntil, singletonKey = null, singletonSeconds, - retryBackoff, + deadLetter = null, + expireIn, + expireInDefault, + keepUntil, + keepUntilDefault, retryLimit, + retryLimitDefault, retryDelay, - onComplete + retryDelayDefault, + retryBackoff, + retryBackoffDefault } = options - const id = uuid[this.config.uuid]() - const values = [ id, // 1 name, // 2 - priority, // 3 - retryLimit, // 4 + data, // 3 + priority, // 4 startAfter, // 5 - expireIn, // 6 - data, // 7 - singletonKey, // 8 - singletonSeconds, // 9 - singletonOffset, // 10 - retryDelay, // 11 - retryBackoff, // 12 - keepUntil, // 13 - onComplete // 14 + singletonKey, // 6 + singletonSeconds, // 7 + singletonOffset, // 8 + deadLetter, // 9 + expireIn, // 10 + expireInDefault, // 11 + keepUntil, // 12 + keepUntilDefault, // 13 + retryLimit, // 14 + retryLimitDefault, // 15 + retryDelay, // 16 + retryDelayDefault, // 17 + retryBackoff, // 18 + retryBackoffDefault // 19 ] + const db = wrapper || this.db const result = await db.executeSql(this.insertJobCommand, values) @@ -449,11 +428,20 @@ class Manager extends EventEmitter { } async insert (jobs, options = {}) { - const { db: wrapper } = options - const db = wrapper || this.db - const checkedJobs = Attorney.checkInsertArgs(jobs) - const data = JSON.stringify(checkedJobs) - return await db.executeSql(this.insertJobsCommand, [data]) + assert(Array.isArray(jobs), 'jobs argument should be an array') + + const db = options.db || this.db + + const params = [ + JSON.stringify(jobs), // 1 + this.config.expireIn, // 2 + this.config.keepUntil, // 3 + this.config.retryLimit, // 4 + this.config.retryDelay, // 5 + this.config.retryBackoff // 6 + ] + + return await db.executeSql(this.insertJobsCommand, params) } getDebounceStartAfter 
(singletonSeconds, clockOffset) { @@ -474,27 +462,34 @@ class Manager extends EventEmitter { } async fetch (name, batchSize, options = {}) { + const patternMatch = Attorney.queueNameHasPatternMatch(name) const values = Attorney.checkFetchArgs(name, batchSize, options) const db = options.db || this.db - const preparedStatement = this.nextJobCommand(options.includeMetadata || false, options.enforceSingletonQueueActiveLimit || false) + const nextJobSql = this.nextJobCommand({ ...options, patternMatch }) const statementValues = [values.name, batchSize || 1] let result - if (options.enforceSingletonQueueActiveLimit && !options.db) { - // Prepare/format now and send multi-statement transaction - const fetchQuery = preparedStatement - .replace('$1', Db.quotePostgresStr(statementValues[0])) - .replace('$2', statementValues[1].toString()) - // eslint-disable-next-line no-unused-vars - const [_begin, _setLocal, fetchResult, _commit] = await db.executeSql([ - 'BEGIN', - 'SET LOCAL jit = OFF', // JIT can slow things down significantly - fetchQuery, - 'COMMIT' - ].join(';\n')) - result = fetchResult - } else { - result = await db.executeSql(preparedStatement, statementValues) + + try { + if (!options.db) { + // Prepare/format now and send multi-statement transaction + const fetchQuery = nextJobSql + .replace('$1', Db.quotePostgresStr(statementValues[0])) + .replace('$2', statementValues[1].toString()) + + // eslint-disable-next-line no-unused-vars + const [_begin, _setLocal, fetchResult, _commit] = await db.executeSql([ + 'BEGIN', + 'SET LOCAL jit = OFF', // JIT can slow things down significantly + fetchQuery, + 'COMMIT' + ].join(';\n')) + result = fetchResult + } else { + result = await db.executeSql(nextJobSql, statementValues) + } + } catch (err) { + // errors from fetchquery should only be unique constraint violations } if (!result || result.rows.length === 0) { @@ -504,10 +499,6 @@ class Manager extends EventEmitter { return result.rows.length === 1 && !batchSize ? 
result.rows[0] : result.rows } - async fetchCompleted (name, batchSize, options = {}) { - return await this.fetch(COMPLETION_JOB_PREFIX + name, batchSize, options) - } - mapCompletionIdArg (id, funcName) { const errorMessage = `${funcName}() requires an id` @@ -548,7 +539,7 @@ class Manager extends EventEmitter { async fail (id, data, options = {}) { const db = options.db || this.db const ids = this.mapCompletionIdArg(id, 'fail') - const result = await db.executeSql(this.failJobsCommand, [ids, this.mapCompletionDataArg(data)]) + const result = await db.executeSql(this.failJobsByIdCommand, [ids, this.mapCompletionDataArg(data)]) return this.mapCompletionResponse(ids, result) } @@ -566,17 +557,122 @@ class Manager extends EventEmitter { return this.mapCompletionResponse(ids, result) } - async deleteQueue (queue, options) { - assert(queue, 'Missing queue name argument') - const sql = plans.deleteQueue(this.config.schema, options) - const result = await this.db.executeSql(sql, [queue]) - return result ? 
result.rowCount : null + async createQueue (name, options = {}) { + assert(name, 'Missing queue name argument') + + const { policy = QUEUE_POLICY.standard } = options + + assert(policy in QUEUE_POLICY, `${policy} is not a valid queue policy`) + + const { + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + } = Attorney.checkQueueArgs(name, options) + + const paritionSql = plans.partitionCreateJobName(this.config.schema, name) + + await this.db.executeSql(paritionSql) + + const sql = plans.createQueue(this.config.schema, name) + + const params = [ + name, + policy, + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + ] + + await this.db.executeSql(sql, params) + } + + async updateQueue (name, options = {}) { + assert(name, 'Missing queue name argument') + + const { + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + } = Attorney.checkQueueArgs(name, options) + + const sql = plans.updateQueue(this.config.schema) + + const params = [ + name, + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + ] + + await this.db.executeSql(sql, params) } - async deleteAllQueues (options) { - const sql = plans.deleteAllQueues(this.config.schema, options) - const result = await this.db.executeSql(sql) - return result ? 
result.rowCount : null + async getQueue (name) { + assert(name, 'Missing queue name argument') + + const sql = plans.getQueueByName(this.config.schema) + const result = await this.db.executeSql(sql, [name]) + + if (result.rows.length === 0) { + return null + } + + const { + policy, + retry_limit: retryLimit, + retry_delay: retryDelay, + retry_backoff: retryBackoff, + expire_seconds: expireInSeconds, + retention_minutes: retentionMinutes, + dead_letter: deadLetter + } = result.rows[0] + + return { + name, + policy, + retryLimit, + retryDelay, + retryBackoff, + expireInSeconds, + retentionMinutes, + deadLetter + } + } + + async deleteQueue (name) { + assert(name, 'Missing queue name argument') + + const queueSql = plans.getQueueByName(this.config.schema) + const result = await this.db.executeSql(queueSql, [name]) + + if (result?.rows?.length) { + Attorney.assertPostgresObjectName(name) + const sql = plans.dropJobTablePartition(this.config.schema, name) + await this.db.executeSql(sql) + } + + const sql = plans.deleteQueueRecords(this.config.schema) + const result2 = await this.db.executeSql(sql, [name]) + return result2?.rowCount || null + } + + async purgeQueue (queue) { + assert(queue, 'Missing queue name argument') + const sql = plans.purgeQueue(this.config.schema) + await this.db.executeSql(sql, [queue]) } async clearStorage () { diff --git a/src/migrationStore.js b/src/migrationStore.js index 08fa1b44..22d192c1 100644 --- a/src/migrationStore.js +++ b/src/migrationStore.js @@ -64,6 +64,133 @@ function migrate (value, version, migrations) { function getAll (schema) { return [ + { + release: '10.0.0', + version: 21, + previous: 20, + install: [ + `DROP INDEX ${schema}.job_singletonKey`, + `DROP INDEX ${schema}.job_singleton_queue`, + `DROP INDEX ${schema}.job_singletonOn`, + `DROP INDEX ${schema}.job_singletonKeyOn`, + `DROP INDEX ${schema}.job_fetch`, + `DROP INDEX ${schema}.job_name`, + + `ALTER TABLE ${schema}.job ADD COLUMN deadletter text`, + `ALTER TABLE 
${schema}.job ADD COLUMN policy text`, + `ALTER TABLE ${schema}.job DROP COLUMN on_complete`, + + // update state enum + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, + `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, + `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, + + `DROP TABLE IF EXISTS ${schema}.archive_backup`, + `ALTER TABLE ${schema}.archive RENAME to archive_backup`, + `ALTER INDEX ${schema}.archive_archivedon_idx RENAME to archive_backup_archivedon_idx`, + + `DROP TYPE ${schema}.job_state`, + `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','cancelled','failed')`, + + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, + + `DELETE FROM ${schema}.job WHERE name LIKE '__pgboss__%'`, + + // set up job partitioning + `ALTER TABLE ${schema}.job RENAME TO job_default`, + `ALTER TABLE ${schema}.job_default DROP CONSTRAINT IF EXISTS job_pkey`, + + `CREATE TABLE ${schema}.job ( + id uuid not null default gen_random_uuid(), + name text not null, + priority integer not null default(0), + data jsonb, + state ${schema}.job_state not null default('created'), + retryLimit integer not null default(0), + retryCount integer not null default(0), + retryDelay integer not null default(0), + retryBackoff boolean not null default false, + startAfter timestamp with time zone not null default now(), + startedOn timestamp with time zone, + singletonKey text, + singletonOn timestamp without time zone, + expireIn interval not null default interval '15 minutes', + createdOn timestamp with time zone not null default now(), + completedOn timestamp with time zone, + keepUntil timestamp with time zone NOT NULL default now() + interval '14 days', + output jsonb, + deadletter text, + policy text, + CONSTRAINT job_pkey PRIMARY KEY (name, id) + ) PARTITION BY LIST (name)`, + + 
`ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_default DEFAULT`, + + `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)`, + `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (name, id)`, + `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT now()`, + `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)`, + `CREATE INDEX archive_name_idx ON ${schema}.archive(name)`, + + `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn, id) WHERE state < 'active'`, + `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)`, + `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state = 'created' AND policy = 'short'`, + `CREATE UNIQUE INDEX job_policy_singleton ON ${schema}.job (name) WHERE state = 'active' AND policy = 'singleton'`, + `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= 'active' AND policy = 'stately'`, + `CREATE UNIQUE INDEX job_throttle_key ON ${schema}.job (name, singletonKey) WHERE state <= 'completed' AND singletonOn IS NULL`, + `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= 'completed' AND singletonOn IS NOT NULL`, + + `ALTER TABLE ${schema}.version ADD COLUMN monitored_on timestamp with time zone`, + + `CREATE TABLE ${schema}.queue ( + name text primary key, + policy text, + retry_limit int, + retry_delay int, + retry_backoff bool, + expire_seconds int, + retention_minutes int, + dead_letter text, + created_on timestamp with time zone not null default now() + )` + ], + uninstall: [ + `DROP INDEX ${schema}.job_policy_stately`, + `DROP INDEX ${schema}.job_policy_short`, + `DROP INDEX ${schema}.job_policy_singleton`, + `DROP INDEX ${schema}.job_throttle_on`, + `DROP INDEX ${schema}.job_throttle_key`, + `DROP INDEX ${schema}.job_fetch`, + `DROP INDEX ${schema}.job_name`, + `ALTER TABLE ${schema}.job DETACH 
PARTITION ${schema}.job_default`, + `DROP TABLE ${schema}.job`, + `ALTER TABLE ${schema}.job_default RENAME TO job`, + `DROP TABLE IF EXISTS ${schema}.archive_backup`, + `DROP INDEX ${schema}.archive_name_idx`, + `ALTER TABLE ${schema}.job DROP COLUMN deadletter`, + `ALTER TABLE ${schema}.job DROP COLUMN policy`, + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE text`, + `ALTER TABLE ${schema}.job ALTER COLUMN state DROP DEFAULT`, + `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE text`, + `DROP TYPE ${schema}.job_state`, + `CREATE TYPE ${schema}.job_state AS ENUM ('created','retry','active','completed','expired','cancelled','failed')`, + `ALTER TABLE ${schema}.job ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.job ALTER COLUMN state SET DEFAULT 'created'::${schema}.job_state`, + `ALTER TABLE ${schema}.job ADD COLUMN on_complete bool NOT NULL DEFAULT false`, + `ALTER TABLE ${schema}.archive ALTER COLUMN state TYPE ${schema}.job_state USING state::${schema}.job_state`, + `ALTER TABLE ${schema}.archive DROP COLUMN policy`, + `ALTER TABLE ${schema}.archive DROP CONSTRAINT archive_pkey`, + `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, + `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)`, + `CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) WHERE state < 'expired' AND singletonKey IS NULL`, + `CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < 'expired'`, + `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, + `CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < 'active' AND singletonOn IS NULL AND singletonKey LIKE '\\_\\_pgboss\\_\\_singleton\\_queue%'`, + `DROP TABLE 
${schema}.queue`, + `ALTER TABLE ${schema}.version DROP COLUMN monitored_on` + ] + }, { release: '7.4.0', version: 20, @@ -97,77 +224,6 @@ function getAll (schema) { uninstall: [ `DROP TABLE ${schema}.subscription` ] - }, - { - release: '6.1.1', - version: 18, - previous: 17, - install: [ - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET DEFAULT false` - ] - }, - { - release: '6.0.0', - version: 17, - previous: 16, - install: [ - `DROP INDEX ${schema}.job_singletonKey`, - `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL AND NOT singletonKey = '__pgboss__singleton_queue'`, - `CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < 'active' AND singletonOn IS NULL AND singletonKey = '__pgboss__singleton_queue'`, - `CREATE INDEX IF NOT EXISTS job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < 'active'`, - `ALTER TABLE ${schema}.job ADD output jsonb`, - `ALTER TABLE ${schema}.archive ADD output jsonb`, - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET DEFAULT false`, - `ALTER TABLE ${schema}.job ALTER COLUMN keepuntil SET DEFAULT now() + interval '14 days'` - ], - uninstall: [ - `DROP INDEX ${schema}.job_fetch`, - `DROP INDEX ${schema}.job_singleton_queue`, - `DROP INDEX ${schema}.job_singletonKey`, - `CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < 'completed' AND singletonOn IS NULL`, - `ALTER TABLE ${schema}.job DROP COLUMN output`, - `ALTER TABLE ${schema}.archive DROP COLUMN output`, - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET DEFAULT true`, - `ALTER TABLE ${schema}.job ALTER COLUMN keepuntil SET DEFAULT now() + interval '30 days'` - ] - }, - { - release: '5.2.0', - version: 16, - previous: 15, - install: [ - `ALTER TABLE ${schema}.job ADD on_complete boolean`, - `UPDATE ${schema}.job SET on_complete = true`, - `ALTER TABLE ${schema}.job ALTER COLUMN 
on_complete SET DEFAULT true`, - `ALTER TABLE ${schema}.job ALTER COLUMN on_complete SET NOT NULL`, - `ALTER TABLE ${schema}.archive ADD on_complete boolean` - ], - uninstall: [ - `ALTER TABLE ${schema}.job DROP COLUMN on_complete`, - `ALTER TABLE ${schema}.archive DROP COLUMN on_complete` - ] - }, - { - release: '5.0.6', - version: 15, - previous: 14, - install: [ - `ALTER TABLE ${schema}.version ADD cron_on timestamp with time zone` - ], - uninstall: [ - `ALTER TABLE ${schema}.version DROP COLUMN cron_on` - ] - }, - { - release: '5.0.0', - version: 14, - previous: 13, - install: [ - `ALTER TABLE ${schema}.version ADD maintained_on timestamp with time zone` - ], - uninstall: [ - `ALTER TABLE ${schema}.version DROP COLUMN maintained_on` - ] } ] } diff --git a/src/plans.js b/src/plans.js index 73dc0014..3f158483 100644 --- a/src/plans.js +++ b/src/plans.js @@ -5,19 +5,21 @@ const states = { retry: 'retry', active: 'active', completed: 'completed', - expired: 'expired', cancelled: 'cancelled', failed: 'failed' } const DEFAULT_SCHEMA = 'pgboss' -const COMPLETION_JOB_PREFIX = `__state__${states.completed}__` -const SINGLETON_QUEUE_KEY = '__pgboss__singleton_queue' -const SINGLETON_QUEUE_KEY_ESCAPED = SINGLETON_QUEUE_KEY.replace(/_/g, '\\_') - const MIGRATE_RACE_MESSAGE = 'division by zero' const CREATE_RACE_MESSAGE = 'already exists' +const QUEUE_POLICY = { + standard: 'standard', + short: 'short', + singleton: 'singleton', + stately: 'stately' +} + module.exports = { create, insertVersion, @@ -28,7 +30,8 @@ module.exports = { completeJobs, cancelJobs, resumeJobs, - failJobs, + failJobsById, + failJobsByTimeout, insertJob, insertJobs, getTime, @@ -38,62 +41,64 @@ module.exports = { subscribe, unsubscribe, getQueuesForEvent, - expire, archive, - purge, + drop, countStates, - deleteQueue, - deleteAllQueues, - clearStorage, + createQueue, + updateQueue, + partitionCreateJobName, + dropJobTablePartition, + deleteQueueRecords, + getQueueByName, getQueueSize, + purgeQueue, + 
clearStorage, getMaintenanceTime, setMaintenanceTime, + getMonitorTime, + setMonitorTime, getCronTime, setCronTime, locked, + advisoryLock, assertMigration, getArchivedJobById, getJobById, + QUEUE_POLICY, states: { ...states }, - COMPLETION_JOB_PREFIX, - SINGLETON_QUEUE_KEY, MIGRATE_RACE_MESSAGE, CREATE_RACE_MESSAGE, DEFAULT_SCHEMA } -function locked (schema, query) { - if (Array.isArray(query)) { - query = query.join(';\n') - } - - return ` - BEGIN; - SET LOCAL statement_timeout = '30s'; - ${advisoryLock(schema)}; - ${query}; - COMMIT; - ` -} - function create (schema, version) { const commands = [ createSchema(schema), - createVersionTable(schema), - createJobStateEnum(schema), - createJobTable(schema), - cloneJobTableForArchive(schema), - createScheduleTable(schema), - createSubscriptionTable(schema), - addIdIndexToArchive(schema), - addArchivedOnToArchive(schema), - addArchivedOnIndexToArchive(schema), + createEnumJobState(schema), + + createTableJob(schema), + createTableJobDefault(schema), + attachPartitionJobDefault(schema), createIndexJobName(schema), createIndexJobFetch(schema), - createIndexSingletonOn(schema), - createIndexSingletonKeyOn(schema), - createIndexSingletonKey(schema), - createIndexSingletonQueue(schema), + createIndexJobPolicyStately(schema), + createIndexJobPolicyShort(schema), + createIndexJobPolicySingleton(schema), + createIndexJobThrottleOn(schema), + createIndexJobThrottleKey(schema), + + createTableArchive(schema), + createPrimaryKeyArchive(schema), + createColumnArchiveArchivedOn(schema), + createIndexArchiveArchivedOn(schema), + createIndexArchiveName(schema), + createArchiveBackupTable(schema), + + createTableVersion(schema), + createTableQueue(schema), + createTableSchedule(schema), + createTableSubscription(schema), + insertVersion(schema, version) ] @@ -106,17 +111,18 @@ function createSchema (schema) { ` } -function createVersionTable (schema) { +function createTableVersion (schema) { return ` CREATE TABLE ${schema}.version ( 
version int primary key, maintained_on timestamp with time zone, - cron_on timestamp with time zone + cron_on timestamp with time zone, + monitored_on timestamp with time zone ) ` } -function createJobStateEnum (schema) { +function createEnumJobState (schema) { // ENUM definition order is important // base type is numeric and first values are less than last values return ` @@ -125,17 +131,16 @@ function createJobStateEnum (schema) { '${states.retry}', '${states.active}', '${states.completed}', - '${states.expired}', '${states.cancelled}', '${states.failed}' ) ` } -function createJobTable (schema) { +function createTableJob (schema) { return ` CREATE TABLE ${schema}.job ( - id uuid primary key not null default gen_random_uuid(), + id uuid not null default gen_random_uuid(), name text not null, priority integer not null default(0), data jsonb, @@ -152,34 +157,100 @@ function createJobTable (schema) { createdOn timestamp with time zone not null default now(), completedOn timestamp with time zone, keepUntil timestamp with time zone NOT NULL default now() + interval '14 days', - on_complete boolean not null default false, - output jsonb - ) + output jsonb, + deadletter text, + policy text, + CONSTRAINT job_pkey PRIMARY KEY (name, id) + ) PARTITION BY LIST (name) + ` +} + +function createTableJobDefault (schema) { + return `CREATE TABLE ${schema}.job_default (LIKE ${schema}.job INCLUDING DEFAULTS INCLUDING CONSTRAINTS)` +} + +function attachPartitionJobDefault (schema) { + return `ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_default DEFAULT` +} + +function partitionCreateJobName (schema, name) { + return ` + CREATE TABLE ${schema}.job_${name} (LIKE ${schema}.job INCLUDING DEFAULTS INCLUDING CONSTRAINTS); + ALTER TABLE ${schema}.job_${name} ADD CONSTRAINT job_check_${name} CHECK (name='${name}'); + ALTER TABLE ${schema}.job ATTACH PARTITION ${schema}.job_${name} FOR VALUES IN ('${name}'); ` } -function cloneJobTableForArchive (schema) { +function 
dropJobTablePartition (schema, name) { + return `DROP TABLE IF EXISTS ${schema}.job_${name}` +} + +function createPrimaryKeyArchive (schema) { + return `ALTER TABLE ${schema}.archive ADD CONSTRAINT archive_pkey PRIMARY KEY (name, id)` +} + +function createIndexJobPolicyShort (schema) { + return `CREATE UNIQUE INDEX job_policy_short ON ${schema}.job (name) WHERE state = '${states.created}' AND policy = '${QUEUE_POLICY.short}'` +} + +function createIndexJobPolicySingleton (schema) { + return `CREATE UNIQUE INDEX job_policy_singleton ON ${schema}.job (name) WHERE state = '${states.active}' AND policy = '${QUEUE_POLICY.singleton}'` +} + +function createIndexJobPolicyStately (schema) { + return `CREATE UNIQUE INDEX job_policy_stately ON ${schema}.job (name, state) WHERE state <= '${states.active}' AND policy = '${QUEUE_POLICY.stately}'` +} + +function createIndexJobThrottleOn (schema) { + return `CREATE UNIQUE INDEX job_throttle_on ON ${schema}.job (name, singletonOn, COALESCE(singletonKey, '')) WHERE state <= '${states.completed}' AND singletonOn IS NOT NULL` +} + +function createIndexJobThrottleKey (schema) { + return `CREATE UNIQUE INDEX job_throttle_key ON ${schema}.job (name, singletonKey) WHERE state <= '${states.completed}' AND singletonOn IS NULL` +} + +function createIndexJobName (schema) { + return `CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops)` +} + +function createIndexJobFetch (schema) { + return `CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) INCLUDE (priority, createdOn, id) WHERE state < '${states.active}'` +} + +function createTableArchive (schema) { return `CREATE TABLE ${schema}.archive (LIKE ${schema}.job)` } -function addArchivedOnToArchive (schema) { +function createArchiveBackupTable (schema) { + return `CREATE TABLE ${schema}.archive_backup (LIKE ${schema}.job)` +} + +function createColumnArchiveArchivedOn (schema) { return `ALTER TABLE ${schema}.archive ADD archivedOn timestamptz NOT NULL DEFAULT 
now()` } -function addArchivedOnIndexToArchive (schema) { +function createIndexArchiveArchivedOn (schema) { return `CREATE INDEX archive_archivedon_idx ON ${schema}.archive(archivedon)` } -function addIdIndexToArchive (schema) { - return `CREATE INDEX archive_id_idx ON ${schema}.archive(id)` +function createIndexArchiveName (schema) { + return `CREATE INDEX archive_name_idx ON ${schema}.archive(name)` +} + +function getMaintenanceTime (schema) { + return `SELECT maintained_on, EXTRACT( EPOCH FROM (now() - maintained_on) ) seconds_ago FROM ${schema}.version` } function setMaintenanceTime (schema) { return `UPDATE ${schema}.version SET maintained_on = now()` } -function getMaintenanceTime (schema) { - return `SELECT maintained_on, EXTRACT( EPOCH FROM (now() - maintained_on) ) seconds_ago FROM ${schema}.version` +function getMonitorTime (schema) { + return `SELECT monitored_on, EXTRACT( EPOCH FROM (now() - monitored_on) ) seconds_ago FROM ${schema}.version` +} + +function setMonitorTime (schema) { + return `UPDATE ${schema}.version SET monitored_on = now()` } function setCronTime (schema, time) { @@ -191,69 +262,69 @@ function getCronTime (schema) { return `SELECT cron_on, EXTRACT( EPOCH FROM (now() - cron_on) ) seconds_ago FROM ${schema}.version` } -function deleteQueue (schema, options = {}) { - options.before = options.before || states.active - assert(options.before in states, `${options.before} is not a valid state`) - return `DELETE FROM ${schema}.job WHERE name = $1 and state < '${options.before}'` -} - -function deleteAllQueues (schema, options = {}) { - options.before = options.before || states.active - assert(options.before in states, `${options.before} is not a valid state`) - return `DELETE FROM ${schema}.job WHERE state < '${options.before}'` -} - -function clearStorage (schema) { - return `TRUNCATE ${schema}.job, ${schema}.archive` -} - -function getQueueSize (schema, options = {}) { - options.before = options.before || states.active - 
assert(options.before in states, `${options.before} is not a valid state`) - return `SELECT count(*) as count FROM ${schema}.job WHERE name = $1 AND state < '${options.before}'` -} - -function createIndexSingletonKey (schema) { - // anything with singletonKey means "only 1 job can be queued or active at a time" +function createQueue (schema) { return ` - CREATE UNIQUE INDEX job_singletonKey ON ${schema}.job (name, singletonKey) WHERE state < '${states.completed}' AND singletonOn IS NULL AND NOT singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' + INSERT INTO ${schema}.queue (name, policy, retry_limit, retry_delay, retry_backoff, expire_seconds, retention_minutes, dead_letter) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) ` } -function createIndexSingletonQueue (schema) { - // "singleton queue" means "only 1 job can be queued at a time" +function updateQueue (schema) { return ` - CREATE UNIQUE INDEX job_singleton_queue ON ${schema}.job (name, singletonKey) WHERE state < '${states.active}' AND singletonOn IS NULL AND singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%' + UPDATE ${schema}.queue SET + retry_limit = COALESCE($2, retry_limit), + retry_delay = COALESCE($3, retry_delay), + retry_backoff = COALESCE($4, retry_backoff), + expire_seconds = COALESCE($5, expire_seconds), + retention_minutes = COALESCE($6, retention_minutes), + dead_letter = COALESCE($7, dead_letter) + WHERE name = $1 ` } -function createIndexSingletonOn (schema) { - // anything with singletonOn means "only 1 job within this time period, queued, active or completed" - return ` - CREATE UNIQUE INDEX job_singletonOn ON ${schema}.job (name, singletonOn) WHERE state < '${states.expired}' AND singletonKey IS NULL - ` +function getQueueByName (schema) { + return `SELECT * FROM ${schema}.queue WHERE name = $1` } -function createIndexSingletonKeyOn (schema) { - // anything with both singletonOn and singletonKey means "only 1 job within this time period with this key, queued, active or completed" - return ` - 
CREATE UNIQUE INDEX job_singletonKeyOn ON ${schema}.job (name, singletonOn, singletonKey) WHERE state < '${states.expired}' +function deleteQueueRecords (schema) { + return `WITH dq AS ( + DELETE FROM ${schema}.queue WHERE name = $1 + ) + DELETE FROM ${schema}.job WHERE name = $1 ` } -function createIndexJobName (schema) { - return ` - CREATE INDEX job_name ON ${schema}.job (name text_pattern_ops) - ` +function purgeQueue (schema) { + return `DELETE from ${schema}.job WHERE name = $1 and state < '${states.active}'` } -function createIndexJobFetch (schema) { +function clearStorage (schema) { + return `TRUNCATE ${schema}.job, ${schema}.archive` +} + +function getQueueSize (schema, options = {}) { + options.before = options.before || states.active + assert(options.before in states, `${options.before} is not a valid state`) + return `SELECT count(*) as count FROM ${schema}.job WHERE name = $1 AND state < '${options.before}'` +} + +function createTableQueue (schema) { return ` - CREATE INDEX job_fetch ON ${schema}.job (name text_pattern_ops, startAfter) WHERE state < '${states.active}' + CREATE TABLE ${schema}.queue ( + name text primary key, + policy text, + retry_limit int, + retry_delay int, + retry_backoff bool, + expire_seconds int, + retention_minutes int, + dead_letter text, + created_on timestamp with time zone not null default now() + ) ` } -function createScheduleTable (schema) { +function createTableSchedule (schema) { return ` CREATE TABLE ${schema}.schedule ( name text primary key, @@ -267,7 +338,7 @@ function createScheduleTable (schema) { ` } -function createSubscriptionTable (schema) { +function createTableSubscription (schema) { return ` CREATE TABLE ${schema}.subscription ( event text not null, @@ -351,79 +422,28 @@ function insertVersion (schema, version) { } function fetchNextJob (schema) { - return (includeMetadata, enforceSingletonQueueActiveLimit) => ` - WITH nextJob as ( + return ({ includeMetadata, patternMatch, priority = true } = {}) => ` + 
WITH next as (
      SELECT id
-      FROM ${schema}.job j
+      FROM ${schema}.job
      WHERE state < '${states.active}'
-        AND name LIKE $1
+        AND name ${patternMatch ? 'LIKE' : '='} $1
        AND startAfter < now()
-        ${enforceSingletonQueueActiveLimit
-          ? `AND (
-            CASE
-              WHEN singletonKey IS NOT NULL
-              AND singletonKey LIKE '${SINGLETON_QUEUE_KEY_ESCAPED}%'
-              THEN NOT EXISTS (
-                SELECT 1
-                FROM ${schema}.job active_job
-                WHERE active_job.state = '${states.active}'
-                AND active_job.name = j.name
-                AND active_job.singletonKey = j.singletonKey
-                LIMIT 1
-              )
-              ELSE
-                true
-            END
-          )`
-          : ''}
-      ORDER BY priority desc, createdOn, id
+      ORDER BY ${priority ? 'priority desc, ' : ''} createdOn, id
      LIMIT $2
      FOR UPDATE SKIP LOCKED
    )
    UPDATE ${schema}.job j SET
      state = '${states.active}',
      startedOn = now(),
-      retryCount = CASE WHEN state = '${states.retry}' THEN retryCount + 1 ELSE retryCount END
-    FROM nextJob
-    WHERE j.id = nextJob.id
-    RETURNING ${includeMetadata ? 'j.*' : 'j.id, name, data'}, EXTRACT(epoch FROM expireIn) as expire_in_seconds
+      retryCount = CASE WHEN startedOn IS NOT NULL THEN retryCount + 1 ELSE retryCount END
+    FROM next
+    WHERE j.id = next.id
+    RETURNING ${includeMetadata ? 'j.*' : 'j.id, name, data'},
+      EXTRACT(epoch FROM expireIn) as expire_in_seconds
  `
}

-function buildJsonCompletionObject (withResponse) {
-  // job completion contract
-  return `jsonb_build_object(
-    'request', jsonb_build_object('id', id, 'name', name, 'data', data),
-    'response', ${withResponse ? 
'$2::jsonb' : 'null'}, - 'state', state, - 'retryCount', retryCount, - 'createdOn', createdOn, - 'startedOn', startedOn, - 'completedOn', completedOn, - 'failed', CASE WHEN state = '${states.completed}' THEN false ELSE true END - )` -} - -const retryCompletedOnCase = `CASE - WHEN retryCount < retryLimit - THEN NULL - ELSE now() - END` - -const retryStartAfterCase = `CASE - WHEN retryCount = retryLimit THEN startAfter - WHEN NOT retryBackoff THEN now() + retryDelay * interval '1' - ELSE now() + - ( - retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 - + - retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 * random() - ) - * interval '1' - END` - -const keepUntilInheritance = 'keepUntil + (keepUntil - startAfter)' - function completeJobs (schema) { return ` WITH results AS ( @@ -434,76 +454,64 @@ function completeJobs (schema) { WHERE id IN (SELECT UNNEST($1::uuid[])) AND state = '${states.active}' RETURNING * - ), completion_jobs as ( - INSERT INTO ${schema}.job (name, data, keepUntil) - SELECT - '${COMPLETION_JOB_PREFIX}' || name, - ${buildJsonCompletionObject(true)}, - ${keepUntilInheritance} - FROM results - WHERE NOT name LIKE '${COMPLETION_JOB_PREFIX}%' - AND on_complete ) SELECT COUNT(*) FROM results ` } -function failJobs (schema) { +function failJobsById (schema) { + const where = `id IN (SELECT UNNEST($1::uuid[])) AND state < '${states.completed}'` + const output = '$2::jsonb' + + return failJobs(schema, where, output) +} + +function failJobsByTimeout (schema) { + const where = `state = '${states.active}' AND (startedOn + expireIn) < now()` + const output = '\'{ "value": { "message": "job failed by timeout in active state" } }\'::jsonb' + return failJobs(schema, where, output) +} + +function failJobs (schema, where, output) { return ` WITH results AS ( - UPDATE ${schema}.job - SET state = CASE - WHEN retryCount < retryLimit - THEN '${states.retry}'::${schema}.job_state + UPDATE ${schema}.job SET + state = CASE + WHEN retryCount < retryLimit THEN 
'${states.retry}'::${schema}.job_state ELSE '${states.failed}'::${schema}.job_state END, - completedOn = ${retryCompletedOnCase}, - startAfter = ${retryStartAfterCase}, - output = $2::jsonb - WHERE id IN (SELECT UNNEST($1::uuid[])) - AND state < '${states.completed}' + completedOn = CASE + WHEN retryCount < retryLimit THEN NULL + ELSE now() + END, + startAfter = CASE + WHEN retryCount = retryLimit THEN startAfter + WHEN NOT retryBackoff THEN now() + retryDelay * interval '1' + ELSE now() + ( + retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 + + retryDelay * 2 ^ LEAST(16, retryCount + 1) / 2 * random() + ) * interval '1' + END, + output = ${output} + WHERE ${where} RETURNING * - ), completion_jobs as ( - INSERT INTO ${schema}.job (name, data, keepUntil) + ), dlq_jobs as ( + INSERT INTO ${schema}.job (name, data, output, retryLimit, keepUntil) SELECT - '${COMPLETION_JOB_PREFIX}' || name, - ${buildJsonCompletionObject(true)}, - ${keepUntilInheritance} + deadletter, + data, + output, + retryLimit, + keepUntil + (keepUntil - startAfter) FROM results WHERE state = '${states.failed}' - AND NOT name LIKE '${COMPLETION_JOB_PREFIX}%' - AND on_complete + AND deadletter IS NOT NULL + AND NOT name = deadletter ) SELECT COUNT(*) FROM results ` } -function expire (schema) { - return ` - WITH results AS ( - UPDATE ${schema}.job - SET state = CASE - WHEN retryCount < retryLimit THEN '${states.retry}'::${schema}.job_state - ELSE '${states.expired}'::${schema}.job_state - END, - completedOn = ${retryCompletedOnCase}, - startAfter = ${retryStartAfterCase} - WHERE state = '${states.active}' - AND (startedOn + expireIn) < now() - RETURNING * - ) - INSERT INTO ${schema}.job (name, data, keepUntil) - SELECT - '${COMPLETION_JOB_PREFIX}' || name, - ${buildJsonCompletionObject()}, - ${keepUntilInheritance} - FROM results - WHERE state = '${states.expired}' - AND NOT name LIKE '${COMPLETION_JOB_PREFIX}%' - AND on_complete - ` -} - function cancelJobs (schema) { return ` with results as ( @@ 
-536,68 +544,73 @@ function insertJob (schema) { INSERT INTO ${schema}.job ( id, name, + data, priority, - state, - retryLimit, startAfter, - expireIn, - data, singletonKey, singletonOn, + deadletter, + expireIn, + keepUntil, + retryLimit, retryDelay, retryBackoff, - keepUntil, - on_complete + policy ) SELECT id, - name, + j.name, + data, priority, - state, - retryLimit, startAfter, - expireIn, - data, singletonKey, singletonOn, - retryDelay, - retryBackoff, - keepUntil, - on_complete + COALESCE(deadLetter, q.dead_letter) as deadletter, + CASE + WHEN expireIn IS NOT NULL THEN CAST(expireIn as interval) + WHEN q.expire_seconds IS NOT NULL THEN q.expire_seconds * interval '1s' + WHEN expireInDefault IS NOT NULL THEN CAST(expireInDefault as interval) + ELSE interval '15 minutes' + END as expireIn, + CASE + WHEN right(keepUntil, 1) = 'Z' THEN CAST(keepUntil as timestamp with time zone) + ELSE startAfter + CAST(COALESCE(keepUntil, (q.retention_minutes * 60)::text, keepUntilDefault, '14 days') as interval) + END as keepUntil, + COALESCE(retryLimit, q.retry_limit, retryLimitDefault, 2) as retryLimit, + CASE + WHEN COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false) + THEN GREATEST(COALESCE(retryDelay, q.retry_delay, retryDelayDefault), 1) + ELSE COALESCE(retryDelay, q.retry_delay, retryDelayDefault, 0) + END as retryDelay, + COALESCE(retryBackoff, q.retry_backoff, retryBackoffDefault, false) as retryBackoff, + q.policy FROM - ( SELECT *, - CASE - WHEN right(keepUntilValue, 1) = 'Z' THEN CAST(keepUntilValue as timestamp with time zone) - ELSE startAfter + CAST(COALESCE(keepUntilValue,'0') as interval) - END as keepUntil - FROM - ( SELECT *, + ( SELECT + COALESCE($1::uuid, gen_random_uuid()) as id, + $2 as name, + $3::jsonb as data, + COALESCE($4::int, 0) as priority, CASE - WHEN right(startAfterValue, 1) = 'Z' THEN CAST(startAfterValue as timestamp with time zone) - ELSE now() + CAST(COALESCE(startAfterValue,'0') as interval) - END as startAfter - FROM - ( 
SELECT - $1::uuid as id, - $2::text as name, - $3::int as priority, - '${states.created}'::${schema}.job_state as state, - $4::int as retryLimit, - $5::text as startAfterValue, - CAST($6 as interval) as expireIn, - $7::jsonb as data, - $8::text as singletonKey, - CASE - WHEN $9::integer IS NOT NULL THEN 'epoch'::timestamp + '1 second'::interval * ($9 * floor((date_part('epoch', now()) + $10) / $9)) - ELSE NULL - END as singletonOn, - $11::int as retryDelay, - $12::bool as retryBackoff, - $13::text as keepUntilValue, - $14::boolean as on_complete - ) j1 - ) j2 - ) j3 + WHEN right($5, 1) = 'Z' THEN CAST($5 as timestamp with time zone) + ELSE now() + CAST(COALESCE($5,'0') as interval) + END as startAfter, + $6 as singletonKey, + CASE + WHEN $7::integer IS NOT NULL THEN 'epoch'::timestamp + '1 second'::interval * ($7 * floor((date_part('epoch', now()) + $8) / $7)) + ELSE NULL + END as singletonOn, + $9 as deadletter, + $10 as expireIn, + $11 as expireInDefault, + $12 as keepUntil, + $13 as keepUntilDefault, + $14::int as retryLimit, + $15::int as retryLimitDefault, + $16::int as retryDelay, + $17::int as retryDelayDefault, + $18::bool as retryBackoff, + $19::bool as retryBackoffDefault + ) j LEFT JOIN ${schema}.queue q ON j.name = q.name ON CONFLICT DO NOTHING RETURNING id ` @@ -605,52 +618,76 @@ function insertJob (schema) { function insertJobs (schema) { return ` + WITH defaults as ( + SELECT + $2 as expireIn, + $3 as keepUntil, + $4::int as retryLimit, + $5::int as retryDelay, + $6::bool as retryBackoff + ) INSERT INTO ${schema}.job ( id, name, data, priority, startAfter, + singletonKey, + deadletter, expireIn, + keepUntil, retryLimit, retryDelay, retryBackoff, - singletonKey, - keepUntil, - on_complete + policy ) SELECT COALESCE(id, gen_random_uuid()) as id, - name, + j.name, data, - COALESCE(priority, 0) as priority, - COALESCE("startAfter", now()) as startAfter, - COALESCE("expireInSeconds", 15 * 60) * interval '1s' as expireIn, - COALESCE("retryLimit", 0) as 
retryLimit, - COALESCE("retryDelay", 0) as retryDelay, - COALESCE("retryBackoff", false) as retryBackoff, + COALESCE(priority, 0), + COALESCE("startAfter", now()), "singletonKey", - COALESCE("keepUntil", now() + interval '14 days') as keepUntil, - COALESCE("onComplete", false) as onComplete - FROM json_to_recordset($1) as x( + COALESCE("deadLetter", q.dead_letter), + CASE + WHEN "expireInSeconds" IS NOT NULL THEN "expireInSeconds" * interval '1s' + WHEN q.expire_seconds IS NOT NULL THEN q.expire_seconds * interval '1s' + WHEN defaults.expireIn IS NOT NULL THEN CAST(defaults.expireIn as interval) + ELSE interval '15 minutes' + END as expireIn, + CASE + WHEN "keepUntil" IS NOT NULL THEN "keepUntil" + ELSE COALESCE("startAfter", now()) + CAST(COALESCE((q.retention_minutes * 60)::text, defaults.keepUntil, '14 days') as interval) + END as keepUntil, + COALESCE("retryLimit", q.retry_limit, defaults.retryLimit, 2), + CASE + WHEN COALESCE("retryBackoff", q.retry_backoff, defaults.retryBackoff, false) + THEN GREATEST(COALESCE("retryDelay", q.retry_delay, defaults.retryDelay), 1) + ELSE COALESCE("retryDelay", q.retry_delay, defaults.retryDelay, 0) + END as retryDelay, + COALESCE("retryBackoff", q.retry_backoff, defaults.retryBackoff, false) as retryBackoff, + q.policy + FROM json_to_recordset($1) as j ( id uuid, name text, priority integer, data jsonb, + "startAfter" timestamp with time zone, "retryLimit" integer, "retryDelay" integer, "retryBackoff" boolean, - "startAfter" timestamp with time zone, "singletonKey" text, "expireInSeconds" integer, "keepUntil" timestamp with time zone, - "onComplete" boolean + "deadLetter" text ) + LEFT JOIN ${schema}.queue q ON j.name = q.name, + defaults ON CONFLICT DO NOTHING ` } -function purge (schema, interval) { +function drop (schema, interval) { return ` DELETE FROM ${schema}.archive WHERE archivedOn < (now() - interval '${interval}') @@ -661,22 +698,16 @@ function archive (schema, completedInterval, failedInterval = 
completedInterval) return ` WITH archived_rows AS ( DELETE FROM ${schema}.job - WHERE ( - state <> '${states.failed}' AND completedOn < (now() - interval '${completedInterval}') - ) - OR ( - state = '${states.failed}' AND completedOn < (now() - interval '${failedInterval}') - ) - OR ( - state < '${states.active}' AND keepUntil < now() - ) + WHERE (state <> '${states.failed}' AND completedOn < (now() - interval '${completedInterval}')) + OR (state = '${states.failed}' AND completedOn < (now() - interval '${failedInterval}')) + OR (state < '${states.active}' AND keepUntil < now()) RETURNING * ) INSERT INTO ${schema}.archive ( - id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, on_complete, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, policy, output ) SELECT - id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, on_complete, output + id, name, priority, data, state, retryLimit, retryCount, retryDelay, retryBackoff, startAfter, startedOn, singletonKey, singletonOn, expireIn, createdOn, completedOn, keepUntil, deadletter, policy, output FROM archived_rows ` } @@ -689,9 +720,23 @@ function countStates (schema) { ` } -function advisoryLock (schema) { +function locked (schema, query) { + if (Array.isArray(query)) { + query = query.join(';\n') + } + + return ` + BEGIN; + SET LOCAL lock_timeout = '30s'; + ${advisoryLock(schema)}; + ${query}; + COMMIT; + ` +} + +function advisoryLock (schema, key) { return `SELECT pg_advisory_xact_lock( - ('x' || md5(current_database() || '.pgboss.${schema}'))::bit(64)::bigint + ('x' || md5(current_database() || '.pgboss.${schema}${key || 
''}'))::bit(64)::bigint )` } @@ -709,5 +754,5 @@ function getArchivedJobById (schema) { } function getJobByTableAndId (schema, table) { - return `SELECT * From ${schema}.${table} WHERE id = $1` + return `SELECT * FROM ${schema}.${table} WHERE id = $1` } diff --git a/src/timekeeper.js b/src/timekeeper.js index ef989035..0cd038cb 100644 --- a/src/timekeeper.js +++ b/src/timekeeper.js @@ -134,8 +134,7 @@ class Timekeeper extends EventEmitter { async checkSchedulesAsync () { const opts = { retryLimit: 2, - retentionSeconds: 60, - onComplete: false + retentionSeconds: 60 } await this.manager.sendDebounced(queues.CRON, null, opts, 60) @@ -145,8 +144,8 @@ class Timekeeper extends EventEmitter { if (this.stopped) return try { - if (this.config.__test__throw_clock_monitoring) { - throw new Error(this.config.__test__throw_clock_monitoring) + if (this.config.__test__throw_cron_processing) { + throw new Error(this.config.__test__throw_cron_processing) } const items = await this.getSchedules() @@ -186,8 +185,7 @@ class Timekeeper extends EventEmitter { async send (job) { const options = { singletonKey: job.name, - singletonSeconds: 60, - onComplete: false + singletonSeconds: 60 } await this.manager.send(queues.SEND_IT, job, options) diff --git a/src/tools.js b/src/tools.js new file mode 100644 index 00000000..7a04c3dc --- /dev/null +++ b/src/tools.js @@ -0,0 +1,28 @@ +module.exports = { + delay +} + +function delay (ms, error) { + const { setTimeout } = require('timers/promises') + const ac = new AbortController() + + const promise = new Promise((resolve, reject) => { + setTimeout(ms, null, { signal: ac.signal }) + .then(() => { + if (error) { + reject(new Error(error)) + } else { + resolve() + } + }) + .catch(resolve) + }) + + promise.abort = () => { + if (!ac.signal.aborted) { + ac.abort() + } + } + + return promise +} diff --git a/src/worker.js b/src/worker.js index 386dea41..c15409df 100644 --- a/src/worker.js +++ b/src/worker.js @@ -1,4 +1,4 @@ -const delay = 
require('delay') +const { delay } = require('./tools') const WORKER_STATES = { created: 'created', @@ -34,7 +34,7 @@ class Worker { this.beenNotified = true if (this.loopDelayPromise) { - this.loopDelayPromise.clear() + this.loopDelayPromise.abort() } } @@ -91,7 +91,7 @@ class Worker { this.state = WORKER_STATES.stopping if (this.loopDelayPromise) { - this.loopDelayPromise.clear() + this.loopDelayPromise.abort() } } } diff --git a/test/archiveTest.js b/test/archiveTest.js index 85ff88db..f70407cb 100644 --- a/test/archiveTest.js +++ b/test/archiveTest.js @@ -1,12 +1,12 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') const { states } = require('../src/plans') describe('archive', function () { const defaults = { archiveCompletedAfterSeconds: 1, - maintenanceIntervalSeconds: 1 + supervise: true } it('should archive a completed job', async function () { @@ -21,7 +21,9 @@ describe('archive', function () { await boss.complete(jobId) - await delay(4000) + await delay(1000) + + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -41,7 +43,9 @@ describe('archive', function () { await boss.complete(jobId) - await delay(4000) + await delay(1000) + + await boss.maintain() const archivedJob = await boss.getJobById(jobId) @@ -56,7 +60,9 @@ describe('archive', function () { const jobId = await boss.send(queue, null, { retentionSeconds: 1 }) - await delay(7000) + await delay(1000) + + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -71,7 +77,9 @@ describe('archive', function () { const jobId = await boss.send(queue) - await delay(7000) + await delay(1000) + + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -88,7 +96,10 @@ describe('archive', function () { const jobId = await boss.send(queue, null, { retentionSeconds: 1 }) await 
boss.fail(jobId, failPayload) - await delay(7000) + + await delay(1000) + + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) @@ -96,7 +107,7 @@ describe('archive', function () { }) it('should archive a failed job', async function () { - const config = { ...this.test.bossConfig, maintenanceIntervalSeconds: 1, archiveFailedAfterSeconds: 1 } + const config = { ...this.test.bossConfig, archiveFailedAfterSeconds: 1 } const boss = this.test.boss = await helper.start(config) const queue = this.test.bossConfig.schema @@ -104,7 +115,10 @@ describe('archive', function () { const jobId = await boss.send(queue, null, { retentionSeconds: 1 }) await boss.fail(jobId, failPayload) - await delay(7000) + + await delay(1000) + + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) diff --git a/test/backgroundErrorTest.js b/test/backgroundErrorTest.js index fd1a14ff..f17291f0 100644 --- a/test/backgroundErrorTest.js +++ b/test/backgroundErrorTest.js @@ -1,78 +1,136 @@ const assert = require('assert') const PgBoss = require('../') -const delay = require('delay') +const { delay } = require('../src/tools') describe('background processing error handling', function () { it('maintenance error handling works', async function () { const defaults = { - monitorStateIntervalMinutes: 1, maintenanceIntervalSeconds: 1, - noScheduling: true, - __test__throw_maint: true + supervise: true, + __test__throw_maint: 'my maintenance error' } const config = { ...this.test.bossConfig, ...defaults } const boss = this.test.boss = new PgBoss(config) - return new Promise((resolve) => { - let resolved = false - - boss.on('error', () => { - if (!resolved) { - resolved = true - resolve() - } - }) + let errorCount = 0 - boss.start().then(() => {}) + boss.once('error', (error) => { + assert.strictEqual(error.message, config.__test__throw_maint) + errorCount++ }) + + await boss.start() + + await delay(3000) + + 
assert.strictEqual(errorCount, 1) + }) + + it('slow maintenance will back off loop interval', async function () { + const config = { + ...this.test.bossConfig, + maintenanceIntervalSeconds: 1, + supervise: true, + __test__delay_maintenance: 2000 + } + + const boss = this.test.boss = new PgBoss(config) + + let eventCount = 0 + + boss.on('maintenance', () => eventCount++) + + await boss.start() + + await delay(5000) + + assert.strictEqual(eventCount, 1) + }) + + it('slow monitoring will back off loop interval', async function () { + const config = { + ...this.test.bossConfig, + monitorStateIntervalSeconds: 1, + __test__delay_monitor: 2000 + } + + const boss = this.test.boss = new PgBoss(config) + + let eventCount = 0 + + boss.on('monitor-states', () => eventCount++) + + await boss.start() + + await delay(4000) + + assert.strictEqual(eventCount, 1) }) it('state monitoring error handling works', async function () { const defaults = { - monitorStateIntervalSeconds: 2, - maintenanceIntervalMinutes: 1, - noScheduling: true, - __test__throw_monitor: true + monitorStateIntervalSeconds: 1, + supervise: true, + __test__throw_monitor: 'my monitor error' } const config = { ...this.test.bossConfig, ...defaults } const boss = this.test.boss = new PgBoss(config) - return new Promise((resolve) => { - let resolved = false - - boss.on('error', () => { - if (!resolved) { - resolved = true - resolve() - } - }) + let errorCount = 0 - boss.start().then(() => {}) + boss.once('error', (error) => { + assert.strictEqual(error.message, config.__test__throw_monitor) + errorCount++ }) + + await boss.start() + + await delay(3000) + + assert.strictEqual(errorCount, 1) }) - it('clock monitoring error handling works', async function () { + it('shutdown monitoring error handling works', async function () { const config = { ...this.test.bossConfig, - clockMonitorIntervalSeconds: 1, - __test__throw_clock_monitoring: 'pg-boss mock error: clock monitoring' + __test__throw_shutdown: 'shutdown error' } - 
let errorCount = 0 - const boss = this.test.boss = new PgBoss(config) + let errorCount = 0 + boss.once('error', (error) => { - assert.strictEqual(error.message, config.__test__throw_clock_monitoring) + assert.strictEqual(error.message, config.__test__throw_shutdown) errorCount++ }) await boss.start() - await delay(8000) + await boss.stop({ wait: false }) + + await delay(1000) assert.strictEqual(errorCount, 1) }) + + it('shutdown error handling works', async function () { + const config = { + ...this.test.bossConfig, + __test__throw_stop_monitor: 'monitor error' + } + + const boss = this.test.boss = new PgBoss(config) + + await boss.start() + + try { + await boss.stop({ wait: false }) + assert(false) + } catch (err) { + assert(true) + } + }) }) diff --git a/test/cancelTest.js b/test/cancelTest.js index 6bff1d6d..51892479 100644 --- a/test/cancelTest.js +++ b/test/cancelTest.js @@ -27,21 +27,20 @@ describe('cancel', function () { }) it('should not cancel a completed job', async function () { - const config = this.test.bossConfig - - const boss = this.test.boss = await helper.start(config) - - const queue = 'will_not_cancel' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema await boss.send(queue) const job = await boss.fetch(queue) - await boss.complete(job.id) + const completeResult = await boss.complete(job.id) + + assert.strictEqual(completeResult.updated, 1) - const response = await boss.cancel(job.id) + const cancelResult = await boss.cancel(job.id) - assert.strictEqual(response.updated, 0) + assert.strictEqual(cancelResult.updated, 0) }) it('should cancel a batch of jobs', async function () { diff --git a/test/completeTest.js b/test/completeTest.js index dbe09f6a..e61aaeab 100644 --- a/test/completeTest.js +++ b/test/completeTest.js @@ -1,4 +1,3 @@ -const delay = require('delay') const assert = require('assert') const helper = require('./testHelper') const PgBoss = require('../') @@ -16,7 
+15,7 @@ describe('complete', function () { }) it('should complete a batch of jobs', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'complete-batch' const batchSize = 3 @@ -35,210 +34,9 @@ describe('complete', function () { assert.strictEqual(activeCount, batchSize) - await boss.complete(jobs.map(job => job.id)) + const result = await boss.complete(jobs.map(job => job.id)) - const completed = await boss.fetchCompleted(queue, batchSize) - - assert.strictEqual(batchSize, completed.length) - }) - - it('onComplete should have the payload from complete() in the response object', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const jobName = 'part-of-something-important' - const responsePayload = { message: 'super-important-payload', arg2: '123' } - - await boss.send(jobName) - - const job = await boss.fetch(jobName) - - await boss.complete(job.id, responsePayload) - - return new Promise((resolve) => { - boss.onComplete(jobName, async job => { - assert.strictEqual(job.data.response.message, responsePayload.message) - assert.strictEqual(job.data.response.arg2, responsePayload.arg2) - - resolve() - }) - }) - }) - - it('onComplete should have the original payload in request object', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queueName = 'onCompleteRequestTest' - const requestPayload = { foo: 'bar' } - - const jobId = await boss.send(queueName, requestPayload) - - const job = await boss.fetch(queueName) - await boss.complete(job.id) - - return new Promise((resolve) => { - boss.onComplete(queueName, async job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual(job.data.request.data.foo, requestPayload.foo) - - resolve() - }) - }) - }) 
- - it('onComplete should have both request and response', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const jobName = 'onCompleteFtw' - const requestPayload = { token: 'trivial' } - const responsePayload = { message: 'so verbose', code: '1234' } - - const jobId = await boss.send(jobName, requestPayload) - const job = await boss.fetch(jobName) - - await boss.complete(job.id, responsePayload) - - return new Promise((resolve) => { - boss.onComplete(jobName, async job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual(job.data.request.data.token, requestPayload.token) - assert.strictEqual(job.data.response.message, responsePayload.message) - assert.strictEqual(job.data.response.code, responsePayload.code) - - resolve() - }) - }) - }) - - it('should remove an onComplete worker', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const jobName = 'offComplete' - - let receivedCount = 0 - - boss.onComplete(jobName, { newJobCheckInterval: 500 }, async job => { - receivedCount++ - await boss.offComplete(jobName) - }) - - await boss.send(jobName) - const job1 = await boss.fetch(jobName) - await boss.complete(job1.id) - - await delay(2000) - - await boss.send(jobName) - const job2 = await boss.fetch(jobName) - await boss.complete(job2.id) - - await delay(2000) - - assert.strictEqual(receivedCount, 1) - }) - - it('should remove an onComplete worker by id', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - const queue = this.test.bossConfig.schema - - let receivedCount = 0 - - await boss.send(queue) - const job1 = await boss.fetch(queue) - await boss.complete(job1.id) - - await boss.send(queue) - const job2 = await boss.fetch(queue) - await boss.complete(job2.id) - - const id = await boss.onComplete(queue, { newJobCheckInterval: 500 }, 
async () => { - receivedCount++ - await boss.offComplete({ id }) - }) - - await delay(2000) - - assert.strictEqual(receivedCount, 1) - }) - - it('should fetch a completed job', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queue = 'fetchCompleted' - const jobId = await boss.send(queue) - await boss.fetch(queue) - await boss.complete(jobId) - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job.data.request.id, jobId) - }) - - it('should not create an extra state job after completion', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queue = 'noMoreExtraStateJobs' - const config = this.test.bossConfig - - const jobId = await boss.send(queue) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - await boss.complete(job.id) - - const stateJobCount = await helper.countJobs(config.schema, 'name = $1', [`${helper.COMPLETION_JOB_PREFIX}${queue}`]) - - assert.strictEqual(stateJobCount, 1) - }) - - it('should not create a completion job if opted out during send', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queue = 'onCompleteOptOut' - - const jobId = await boss.send(queue, null, { onComplete: false }) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job, null) - }) - - it('should not create a completion job if opted out during constructor', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: false }) - - const queue = 'onCompleteOptOutGlobal' - - const jobId = await boss.send(queue) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job, 
null) - }) - - it('should create completion job if overriding the default from constructor', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: false }) - - const queue = 'onCompleteOptInOverride' - - const jobId = await boss.send(queue, null, { onComplete: true }) - - await boss.fetch(queue) - - await boss.complete(jobId) - - const job = await boss.fetchCompleted(queue) - - assert.strictEqual(job.data.request.id, jobId) + assert.strictEqual(batchSize, result.jobs.length) }) it('should store job output in job.output from complete()', async function () { @@ -246,7 +44,7 @@ describe('complete', function () { const queue = 'completion-data-in-job-output' - const jobId = await boss.send(queue, null, { onComplete: false }) + const jobId = await boss.send(queue) const { id } = await boss.fetch(queue) @@ -266,7 +64,7 @@ describe('complete', function () { const queue = 'completion-data-in-job-output' - const jobId = await boss.send(queue, null, { onComplete: false }) + const jobId = await boss.send(queue) const { id } = await boss.fetch(queue) @@ -282,7 +80,7 @@ describe('complete', function () { }) it('should complete a batch of jobs with custom connection', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = 'complete-batch' const batchSize = 3 @@ -310,11 +108,33 @@ describe('complete', function () { } } - await boss.complete(jobs.map(job => job.id), null, { db }) + const result = await boss.complete(jobs.map(job => job.id), null, { db }) - const completed = await boss.fetchCompleted(queue, batchSize) - - assert.strictEqual(batchSize, completed.length) + assert.strictEqual(batchSize, result.jobs.length) assert.strictEqual(called, true) }) + + it('should warn with an old onComplete option only once', async function () { + const boss = this.test.boss = await 
helper.start({ ...this.test.bossConfig }) + + const queue = this.test.bossConfig.schema + + let warningCount = 0 + + const warningEvent = 'warning' + const onWarning = (warning) => { + assert(warning.message.includes('onComplete')) + warningCount++ + } + + process.on(warningEvent, onWarning) + + await boss.send({ name: queue, options: { onComplete: true } }) + await boss.send({ name: queue, options: { onComplete: true } }) + await boss.send({ name: queue, options: { onComplete: true } }) + + process.removeListener(warningEvent, onWarning) + + assert.strictEqual(warningCount, 1) + }) }) diff --git a/test/config.json b/test/config.json index 7a99f9cd..b3a0a710 100644 --- a/test/config.json +++ b/test/config.json @@ -4,6 +4,5 @@ "database": "pgboss", "user": "postgres", "password": "postgres", - "uuid": "v4", "max": 3 } diff --git a/test/delayTest.js b/test/delayTest.js index 918d7089..d6e705f8 100644 --- a/test/delayTest.js +++ b/test/delayTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('delayed jobs', function () { it('should wait until after an int (in seconds)', async function () { diff --git a/test/deleteQueueTest.js b/test/deleteQueueTest.js deleted file mode 100644 index 0f05dbbc..00000000 --- a/test/deleteQueueTest.js +++ /dev/null @@ -1,99 +0,0 @@ -const assert = require('assert') -const helper = require('./testHelper') -const delay = require('delay') - -describe('deleteQueue', function () { - it('should clear a specific queue', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue2 = 'delete-named-queue-2' - const queue1 = 'delete-named-queue-1' - - await boss.send(queue1) - await boss.send(queue2) - - const q1Count1 = await boss.getQueueSize(queue1) - const q2Count1 = await boss.getQueueSize(queue2) - - assert.strictEqual(1, q1Count1) - assert.strictEqual(1, q2Count1) - - 
await boss.deleteQueue(queue1) - - const q1Count2 = await boss.getQueueSize(queue1) - const q2Count2 = await boss.getQueueSize(queue2) - - assert.strictEqual(0, q1Count2) - assert.strictEqual(1, q2Count2) - - await boss.deleteQueue(queue2) - - const q2Count3 = await boss.getQueueSize(queue2) - - assert.strictEqual(0, q2Count3) - }) - - it('should clear all queues', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue1 = 'delete-named-queue-11' - const queue2 = 'delete-named-queue-22' - - await boss.send(queue1) - await boss.send(queue2) - - const q1Count1 = await boss.getQueueSize(queue1) - const q2Count1 = await boss.getQueueSize(queue2) - - assert.strictEqual(1, q1Count1) - assert.strictEqual(1, q2Count1) - - await boss.deleteAllQueues() - - const q1Count2 = await boss.getQueueSize(queue1) - const q2Count2 = await boss.getQueueSize(queue2) - - assert.strictEqual(0, q1Count2) - assert.strictEqual(0, q2Count2) - }) - - it('clearStorage() should empty both job storage tables', async function () { - const defaults = { - archiveCompletedAfterSeconds: 1, - maintenanceIntervalSeconds: 1 - } - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'clear-storage-works' - - const jobId = await boss.send(queue) - const job = await boss.fetch(queue) - - assert.strictEqual(job.id, jobId) - - await boss.complete(jobId) - - await delay(3000) - - const db = await helper.getDb() - - const getJobCount = async table => { - const jobCountResult = await db.executeSql(`SELECT count(*)::int as job_count FROM ${this.test.bossConfig.schema}.${table}`) - return jobCountResult.rows[0].job_count - } - - const preJobCount = await getJobCount('job') - const preArchiveCount = await getJobCount('archive') - - assert(preJobCount > 0) - assert(preArchiveCount > 0) - - await boss.clearStorage() - - const postJobCount = await getJobCount('job') - const postArchiveCount = await 
getJobCount('archive') - - assert(postJobCount === 0) - assert(postArchiveCount === 0) - }) -}) diff --git a/test/deleteTest.js b/test/deleteTest.js index 9a69080e..94d8aa51 100644 --- a/test/deleteTest.js +++ b/test/deleteTest.js @@ -1,26 +1,19 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') describe('delete', async function () { - const defaults = { - deleteAfterSeconds: 1, - maintenanceIntervalSeconds: 1 - } - it('should delete an archived job', async function () { - const jobName = 'deleteMe' - - const config = { ...this.test.bossConfig, ...defaults } + const config = { ...this.test.bossConfig, deleteAfterSeconds: 1 } const boss = this.test.boss = await helper.start(config) - const jobId = await boss.send(jobName) - const job = await boss.fetch(jobName) + const queue = this.test.bossConfig.schema + + const jobId = await boss.send(queue) - assert.strictEqual(jobId, job.id) + await boss.fetch(queue) await boss.complete(jobId) - await delay(7000) + await boss.maintain() const archivedJob = await helper.getArchivedJobById(config.schema, jobId) diff --git a/test/expireTest.js b/test/expireTest.js index c37f9628..5cd3eff2 100644 --- a/test/expireTest.js +++ b/test/expireTest.js @@ -1,70 +1,43 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('expire', function () { - const defaults = { maintenanceIntervalSeconds: 1 } - it('should expire a job', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + const key = this.test.bossConfig.schema - const queue = 'expire' + const jobId = await boss.send({ name: queue, data: { key }, options: { retryLimit: 0, expireInSeconds: 1 } }) - const jobId = await 
boss.send({ name: queue, options: { expireInSeconds: 1 } }) + const job1 = await boss.fetch(queue) - // fetch the job but don't complete it - await boss.fetch(queue) + assert(job1) + + await delay(1000) - // this should give it enough time to expire - await delay(8000) + await boss.maintain() - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual('expired', job.data.state) + assert.strictEqual('failed', job.state) }) it('should expire a job - cascaded config', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, expireInSeconds: 1 }) - - const queue = 'expire-cascade-config' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, expireInSeconds: 1, retryLimit: 0 }) + const queue = this.test.bossConfig.schema const jobId = await boss.send(queue) // fetch the job but don't complete it - const { id } = await boss.fetch(queue) + await boss.fetch(queue) - assert.strictEqual(jobId, id) + await delay(1000) - // this should give it enough time to expire - await delay(8000) + await boss.maintain() const job = await boss.getJobById(jobId) - assert.strictEqual('expired', job.state) - }) - - it('should warn with an old expireIn option only once', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, noSupervisor: true }) - - const queue = 'expireIn-warning-only-once' - - let warningCount = 0 - - const warningEvent = 'warning' - const onWarning = (warning) => { - assert(warning.message.includes('expireIn')) - warningCount++ - } - - process.on(warningEvent, onWarning) - - await boss.send({ name: queue, options: { expireIn: '1 minute' } }) - await boss.send({ name: queue, options: { expireIn: '1 minute' } }) - await boss.send({ name: queue, options: { expireIn: '1 minute' } }) - - process.removeListener(warningEvent, onWarning) - - 
assert.strictEqual(warningCount, 1) + assert.strictEqual('failed', job.state) }) }) diff --git a/test/failureTest.js b/test/failureTest.js index 6b66dd4f..328fc66e 100644 --- a/test/failureTest.js +++ b/test/failureTest.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const helper = require('./testHelper') const pMap = require('p-map') @@ -26,25 +26,6 @@ describe('failure', function () { await boss.fail(job.id) }) - it('worker for job failure', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - - const jobId = await boss.send(queue, null, { onComplete: true }) - - const job = await boss.fetch(queue) - - await boss.fail(job.id) - - return new Promise((resolve, reject) => { - boss.onComplete(queue, async job => { - assert.strictEqual(jobId, job.data.request.id) - assert.strictEqual('failed', job.data.state) - resolve() - }).catch(reject) - }) - }) - it('should fail a batch of jobs', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema @@ -57,7 +38,9 @@ describe('failure', function () { const jobs = await boss.fetch(queue, 3) - await boss.fail(jobs.map(job => job.id)) + const result = await boss.fail(jobs.map(job => job.id)) + + assert.strictEqual(result.jobs.length, 3) }) it('should fail a batch of jobs with a data arg', async function () { @@ -80,22 +63,6 @@ describe('failure', function () { assert(results.every(i => i.output.message === message)) }) - it('should accept a payload', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - - const failPayload = { someReason: 'nuna' } - - const jobId = await boss.send(queue, null, { onComplete: true }) - - await boss.fail(jobId, failPayload) - - const job = await boss.fetchCompleted(queue) - - 
assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.someReason, failPayload.someReason) - }) - it('should preserve nested objects within a payload that is an instance of Error', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema @@ -103,14 +70,13 @@ describe('failure', function () { const failPayload = new Error('Something went wrong') failPayload.some = { deeply: { nested: { reason: 'nuna' } } } - const jobId = await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) await boss.fail(jobId, failPayload) - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.some.deeply.nested.reason, failPayload.some.deeply.nested.reason) + assert.strictEqual(job.output.some.deeply.nested.reason, failPayload.some.deeply.nested.reason) }) it('failure via Promise reject() should pass string wrapped in value prop', async function () { @@ -118,15 +84,14 @@ describe('failure', function () { const queue = this.test.bossConfig.schema const failPayload = 'mah error' - await boss.work(queue, job => Promise.reject(failPayload)) - await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) + await boss.work(queue, () => Promise.reject(failPayload)) - await delay(7000) + await delay(1000) - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.value, failPayload) + assert.strictEqual(job.output.value, failPayload) }) it('failure via Promise reject() should pass object payload', async function () { @@ -137,31 +102,29 @@ describe('failure', function () { const errorResponse = new Error('custom error') errorResponse.something = something - await boss.work(queue, job => 
Promise.reject(errorResponse)) - await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) + await boss.work(queue, () => Promise.reject(errorResponse)) - await delay(7000) + await delay(1000) - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') - assert.strictEqual(job.data.response.something, something) + assert.strictEqual(job.output.something, something) }) - it('failure with Error object should get stored in the failure job', async function () { + it('failure with Error object should be saved in the job', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema const message = 'a real error!' - await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) await boss.work(queue, async () => { throw new Error(message) }) - await delay(2000) + await delay(1000) - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'failed') - assert(job.data.response.message.includes(message)) + assert(job.output.message.includes(message)) }) it('should fail a job with custom connection', async function () { @@ -190,22 +153,53 @@ describe('failure', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - await boss.send(queue, null, { onComplete: true }) - - await boss.work(queue, async job => { - const err = { - message: 'something' - } + const jobId = await boss.send(queue) + const message = 'mhmm' + await boss.work(queue, { newJobCheckInterval: 500 }, async () => { + const err = { message } err.myself = err - throw err }) await delay(2000) - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) + + assert.strictEqual(job.output.message, message) + }) + + it('dead letter queues are 
working', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + const deadLetter = `${queue}_dlq` + + const jobId = await boss.send(queue, { key: queue }, { deadLetter }) + + await boss.fetch(queue) + await boss.fail(jobId) + + const job = await boss.fetch(deadLetter) + + assert.strictEqual(job.data.key, queue) + }) + + it('should fail active jobs in a worker during shutdown', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + const jobId = await boss.send(queue, null, { retryLimit: 1, expireInSeconds: 60 }) + + await boss.work(queue, async () => await delay(10000)) + + await delay(1000) + + await boss.stop({ wait: true, timeout: 2000 }) + + await boss.start() + + const job = await boss.fetch(queue) - assert(job) + assert.strictEqual(job?.id, jobId) }) }) diff --git a/test/fetchTest.js b/test/fetchTest.js index 092230c4..d2a9a858 100644 --- a/test/fetchTest.js +++ b/test/fetchTest.js @@ -121,49 +121,4 @@ describe('fetch', function () { assert(job.startedon === undefined) assert.strictEqual(calledCounter, 2) }) - - describe('enforceSingletonQueueActiveLimit option', function () { - it('when enforceSingletonQueueActiveLimit=false, should fetch singleton queue job even if there is already an active one', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - const jobOptions = { singletonKey: 'singleton_queue_active_test', useSingletonQueue: true } - const sendArgs = [queue, {}, jobOptions] - const fetchArgs = [queue, undefined, { enforceSingletonQueueActiveLimit: false }] - - const publish1 = await boss.send(...sendArgs) - assert(publish1) - const fetch1 = await boss.fetch(...fetchArgs) - assert(fetch1) - - const publish2 = await boss.send(...sendArgs) - assert(publish2) - const fetch2 = await 
boss.fetch(...fetchArgs) - assert(fetch2) - }) - - it('when enforceSingletonQueueActiveLimit=true, should not fetch singleton queue job if there is already an active one', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - const jobOptions = { singletonKey: 'singleton_queue_active_test', useSingletonQueue: true } - const sendArgs = [queue, {}, jobOptions] - const fetchArgs = [queue, undefined, { enforceSingletonQueueActiveLimit: true }] - - const publish1 = await boss.send(...sendArgs) - assert(publish1) - const fetch1 = await boss.fetch(...fetchArgs) - assert(fetch1) - - const publish2 = await boss.send(...sendArgs) - assert(publish2) - // Job 1 still active, can't fetch job 2 - const fetch2 = await boss.fetch(...fetchArgs) - assert(fetch2 === null) - - await boss.complete(fetch1.id) - // Job 1 no longer active, should be able to fetch job 2 - const retryFetch2 = await boss.fetch(...fetchArgs) - assert(retryFetch2) - assert(retryFetch2.id === publish2) - }) - }) }) diff --git a/test/hooks.js b/test/hooks.js index eae6392e..e25e6462 100644 --- a/test/hooks.js +++ b/test/hooks.js @@ -26,10 +26,7 @@ async function afterEach () { const { boss } = this.currentTest if (boss) { - await new Promise((resolve) => { - boss.on('stopped', resolve) - helper.stop(boss) - }) + await boss.stop({ timeout: 2000 }) } await helper.dropSchema(config.schema) diff --git a/test/insertTest.js b/test/insertTest.js index 26f455c3..9be7e7e9 100644 --- a/test/insertTest.js +++ b/test/insertTest.js @@ -1,5 +1,5 @@ const assert = require('assert') -const { v4: uuid } = require('uuid') +const { randomUUID } = require('crypto') const helper = require('./testHelper') describe('insert', function () { @@ -21,7 +21,7 @@ describe('insert', function () { const queue = this.test.bossConfig.schema const input = { - id: uuid(), + id: randomUUID(), name: queue, priority: 1, data: { some: 'data' }, @@ -32,7 +32,7 @@ 
describe('insert', function () { expireInSeconds: 5, singletonKey: '123', keepUntil: new Date().toISOString(), - onComplete: true + deadLetter: `${queue}_dlq` } await boss.insert([input]) @@ -50,7 +50,7 @@ describe('insert', function () { assert.strictEqual(job.expirein.seconds, input.expireInSeconds, `expireInSeconds input ${input.expireInSeconds} didn't match job ${job.expirein}`) assert.strictEqual(job.singletonkey, input.singletonKey, `name input ${input.singletonKey} didn't match job ${job.singletonkey}`) assert.strictEqual(new Date(job.keepuntil).toISOString(), input.keepUntil, `keepUntil input ${input.keepUntil} didn't match job ${job.keepuntil}`) - assert.strictEqual(job.on_complete, input.onComplete, `onComplete input ${input.onComplete} didn't match job ${job.on_complete}`) + assert.strictEqual(job.deadletter, input.deadLetter, `deadLetter input ${input.deadLetter} didn't match job ${job.deadletter}`) }) it('should create jobs from an array with all properties and custom connection', async function () { @@ -58,7 +58,7 @@ describe('insert', function () { const queue = this.test.bossConfig.schema const input = { - id: uuid(), + id: randomUUID(), name: queue, priority: 1, data: { some: 'data' }, @@ -69,7 +69,7 @@ describe('insert', function () { expireInSeconds: 5, singletonKey: '123', keepUntil: new Date().toISOString(), - onComplete: true + deadLetter: `${queue}_dlq` } let called = false const db = await helper.getDb() @@ -97,7 +97,7 @@ describe('insert', function () { assert.strictEqual(job.expirein.seconds, input.expireInSeconds, `expireInSeconds input ${input.expireInSeconds} didn't match job ${job.expirein}`) assert.strictEqual(job.singletonkey, input.singletonKey, `name input ${input.singletonKey} didn't match job ${job.singletonkey}`) assert.strictEqual(new Date(job.keepuntil).toISOString(), input.keepUntil, `keepUntil input ${input.keepUntil} didn't match job ${job.keepuntil}`) - assert.strictEqual(job.on_complete, input.onComplete, `onComplete 
input ${input.onComplete} didn't match job ${job.on_complete}`) + assert.strictEqual(job.deadletter, input.deadLetter, `deadLetter input ${input.deadLetter} didn't match job ${job.deadletter}`) assert.strictEqual(called, true) }) }) diff --git a/test/maintenanceTest.js b/test/maintenanceTest.js index 5270d212..c84de149 100644 --- a/test/maintenanceTest.js +++ b/test/maintenanceTest.js @@ -1,53 +1,40 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') -const PgBoss = require('../') +const { delay } = require('../src/tools') describe('maintenance', async function () { - it('should send maintenance job if missing during monitoring', async function () { - const config = { ...this.test.bossConfig, maintenanceIntervalSeconds: 1 } + it('clearStorage() should empty both job storage tables', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, archiveCompletedAfterSeconds: 1 }) + const queue = this.test.bossConfig.schema - const db = await helper.getDb() - - const boss = this.test.boss = new PgBoss(config) - - const queues = boss.boss.getQueueNames() - const countJobs = () => helper.countJobs(config.schema, 'name = $1', [queues.MAINTENANCE]) - - await boss.start() + const jobId = await boss.send(queue) + await boss.fetch(queue) + await boss.complete(jobId) - boss.on('maintenance', async () => { - // force timestamp to an older date - await db.executeSql(`UPDATE ${config.schema}.version SET maintained_on = now() - interval '5 minutes'`) - }) + await delay(1000) + await boss.maintain() - // wait for monitoring to check timestamp - await delay(4000) + await boss.send(queue) - const count = await countJobs() - assert(count > 1) - }) + const db = await helper.getDb() - it('meta monitoring error handling works', async function () { - const config = { - ...this.test.bossConfig, - maintenanceIntervalSeconds: 1, - __test__throw_meta_monitor: 'meta monitoring error' + const getJobCount 
= async table => { + const jobCountResult = await db.executeSql(`SELECT count(*)::int as job_count FROM ${this.test.bossConfig.schema}.${table}`) + return jobCountResult.rows[0].job_count } - let errorCount = 0 - - const boss = this.test.boss = new PgBoss(config) + const preJobCount = await getJobCount('job') + const preArchiveCount = await getJobCount('archive') - boss.once('error', (error) => { - assert.strictEqual(error.message, config.__test__throw_meta_monitor) - errorCount++ - }) + assert.strictEqual(preJobCount, 1) + assert.strictEqual(preArchiveCount, 1) - await boss.start() + await boss.clearStorage() - await delay(6000) + const postJobCount = await getJobCount('job') + const postArchiveCount = await getJobCount('archive') - assert.strictEqual(errorCount, 1) + assert.strictEqual(postJobCount, 0) + assert.strictEqual(postArchiveCount, 0) }) }) diff --git a/test/managerTest.js b/test/managerTest.js index 18569807..15007be6 100644 --- a/test/managerTest.js +++ b/test/managerTest.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const helper = require('./testHelper') diff --git a/test/migrationTest.js b/test/migrationTest.js index 8147af30..81350296 100644 --- a/test/migrationTest.js +++ b/test/migrationTest.js @@ -9,7 +9,7 @@ describe('migration', function () { let contractor beforeEach(async function () { - const db = await helper.getDb() + const db = await helper.getDb({ debug: false }) contractor = new Contractor(db, this.currentTest.bossConfig) }) @@ -32,7 +32,7 @@ describe('migration', function () { await contractor.rollback(currentSchemaVersion) - const config = { ...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } const boss = this.test.boss = new PgBoss(config) @@ -46,7 +46,7 @@ describe('migration', function () { it('should migrate through 2 versions back and forth', async function () { const queue = 'migrate-back-2-and-forward' - const 
config = { ...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } const boss = this.test.boss = new PgBoss(config) @@ -59,10 +59,6 @@ describe('migration', function () { const job = await boss.fetch(queue) await boss.complete(job.id) - // active job - await boss.send(queue) - await boss.fetch(queue) - // created job await boss.send(queue) @@ -85,6 +81,10 @@ describe('migration', function () { const version = await contractor.version() assert.strictEqual(version, currentSchemaVersion) + + await boss.send(queue) + const job2 = await boss.fetch(queue) + await boss.complete(job2.id) }) it('should migrate to latest during start if on previous 2 schema versions', async function () { @@ -98,7 +98,7 @@ describe('migration', function () { const twoVersionsAgo = await contractor.version() assert.strictEqual(twoVersionsAgo, currentSchemaVersion - 2) - const config = { ...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } const boss = this.test.boss = new PgBoss(config) await boss.start() @@ -118,7 +118,7 @@ describe('migration', function () { }) it('should roll back an error during a migration', async function () { - const config = { ...this.test.bossConfig, noSupervisor: true } + const config = { ...this.test.bossConfig } config.migrations = migrationStore.getAll(config.schema) @@ -136,7 +136,7 @@ describe('migration', function () { } catch (error) { assert(error.message.includes('wat')) } finally { - await boss1.stop({ graceful: false }) + await boss1.stop({ graceful: false, wait: false }) } const version1 = await contractor.version() @@ -154,6 +154,52 @@ describe('migration', function () { assert.strictEqual(version2, currentSchemaVersion) - await boss2.stop({ graceful: false }) + await boss2.stop({ graceful: false, wait: false }) + }) + + it('should not install if migrate option is false', async function () { + const config = { ...this.test.bossConfig, migrate: false } + const boss = this.test.boss = 
new PgBoss(config) + try { + await boss.start() + assert(false) + } catch (err) { + assert(true) + } + }) + it('should not migrate if migrate option is false', async function () { + await contractor.create() + + await contractor.rollback(currentSchemaVersion) + + const config = { ...this.test.bossConfig, migrate: false } + const boss = this.test.boss = new PgBoss(config) + + try { + await boss.start() + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should still work if migrate option is false', async function () { + await contractor.create() + + const config = { ...this.test.bossConfig, migrate: false } + const queue = this.test.bossConfig.schema + + const boss = this.test.boss = new PgBoss(config) + + try { + await boss.start() + await boss.send(queue) + const job = await boss.fetch(queue) + await boss.complete(job.id) + + assert(false) + } catch (err) { + assert(true) + } }) }) diff --git a/test/moduleTest.js b/test/moduleTest.js index 94377e1e..244eb2b3 100644 --- a/test/moduleTest.js +++ b/test/moduleTest.js @@ -8,7 +8,6 @@ describe('module', function () { assert(states.retry) assert(states.active) assert(states.completed) - assert(states.expired) assert(states.cancelled) assert(states.failed) }) diff --git a/test/monitoringTest.js b/test/monitoringTest.js index 929209ff..9d362e87 100644 --- a/test/monitoringTest.js +++ b/test/monitoringTest.js @@ -3,14 +3,13 @@ const helper = require('./testHelper') describe('monitoring', function () { it('should emit state counts', async function () { - const defaults = { - monitorStateIntervalSeconds: 1, - maintenanceIntervalSeconds: 10 + const config = { + ...this.test.bossConfig, + monitorStateIntervalSeconds: 1 } - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'monitorMe' + const boss = this.test.boss = await helper.start(config) + const queue = this.test.bossConfig.schema await boss.send(queue) await boss.send(queue) @@ -43,18 +42,13 @@ 
describe('monitoring', function () { assert.strictEqual(2, states4.queues[queue].active, 'active count is wrong after 3 sendes and 3 fetches and 1 complete') assert.strictEqual(1, states4.queues[queue].completed, 'completed count is wrong after 3 sendes and 3 fetches and 1 complete') - return new Promise((resolve) => { - let resolved = false - - boss.on('monitor-states', async states => { - if (!resolved) { - resolved = true - assert.strictEqual(states4.queues[queue].created, states.queues[queue].created, 'created count from monitor-states doesn\'t match') - assert.strictEqual(states4.queues[queue].active, states.queues[queue].active, 'active count from monitor-states doesn\'t match') - assert.strictEqual(states4.queues[queue].completed, states.queues[queue].completed, 'completed count from monitor-states doesn\'t match') + await new Promise((resolve) => { + boss.once('monitor-states', async states => { + assert.strictEqual(states4.queues[queue].created, states.queues[queue].created, 'created count from monitor-states doesn\'t match') + assert.strictEqual(states4.queues[queue].active, states.queues[queue].active, 'active count from monitor-states doesn\'t match') + assert.strictEqual(states4.queues[queue].completed, states.queues[queue].completed, 'completed count from monitor-states doesn\'t match') - resolve() - } + resolve() }) }) }) diff --git a/test/multiMasterTest.js b/test/multiMasterTest.js index 361d7c4d..c714fa71 100644 --- a/test/multiMasterTest.js +++ b/test/multiMasterTest.js @@ -1,5 +1,4 @@ const assert = require('assert') -const delay = require('delay') const helper = require('./testHelper') const PgBoss = require('../') const Contractor = require('../src/contractor') @@ -10,7 +9,7 @@ const pMap = require('p-map') describe('multi-master', function () { it('should only allow 1 master to start at a time', async function () { const replicaCount = 20 - const config = { ...this.test.bossConfig, noSupervisor: true, max: 2 } + const config = { 
...this.test.bossConfig, supervise: false, max: 2 } const instances = [] for (let i = 0; i < replicaCount; i++) { @@ -22,13 +21,17 @@ describe('multi-master', function () { } catch (err) { assert(false, err.message) } finally { - await pMap(instances, i => i.stop({ graceful: false })) + await pMap(instances, i => i.stop({ graceful: false, wait: false })) } }) it('should only allow 1 master to migrate to latest at a time', async function () { - const replicaCount = 5 - const config = { ...this.test.bossConfig, noSupervisor: true, max: 2 } + const config = { + ...this.test.bossConfig, + supervise: true, + maintenanceIntervalSeconds: 1, + max: 2 + } const db = await helper.getDb() const contractor = new Contractor(db, config) @@ -46,7 +49,7 @@ describe('multi-master', function () { const instances = [] - for (let i = 0; i < replicaCount; i++) { + for (let i = 0; i < 5; i++) { instances.push(new PgBoss(config)) } @@ -55,49 +58,7 @@ describe('multi-master', function () { } catch (err) { assert(false) } finally { - await pMap(instances, i => i.stop({ graceful: false })) - } - }) - - it('should clear maintenance queue before supervising', async function () { - const { states } = PgBoss - const jobCount = 5 - - const defaults = { - maintenanceIntervalSeconds: 1, - noSupervisor: true + await pMap(instances, i => i.stop({ graceful: false, wait: false })) } - - const config = { ...this.test.bossConfig, ...defaults } - - let boss = new PgBoss(config) - - const queues = boss.boss.getQueueNames() - const countJobs = (state) => helper.countJobs(config.schema, 'name = $1 AND state = $2', [queues.MAINTENANCE, state]) - - await boss.start() - - // create extra maintenace jobs manually - for (let i = 0; i < jobCount; i++) { - await boss.send(queues.MAINTENANCE) - } - - const beforeCount = await countJobs(states.created) - - assert.strictEqual(beforeCount, jobCount) - - await boss.stop({ graceful: false }) - - boss = new PgBoss(this.test.bossConfig) - - await boss.start() - - await 
delay(3000) - - const completedCount = await countJobs(states.completed) - - assert.strictEqual(completedCount, 1) - - await boss.stop({ graceful: false }) }) }) diff --git a/test/opsTest.js b/test/opsTest.js index 0e1ff95b..20616e8e 100644 --- a/test/opsTest.js +++ b/test/opsTest.js @@ -1,47 +1,25 @@ const assert = require('assert') const helper = require('./testHelper') -const { v4: uuid } = require('uuid') -const delay = require('delay') +const { randomUUID } = require('crypto') describe('ops', function () { - const defaults = { - noSupervisor: true, - noScheduling: true - } - it('should expire manually', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.expire() }) it('should archive manually', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) await boss.archive() }) it('should purge the archive manually', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - await boss.purge() - }) - - it('stop should re-emit stoppped if already stopped', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const stopPromise1 = new Promise(resolve => boss.once('stopped', resolve)) - - await boss.stop({ timeout: 1 }) - - await stopPromise1 - - const stopPromise2 = new Promise(resolve => boss.once('stopped', resolve)) - - await boss.stop({ timeout: 1 }) - - await stopPromise2 + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + await boss.drop() }) it('should emit error in worker', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, __test__throw_worker: true }) + const boss = 
this.test.boss = await helper.start({ ...this.test.bossConfig, __test__throw_worker: true }) const queue = this.test.bossConfig.schema await boss.send(queue) @@ -51,57 +29,32 @@ describe('ops', function () { }) it('should return null from getJobById if not found', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) - const jobId = await boss.getJobById(uuid()) + const jobId = await boss.getJobById(randomUUID()) assert.strictEqual(jobId, null) }) it('should force stop', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - await boss.stop({ graceful: false }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + await boss.stop({ graceful: false, wait: false }) }) it('should destroy the connection pool', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - await boss.stop({ destroy: true, graceful: false }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + await boss.stop({ destroy: true, graceful: false, wait: false }) assert(boss.db.pool.totalCount === 0) }) it('should destroy the connection pool gracefully', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - await boss.stop({ destroy: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + await boss.stop({ destroy: true, wait: false }) await new Promise((resolve) => { boss.on('stopped', () => resolve()) }) assert(boss.db.pool.totalCount === 0) }) - - it('should emit error during graceful stop if worker is busy', async function () { - const boss = await helper.start({ ...this.test.bossConfig, ...defaults, __test__throw_stop: true }) - const queue = this.test.bossConfig.schema - - await 
boss.send(queue) - await boss.work(queue, () => delay(2000)) - - await delay(500) - - await boss.stop({ timeout: 5000 }) - - await new Promise(resolve => boss.on('error', resolve)) - }) - - it('should throw error during graceful stop if no workers are busy', async function () { - const boss = await helper.start({ ...this.test.bossConfig, ...defaults, __test__throw_stop: true }) - - try { - await boss.stop({ timeout: 1 }) - assert(false) - } catch (err) { - assert(true) - } - }) }) diff --git a/test/priorityTest.js b/test/priorityTest.js index 468fab72..abd55b92 100644 --- a/test/priorityTest.js +++ b/test/priorityTest.js @@ -4,22 +4,20 @@ const helper = require('./testHelper') describe('priority', function () { it('higher priority job', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const jobName = 'priority-test' + await boss.send(queue) - await boss.send(jobName) + const high = await boss.send(queue, null, { priority: 1 }) - const high = await boss.send(jobName, null, { priority: 1 }) - - const job = await boss.fetch(jobName) + const job = await boss.fetch(queue) assert.strictEqual(job.id, high) }) it('descending priority order', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'multiple-priority-test' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema const low = await boss.send(queue, null, { priority: 1 }) const medium = await boss.send(queue, null, { priority: 5 }) diff --git a/test/queueTest.js b/test/queueTest.js new file mode 100644 index 00000000..5d18072e --- /dev/null +++ b/test/queueTest.js @@ -0,0 +1,282 @@ +const assert = require('assert') +const helper = require('./testHelper') + +describe('queues', function () { + it('should create a queue', async function () { + const boss = this.test.boss = await helper.start({ 
...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue) + }) + + it('should reject a queue with invalid characters', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = `*${this.test.bossConfig.schema}` + + try { + await boss.createQueue(queue) + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should reject a queue that starts with a number', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = `4${this.test.bossConfig.schema}` + + try { + await boss.createQueue(queue) + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should reject a queue with invalid policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + try { + await boss.createQueue(queue, { policy: 'something' }) + assert(false) + } catch (err) { + assert(true) + } + }) + + it('should create a queue with standard policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'standard' }) + }) + + it('should create a queue with stately policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'stately' }) + }) + + it('should create a queue with singleton policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'singleton' }) + }) + + it('should create a queue with short policy', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = 
this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'short' }) + }) + + it('should delete a queue', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue) + await boss.deleteQueue(queue) + }) + + it('should purge a queue', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue) + await boss.purgeQueue(queue) + }) + + it('getQueue() returns null when missing', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = await boss.getQueue(this.test.bossConfig.schema) + assert.strictEqual(queue, null) + }) + + it('should update queue properties', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + const createProps = { + retryLimit: 1, + retryBackoff: false, + retryDelay: 1, + expireInSeconds: 1, + retentionMinutes: 1, + deadLetter: `${queue}_1` + } + + await boss.createQueue(queue, createProps) + + let queueObj = await boss.getQueue(queue) + + assert.strictEqual(createProps.retryLimit, queueObj.retryLimit) + assert.strictEqual(createProps.retryBackoff, queueObj.retryBackoff) + assert.strictEqual(createProps.retryDelay, queueObj.retryDelay) + assert.strictEqual(createProps.expireInSeconds, queueObj.expireInSeconds) + assert.strictEqual(createProps.retentionMinutes, queueObj.retentionMinutes) + assert.strictEqual(createProps.deadLetter, queueObj.deadLetter) + + const updateProps = { + retryLimit: 2, + retryBackoff: true, + retryDelay: 2, + expireInSeconds: 2, + retentionMinutes: 2, + deadLetter: `${queue}_2` + } + + await boss.updateQueue(queue, updateProps) + + queueObj = await boss.getQueue(queue) + + assert.strictEqual(updateProps.retryLimit, 
queueObj.retryLimit) + assert.strictEqual(updateProps.retryBackoff, queueObj.retryBackoff) + assert.strictEqual(updateProps.retryDelay, queueObj.retryDelay) + assert.strictEqual(updateProps.expireInSeconds, queueObj.expireInSeconds) + assert.strictEqual(updateProps.retentionMinutes, queueObj.retentionMinutes) + assert.strictEqual(updateProps.deadLetter, queueObj.deadLetter) + }) + + it('jobs should inherit properties from queue', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema + + const createProps = { + retryLimit: 1, + retryBackoff: true, + retryDelay: 2, + expireInSeconds: 3, + retentionMinutes: 4, + deadLetter: `${queue}_1` + } + + await boss.createQueue(queue, createProps) + + const jobId = await boss.send(queue) + + const job = await boss.getJobById(jobId) + + const retentionMinutes = (new Date(job.keepuntil) - new Date(job.createdon)) / 1000 / 60 + + assert.strictEqual(createProps.retryLimit, job.retrylimit) + assert.strictEqual(createProps.retryBackoff, job.retrybackoff) + assert.strictEqual(createProps.retryDelay, job.retrydelay) + assert.strictEqual(createProps.expireInSeconds, job.expirein.seconds) + assert.strictEqual(createProps.retentionMinutes, retentionMinutes) + assert.strictEqual(createProps.deadLetter, job.deadletter) + }) + + it('short policy only allows 1 job in queue', async function () { + const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'short' }) + + const jobId = await boss.send(queue) + + assert(jobId) + + const jobId2 = await boss.send(queue) + + assert.strictEqual(jobId2, null) + + const job = await boss.fetch(queue) + + assert.strictEqual(job.id, jobId) + + const jobId3 = await boss.send(queue) + + assert(jobId3) + }) + + it('singleton policy only allows 1 active job', async function () { + const boss = this.test.boss = await 
helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'singleton' }) + + await boss.send(queue) + + await boss.send(queue) + + const job1 = await boss.fetch(queue) + + const job2 = await boss.fetch(queue) + + assert.strictEqual(job2, null) + + await boss.complete(job1.id) + + const job3 = await boss.fetch(queue) + + assert(job3) + }) + + it('stately policy only allows 1 job per state up to active', async function () { + const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema + + await boss.createQueue(queue, { policy: 'stately' }) + + const jobId1 = await boss.send(queue, null, { retryLimit: 1 }) + + const blockedId = await boss.send(queue) + + assert.strictEqual(blockedId, null) + + let job1 = await boss.fetch(queue) + + await boss.fail(job1.id) + + job1 = await boss.getJobById(jobId1) + + assert.strictEqual(job1.state, 'retry') + + const jobId2 = await boss.send(queue, null, { retryLimit: 1 }) + + assert(jobId2) + + job1 = await boss.fetch(queue) + + job1 = await boss.getJobById(jobId1) + + assert.strictEqual(job1.state, 'active') + + const blockedSecondActive = await boss.fetch(queue) + + assert.strictEqual(blockedSecondActive, null) + }) + + it('should clear a specific queue', async function () { + const boss = this.test.boss = await helper.start(this.test.bossConfig) + + const queue1 = `${this.test.bossConfig.schema}1` + const queue2 = `${this.test.bossConfig.schema}2` + + await boss.send(queue1) + await boss.send(queue2) + + const q1Count1 = await boss.getQueueSize(queue1) + const q2Count1 = await boss.getQueueSize(queue2) + + assert.strictEqual(1, q1Count1) + assert.strictEqual(1, q2Count1) + + await boss.purgeQueue(queue1) + + const q1Count2 = await boss.getQueueSize(queue1) + const q2Count2 = await boss.getQueueSize(queue2) + + assert.strictEqual(0, q1Count2) + assert.strictEqual(1, q2Count2) + + await boss.purgeQueue(queue2) + + 
const q2Count3 = await boss.getQueueSize(queue2) + + assert.strictEqual(0, q2Count3) + }) +}) diff --git a/test/readme.js b/test/readme.js index cac01c37..7574af7e 100644 --- a/test/readme.js +++ b/test/readme.js @@ -8,17 +8,20 @@ async function readme () { await boss.start() - const queue = 'some-queue' + const queue = 'some_queue' + + try { + await boss.createQueue(queue) + } catch {} await boss.schedule(queue, '* * * * *') console.log(`created cronjob in queue ${queue}`) - await boss.work(queue, someAsyncJobHandler) -} - -async function someAsyncJobHandler (job) { - console.log(`running job ${job.id}`) + await boss.work(queue, async job => { + console.log(`running job ${job.id}`) + boss.unschedule(queue) + }) } readme() diff --git a/test/retryTest.js b/test/retryTest.js index 13f5dbb4..7b512423 100644 --- a/test/retryTest.js +++ b/test/retryTest.js @@ -1,20 +1,18 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('retries', function () { - const defaults = { maintenanceIntervalSeconds: 1 } - it('should retry a job that didn\'t complete', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'unreliable' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema const jobId = await boss.send({ name: queue, options: { expireInSeconds: 1, retryLimit: 1 } }) const try1 = await boss.fetch(queue) - await delay(5000) + await delay(1000) + await boss.maintain() const try2 = await boss.fetch(queue) @@ -23,41 +21,36 @@ describe('retries', function () { }) it('should retry a job that failed', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queueName = 'retryFailed' - const retryLimit = 1 + const boss = this.test.boss = await helper.start({ 
...this.test.bossConfig }) + const queue = this.test.bossConfig.schema - const jobId = await boss.send(queueName, null, { retryLimit }) + const jobId = await boss.send(queue, null, { retryLimit: 1 }) - await boss.fetch(queueName) + await boss.fetch(queue) await boss.fail(jobId) - const job = await boss.fetch(queueName) + const job = await boss.fetch(queue) assert.strictEqual(job.id, jobId) }) it('should retry a job that failed with cascaded config', async function () { - const retryLimit = 1 - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults, retryLimit }) - - const queueName = 'retryFailed-config-cascade' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, retryLimit: 1 }) + const queue = this.test.bossConfig.schema - const jobId = await boss.send(queueName) + const jobId = await boss.send(queue) - await boss.fetch(queueName) + await boss.fetch(queue) await boss.fail(jobId) - const job = await boss.fetch(queueName) + const job = await boss.fetch(queue) assert.strictEqual(job.id, jobId) }) it('should retry with a fixed delay', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'retryDelayFixed' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema const jobId = await boss.send(queue, null, { retryLimit: 1, retryDelay: 1 }) @@ -76,9 +69,8 @@ describe('retries', function () { }) it('should retry with a exponential backoff', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'retryDelayBackoff' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema let processCount = 0 const retryLimit = 4 @@ -94,25 +86,4 @@ describe('retries', function () { assert(processCount < retryLimit) }) - - it('should set the default 
retry limit to 1 if missing', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, ...defaults }) - - const queue = 'retryLimitDefault' - - const jobId = await boss.send(queue, null, { retryDelay: 1 }) - - await boss.fetch(queue) - await boss.fail(jobId) - - const job1 = await boss.fetch(queue) - - assert.strictEqual(job1, null) - - await delay(1000) - - const job2 = await boss.fetch(queue) - - assert(job2) - }) }) diff --git a/test/scheduleTest.js b/test/scheduleTest.js index c3c3fde5..88f99832 100644 --- a/test/scheduleTest.js +++ b/test/scheduleTest.js @@ -1,4 +1,4 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const { DateTime } = require('luxon') const helper = require('./testHelper') @@ -11,7 +11,8 @@ describe('schedule', function () { it('should send job based on every minute expression', async function () { const config = { ...this.test.bossConfig, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -27,11 +28,33 @@ describe('schedule', function () { assert(job) }) + it('should not enable scheduling if archive config is < 60s', async function () { + const config = { + ...this.test.bossConfig, + clockMonitorIntervalSeconds: 1, + cronWorkerIntervalSeconds: 1, + archiveCompletedAfterSeconds: 1, + schedule: true + } + + const boss = this.test.boss = await helper.start(config) + const queue = this.test.bossConfig.schema + + await boss.schedule(queue, '* * * * *') + + await delay(ASSERT_DELAY) + + const job = await boss.fetch(queue) + + assert.strictEqual(job, null) + }) + it('should accept a custom clock monitoring interval in seconds', async function () { const config = { ...this.test.bossConfig, clockMonitorIntervalSeconds: 1, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + schedule: true } const boss = this.test.boss = await helper.start(config) 
@@ -51,7 +74,8 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -76,8 +100,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, - noScheduling: true, - noSupervisor: true + schedule: false } let boss = await helper.start(config) @@ -86,9 +109,9 @@ describe('schedule', function () { await boss.schedule(queue, '* * * * *') - await boss.stop() + await boss.stop({ wait: false }) - boss = await helper.start({ ...this.test.bossConfig, cronWorkerIntervalSeconds: 1 }) + boss = await helper.start({ ...this.test.bossConfig, cronWorkerIntervalSeconds: 1, schedule: true }) await delay(ASSERT_DELAY) @@ -96,13 +119,12 @@ describe('schedule', function () { assert(job) - await boss.stop() + await boss.stop({ wait: false }) }) it('should remove previously scheduled job', async function () { const config = { ...this.test.bossConfig, - noSupervisor: true, cronWorkerIntervalSeconds: 1 } const boss = this.test.boss = await helper.start(config) @@ -113,7 +135,7 @@ describe('schedule', function () { await boss.unschedule(queue) - await boss.stop({ graceful: false }) + await boss.stop({ graceful: false, wait: false }) const db = await helper.getDb() await db.executeSql(plans.clearStorage(this.test.bossConfig.schema)) @@ -130,7 +152,8 @@ describe('schedule', function () { it('should send job based on current minute in UTC', async function () { const config = { ...this.test.bossConfig, - cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -165,7 +188,8 @@ describe('schedule', function () { it('should send job based on current minute in a specified time zone', async function () { const config = { ...this.test.bossConfig, - 
cronWorkerIntervalSeconds: 1 + cronWorkerIntervalSeconds: 1, + schedule: true } const boss = this.test.boss = await helper.start(config) @@ -202,6 +226,7 @@ describe('schedule', function () { it('should force a clock skew warning', async function () { const config = { ...this.test.bossConfig, + schedule: true, __test__force_clock_skew_warning: true } @@ -229,6 +254,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, clockMonitorIntervalSeconds: 1, + schedule: true, __test__force_clock_monitoring_error: 'pg-boss mock error: clock skew monitoring' } @@ -252,6 +278,7 @@ describe('schedule', function () { const config = { ...this.test.bossConfig, cronMonitorIntervalSeconds: 1, + schedule: true, __test__force_cron_monitoring_error: 'pg-boss mock error: cron monitoring' } @@ -270,4 +297,52 @@ describe('schedule', function () { assert.strictEqual(errorCount, 1) }) + + it('errors during cron processing should emit', async function () { + const config = { + ...this.test.bossConfig, + cronWorkerIntervalSeconds: 1, + schedule: true, + __test__throw_cron_processing: 'cron processing' + } + + let errorCount = 0 + + const boss = this.test.boss = new PgBoss(config) + + boss.once('error', error => { + assert.strictEqual(error.message, config.__test__throw_cron_processing) + errorCount++ + }) + + await boss.start() + + await delay(2000) + + assert.strictEqual(errorCount, 1) + }) + + it('clock monitoring error handling works', async function () { + const config = { + ...this.test.bossConfig, + schedule: true, + clockMonitorIntervalSeconds: 1, + __test__force_clock_monitoring_error: 'pg-boss mock error: clock monitoring' + } + + let errorCount = 0 + + const boss = this.test.boss = new PgBoss(config) + + boss.once('error', (error) => { + assert.strictEqual(error.message, config.__test__force_clock_monitoring_error) + errorCount++ + }) + + await boss.start() + + await delay(4000) + + assert.strictEqual(errorCount, 1) + }) }) diff --git 
a/test/singletonTest.js b/test/singletonTest.js index 10e17271..f6bafa0a 100644 --- a/test/singletonTest.js +++ b/test/singletonTest.js @@ -1,12 +1,11 @@ const assert = require('assert') -const { v4: uuid } = require('uuid') const helper = require('./testHelper') -describe('singleton', function () { +describe('singleton keys', function () { it('should not allow more than 1 pending job at a time with the same key', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema - const queue = 'singleton-1-pending' const singletonKey = 'a' const jobId = await boss.send(queue, null, { singletonKey }) @@ -20,8 +19,8 @@ describe('singleton', function () { it('should not allow more than 1 complete job with the same key with an interval', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'singleton-1-complete' const singletonKey = 'a' const singletonMinutes = 1 @@ -47,111 +46,4 @@ describe('singleton', function () { assert(jobId2) }) - - it('sendOnce() should work', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'sendOnce' - const key = 'only-once-plz' - - const jobId = await boss.sendOnce(queue, null, null, key) - - assert(jobId) - - const jobId2 = await boss.sendOnce(queue, null, null, key) - - assert.strictEqual(jobId2, null) - - const job = await boss.fetch(queue) - - assert.strictEqual(job.id, jobId) - - const jobId3 = await boss.sendOnce(queue, null, null, key) - - assert.strictEqual(jobId3, null) - }) - - it('sendOnce() without a key should also work', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'sendOnceNoKey' - const jobId = await boss.sendOnce(queue) - - assert(jobId) - - const 
jobId2 = await boss.sendOnce(queue) - - assert.strictEqual(jobId2, null) - }) - - it('sendSingleton() works', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = this.test.bossConfig.schema - - const jobId = await boss.sendSingleton(queue) - - assert(jobId) - - const jobId2 = await boss.sendSingleton(queue) - - assert.strictEqual(jobId2, null) - - const job = await boss.fetch(queue) - - assert.strictEqual(job.id, jobId) - - const jobId3 = await boss.sendSingleton(queue) - - assert(jobId3) - }) - - it('useSingletonQueue allows a second singleton job if first has enetered active state', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'singleton-queue-check' - const singletonKey = 'myKey' - - const jobId = await boss.send(queue, null, { singletonKey, useSingletonQueue: true }) - - assert(jobId) - - const jobId2 = await boss.send(queue, null, { singletonKey, useSingletonQueue: true }) - - assert.strictEqual(jobId2, null) - - const job = await boss.fetch(queue) - - assert.strictEqual(job.id, jobId) - - const jobId3 = await boss.send(queue, null, { singletonKey, useSingletonQueue: true }) - - assert(jobId3) - }) - - it('useSingletonQueue works when using insert', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const name = 'singleton-queue-check' - const singletonKey = 'myKey' - - const jobId = uuid() - await boss.insert([{ id: jobId, name, singletonKey, useSingletonQueue: true }]) - - assert(await boss.getJobById(jobId)) - - const jobId2 = uuid() - await boss.insert([{ id: jobId2, name, singletonKey, useSingletonQueue: true }]) - - assert.strictEqual(await boss.getJobById(jobId2), null) - - const job = await boss.fetch(name) - - assert.strictEqual(job.id, jobId) - - const jobId3 = uuid() - await boss.insert([{ id: jobId3, name, singletonKey, useSingletonQueue: true }]) - - assert(await 
boss.getJobById(jobId3)) - }) }) diff --git a/test/speedTest.js b/test/speedTest.js index f66bc933..c7552d9c 100644 --- a/test/speedTest.js +++ b/test/speedTest.js @@ -1,31 +1,24 @@ const helper = require('./testHelper') -const pMap = require('p-map') +const assert = require('assert') describe('speed', function () { - const expectedSeconds = 2 - const jobCount = 10000 + const expectedSeconds = 4 + const jobCount = 10_000 const queue = 'speedTest' - - const jobs = new Array(jobCount).fill(null).map((item, index) => ({ name: queue, data: { index } })) - + const data = new Array(jobCount).fill(null).map((item, index) => ({ name: queue, data: { index } })) const testTitle = `should be able to fetch and complete ${jobCount} jobs in ${expectedSeconds} seconds` - let boss - - beforeEach(async function () { - const defaults = { noSupervisor: true, min: 10, max: 10 } - boss = await helper.start({ ...this.currentTest.bossConfig, ...defaults }) - await pMap(jobs, job => boss.send(job.name, job.data)) - }) - - afterEach(async function () { await helper.stop(boss) }) - it(testTitle, async function () { this.timeout(expectedSeconds * 1000) this.slow(0) - this.retries(1) + const config = { ...this.test.bossConfig, min: 10, max: 10 } + const boss = this.test.boss = await helper.start(config) + await boss.insert(data) const jobs = await boss.fetch(queue, jobCount) + + assert.strictEqual(jobCount, jobs.length) + await boss.complete(jobs.map(job => job.id)) }) }) diff --git a/test/testHelper.js b/test/testHelper.js index 16bf6b61..b4555d9a 100644 --- a/test/testHelper.js +++ b/test/testHelper.js @@ -1,19 +1,15 @@ const Db = require('../src/db') const PgBoss = require('../') -const plans = require('../src/plans') -const { COMPLETION_JOB_PREFIX } = plans const crypto = require('crypto') const sha1 = (value) => crypto.createHash('sha1').update(value).digest('hex') module.exports = { dropSchema, start, - stop, getDb, getArchivedJobById, countJobs, findJobs, - COMPLETION_JOB_PREFIX, 
getConfig, getConnectionString, tryCreateDb, @@ -39,6 +35,10 @@ function getConfig (options = {}) { config.schema = config.schema || 'pgboss' + config.supervise = false + config.schedule = false + config.retryLimit = 0 + const result = { ...config } return Object.assign(result, options) @@ -51,12 +51,12 @@ async function init () { await createPgCrypto(database) } -async function getDb (database) { +async function getDb ({ database, debug } = {}) { const config = getConfig() config.database = database || config.database - const db = new Db(config) + const db = new Db({ ...config, debug }) await db.open() @@ -64,7 +64,7 @@ async function getDb (database) { } async function createPgCrypto (database) { - const db = await getDb(database) + const db = await getDb({ database }) await db.executeSql('create extension if not exists pgcrypto') await db.close() } @@ -102,7 +102,7 @@ async function countJobs (schema, where, values) { } async function tryCreateDb (database) { - const db = await getDb('postgres') + const db = await getDb({ database: 'postgres' }) try { await db.executeSql(`CREATE DATABASE ${database}`) @@ -125,7 +125,3 @@ async function start (options) { } } } - -async function stop (boss, timeout = 4000) { - await boss.stop({ timeout }) -} diff --git a/test/throttleTest.js b/test/throttleTest.js index ff77afe2..7a6fc093 100644 --- a/test/throttleTest.js +++ b/test/throttleTest.js @@ -1,6 +1,6 @@ const assert = require('assert') const helper = require('./testHelper') -const delay = require('delay') +const { delay } = require('../src/tools') describe('throttle', function () { it('should only create 1 job for interval with a delay', async function () { diff --git a/test/wildcardTest.js b/test/wildcardTest.js index 73cd1161..3e369755 100644 --- a/test/wildcardTest.js +++ b/test/wildcardTest.js @@ -3,7 +3,7 @@ const helper = require('./testHelper') describe('wildcard', function () { it('fetch() should return all jobs using a wildcard pattern', async function () { - 
const boss = this.test.boss = await helper.start(this.test.bossConfig) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) const queue = this.test.bossConfig.schema await boss.send(`${queue}_1234`) @@ -28,16 +28,4 @@ describe('wildcard', function () { }) }) }) - - it('should not accidentally fetch state completion jobs from a pattern', async function () { - const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = this.test.bossConfig.schema - - await boss.send(`${queue}_1234`) - const job = await boss.fetch(`${queue}_*`) - await boss.complete(job.id) - const job2 = await boss.fetch(`${queue}_*`) - - assert.strictEqual(job2, null) - }) }) diff --git a/test/workTest.js b/test/workTest.js index 3d5d4fe3..5a5e79df 100644 --- a/test/workTest.js +++ b/test/workTest.js @@ -1,7 +1,6 @@ -const delay = require('delay') +const { delay } = require('../src/tools') const assert = require('assert') const helper = require('./testHelper') -const PgBoss = require('../') describe('work', function () { it('should fail with no arguments', async function () { @@ -73,18 +72,21 @@ describe('work', function () { const queue = this.test.bossConfig.schema let processCount = 0 - const newJobCheckIntervalSeconds = 5 await boss.send(queue) - const workerId = await boss.work(queue, { newJobCheckIntervalSeconds }, () => processCount++) - await delay(100) + const workerId = await boss.work(queue, { newJobCheckIntervalSeconds: 5 }, () => processCount++) + + await delay(500) + assert.strictEqual(processCount, 1) + await boss.send(queue) boss.notifyWorker(workerId) - await delay(100) + await delay(500) + assert.strictEqual(processCount, 2) }) @@ -128,8 +130,8 @@ describe('work', function () { it('should handle a batch of jobs via teamSize', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'process-teamSize' const teamSize = 4 let processCount 
= 0 @@ -152,8 +154,8 @@ describe('work', function () { it('should apply teamConcurrency option', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'process-teamConcurrency' const teamSize = 4 const teamConcurrency = 4 @@ -179,8 +181,8 @@ describe('work', function () { it('should handle a batch of jobs via batchSize', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) + const queue = this.test.bossConfig.schema - const queue = 'process-batchSize' const batchSize = 4 for (let i = 0; i < batchSize; i++) { @@ -199,7 +201,7 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - await boss.send(queue, null, { onComplete: true }) + const jobId = await boss.send(queue) await new Promise((resolve) => { boss.work(queue, { batchSize: 1 }, async jobs => { @@ -208,16 +210,16 @@ describe('work', function () { }) }) - await delay(2000) + await delay(500) - const result = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert(result) + assert.strictEqual(job.state, 'completed') }) it('returning promise applies backpressure', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) - const queue = 'backpressure' + const queue = this.test.bossConfig.schema const jobCount = 4 let processCount = 0 @@ -241,30 +243,34 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - this.timeout(1000) + this.timeout(2000) - const teamSize = 4 - const teamConcurrency = 2 + const jobCount = 6 - let processCount = 0 + let workCount = 0 - for (let i = 0; i < 6; i++) { + for (let i = 0; i < jobCount; i++) { await boss.send(queue) } - const newJobCheckInterval = 100 + const options = { + teamSize: 4, + 
teamConcurrency: 2, + newJobCheckInterval: 500, + teamRefill: true + } - return new Promise((resolve) => { - boss.work(queue, { teamSize, teamConcurrency, newJobCheckInterval, teamRefill: true }, async () => { - processCount++ - if (processCount === 1) { - // Test would timeout if all were blocked on this first - // process - await new Promise(resolve => setTimeout(resolve, 500)) + await new Promise((resolve) => { + boss.work(queue, options, async () => { + workCount++ + + if (workCount === 1) { + // Test would timeout if all were blocked on + await new Promise(resolve => setTimeout(resolve, 1000)) return } - if (processCount === 6) { + if (workCount === jobCount) { resolve() } }) @@ -274,73 +280,70 @@ describe('work', function () { it('does not fetch more than teamSize', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - const teamSize = 4 - const teamConcurrency = 2 - const newJobCheckInterval = 200 - let processCount = 0 - let remainCount = 0 + + const options = { + teamSize: 4, + teamConcurrency: 2, + newJobCheckInterval: 500, + teamRefill: true + } + + let workCount = 0 for (let i = 0; i < 7; i++) { await boss.send(queue) } // This should consume 5 jobs, all will block after the first job - await boss.work(queue, { teamSize, teamConcurrency, newJobCheckInterval, teamRefill: true }, async () => { - processCount++ - if (processCount > 1) await new Promise(resolve => setTimeout(resolve, 1000)) + await boss.work(queue, options, async () => { + workCount++ + if (workCount > 1) await new Promise(resolve => setTimeout(resolve, 2000)) }) - await new Promise(resolve => setTimeout(resolve, 400)) - - // this should pick up the last 2 jobs - await boss.work(queue, { teamSize, teamConcurrency, newJobCheckInterval, teamRefill: true }, async () => { - remainCount++ - }) + await new Promise(resolve => setTimeout(resolve, 1000)) - await new Promise(resolve => setTimeout(resolve, 400)) + const 
remainingJobs = await boss.fetch(queue, 2) - assert(remainCount === 2) + assert.strictEqual(2, remainingJobs.length) }) it('completion should pass string wrapped in value prop', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema - const queue = 'processCompletionString' const result = 'success' - boss.work(queue, async job => result) + const jobId = await boss.send(queue) - await boss.send(queue) + await boss.work(queue, async job => result) - await delay(8000) + await delay(1000) - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'completed') - assert.strictEqual(job.data.response.value, result) + assert.strictEqual(job.state, 'completed') + assert.strictEqual(job.output.value, result) }) it('completion via Promise resolve() should pass object payload', async function () { - const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, onComplete: true }) - - const queue = 'processCompletionObject' + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig }) + const queue = this.test.bossConfig.schema const something = 'clever' - boss.work(queue, async job => ({ something })) - - await boss.send(queue) + const jobId = await boss.send(queue) + await boss.work(queue, async () => ({ something })) - await delay(8000) + await delay(1000) - const job = await boss.fetchCompleted(queue) + const job = await boss.getJobById(jobId) - assert.strictEqual(job.data.state, 'completed') - assert.strictEqual(job.data.response.something, something) + assert.strictEqual(job.state, 'completed') + assert.strictEqual(job.output.something, something) }) it('should allow multiple workers to the same queue per instance', async function () { const boss = this.test.boss = await 
helper.start(this.test.bossConfig) - const queue = 'multiple-workers' + const queue = this.test.bossConfig.schema await boss.work(queue, () => {}) await boss.work(queue, () => {}) @@ -348,8 +351,7 @@ describe('work', function () { it('should honor the includeMetadata option', async function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) - - const queue = 'process-includeMetadata' + const queue = this.test.bossConfig.schema await boss.send(queue) @@ -361,15 +363,8 @@ describe('work', function () { }) }) - it('should fail job at expiration without maintenance', async function () { - const boss = this.test.boss = new PgBoss(this.test.bossConfig) - - const maintenanceTick = new Promise((resolve) => boss.on('maintenance', resolve)) - - await boss.start() - - await maintenanceTick - + it('should fail job at expiration in worker', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, supervise: false }) const queue = this.test.bossConfig.schema const jobId = await boss.send(queue, null, { expireInSeconds: 1 }) @@ -384,15 +379,8 @@ describe('work', function () { assert(job.output.message.includes('handler execution exceeded')) }) - it('should fail a batch of jobs at expiration without maintenance', async function () { - const boss = this.test.boss = new PgBoss(this.test.bossConfig) - - const maintenanceTick = new Promise((resolve) => boss.on('maintenance', resolve)) - - await boss.start() - - await maintenanceTick - + it('should fail a batch of jobs at expiration in worker', async function () { + const boss = this.test.boss = await helper.start({ ...this.test.bossConfig, supervise: false }) const queue = this.test.bossConfig.schema const jobId1 = await boss.send(queue, null, { expireInSeconds: 1 }) @@ -436,15 +424,13 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - boss.stop({ timeout: 1 }) - - await 
delay(500) + await boss.stop({ wait: true }) try { - await boss.work(queue) + await boss.work(queue, () => {}) assert(false) } catch (err) { - assert(err.message.includes('stopping')) + assert(true) } }) @@ -452,9 +438,7 @@ describe('work', function () { const boss = this.test.boss = await helper.start(this.test.bossConfig) const queue = this.test.bossConfig.schema - boss.stop({ timeout: 1 }) - - await delay(500) + boss.stop({ wait: true }) await boss.send(queue) }) diff --git a/types.d.ts b/types.d.ts index 2ffab407..6f372042 100644 --- a/types.d.ts +++ b/types.d.ts @@ -20,20 +20,20 @@ declare namespace PgBoss { } interface QueueOptions { - uuid?: "v1" | "v4"; monitorStateIntervalSeconds?: number; monitorStateIntervalMinutes?: number; } interface SchedulingOptions { - noScheduling?: boolean; + schedule?: boolean; clockMonitorIntervalSeconds?: number; clockMonitorIntervalMinutes?: number; } interface MaintenanceOptions { - noSupervisor?: boolean; + supervise?: boolean; + migrate?: boolean; deleteAfterSeconds?: number; deleteAfterMinutes?: number; @@ -56,11 +56,6 @@ declare namespace PgBoss { & RetentionOptions & RetryOptions & JobPollingOptions - & CompletionOptions - - interface CompletionOptions { - onComplete?: boolean; - } interface ExpirationOptions { expireInSeconds?: number; @@ -82,14 +77,15 @@ declare namespace PgBoss { } interface JobOptions { + id?: string, priority?: number; startAfter?: number | string | Date; singletonKey?: string; - useSingletonQueue?: boolean; singletonSeconds?: number; singletonMinutes?: number; singletonHours?: number; singletonNextSlot?: boolean; + deadLetter?: string; } interface ConnectionOptions { @@ -98,7 +94,7 @@ declare namespace PgBoss { type InsertOptions = ConnectionOptions; - type SendOptions = JobOptions & ExpirationOptions & RetentionOptions & RetryOptions & CompletionOptions & ConnectionOptions; + type SendOptions = JobOptions & ExpirationOptions & RetentionOptions & RetryOptions & ConnectionOptions; type 
ScheduleOptions = SendOptions & { tz?: string } @@ -109,7 +105,7 @@ declare namespace PgBoss { interface CommonJobFetchOptions { includeMetadata?: boolean; - enforceSingletonQueueActiveLimit?: boolean; + priority?: boolean; } type JobFetchOptions = CommonJobFetchOptions & { @@ -127,7 +123,6 @@ declare namespace PgBoss { type FetchOptions = { includeMetadata?: boolean; - enforceSingletonQueueActiveLimit?: boolean; } & ConnectionOptions; interface WorkHandler { @@ -183,7 +178,7 @@ declare namespace PgBoss { interface JobWithMetadata extends Job { priority: number; - state: 'created' | 'retry' | 'active' | 'completed' | 'expired' | 'cancelled' | 'failed'; + state: 'created' | 'retry' | 'active' | 'completed' | 'cancelled' | 'failed'; retrylimit: number; retrycount: number; retrydelay: number; @@ -198,7 +193,7 @@ declare namespace PgBoss { createdon: Date; completedon: Date | null; keepuntil: Date; - oncomplete: boolean, + deadletter: string, output: object } @@ -214,7 +209,7 @@ declare namespace PgBoss { singletonKey?: string; expireInSeconds?: number; keepUntil?: Date | string; - onComplete?: boolean + deadLetter?: string; } interface MonitorState { @@ -223,7 +218,6 @@ declare namespace PgBoss { retry: number; active: number; completed: number; - expired: number; cancelled: number; failed: number; } @@ -236,7 +230,7 @@ declare namespace PgBoss { id: string, name: string, options: WorkOptions, - state: 'created' | 'retry' | 'active' | 'completed' | 'expired' | 'cancelled' | 'failed', + state: 'created' | 'active' | 'stopping' | 'stopped' count: number, createdOn: Date, lastFetchedOn: Date, @@ -250,7 +244,8 @@ declare namespace PgBoss { interface StopOptions { destroy?: boolean, graceful?: boolean, - timeout?: number + timeout?: number, + wait?: boolean } interface OffWorkOptions { @@ -300,10 +295,6 @@ declare class PgBoss extends EventEmitter { sendAfter(name: string, data: object, options: PgBoss.SendOptions, dateString: string): Promise; sendAfter(name: string, 
data: object, options: PgBoss.SendOptions, seconds: number): Promise; - sendOnce(name: string, data: object, options: PgBoss.SendOptions, key: string): Promise; - - sendSingleton(name: string, data: object, options: PgBoss.SendOptions): Promise; - sendThrottled(name: string, data: object, options: PgBoss.SendOptions, seconds: number): Promise; sendThrottled(name: string, data: object, options: PgBoss.SendOptions, seconds: number, key: string): Promise; @@ -320,9 +311,6 @@ declare class PgBoss extends EventEmitter { work(name: string, options: PgBoss.BatchWorkOptions & { includeMetadata: true }, handler: PgBoss.BatchWorkWithMetadataHandler): Promise; work(name: string, options: PgBoss.BatchWorkOptions, handler: PgBoss.BatchWorkHandler): Promise; - onComplete(name: string, handler: Function): Promise; - onComplete(name: string, options: PgBoss.WorkOptions, handler: Function): Promise; - offWork(name: string): Promise; offWork(options: PgBoss.OffWorkOptions): Promise; @@ -338,19 +326,11 @@ declare class PgBoss extends EventEmitter { publish(event: string, data: object): Promise; publish(event: string, data: object, options: PgBoss.SendOptions): Promise; - offComplete(name: string): Promise; - offComplete(options: PgBoss.OffWorkOptions): Promise; - fetch(name: string): Promise | null>; fetch(name: string, batchSize: number): Promise[] | null>; fetch(name: string, batchSize: number, options: PgBoss.FetchOptions & { includeMetadata: true }): Promise[] | null>; fetch(name: string, batchSize: number, options: PgBoss.FetchOptions): Promise[] | null>; - fetchCompleted(name: string): Promise | null>; - fetchCompleted(name: string, batchSize: number): Promise[] | null>; - fetchCompleted(name: string, batchSize: number, options: PgBoss.FetchOptions & { includeMetadata: true }): Promise[] | null>; - fetchCompleted(name: string, batchSize: number, options: PgBoss.FetchOptions): Promise[] | null>; - cancel(id: string, options?: PgBoss.ConnectionOptions): Promise; cancel(ids: 
string[], options?: PgBoss.ConnectionOptions): Promise; @@ -368,13 +348,15 @@ declare class PgBoss extends EventEmitter { getQueueSize(name: string, options?: object): Promise; getJobById(id: string, options?: PgBoss.ConnectionOptions): Promise; + createQueue(name: string, policy: 'standard' | 'short' | 'singleton' | 'stately'): Promise; deleteQueue(name: string): Promise; - deleteAllQueues(): Promise; + purgeQueue(name: string): Promise; clearStorage(): Promise; archive(): Promise; purge(): Promise; expire(): Promise; + maintain(): Promise; schedule(name: string, cron: string, data?: object, options?: PgBoss.ScheduleOptions): Promise; unschedule(name: string): Promise; diff --git a/version.json b/version.json index efaedb7f..a295918c 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "schema": 20 + "schema": 21 }