diff --git a/creator-node/.eslintrc.js b/creator-node/.eslintrc.js index 701d97e7ddf..29a77dacd28 100644 --- a/creator-node/.eslintrc.js +++ b/creator-node/.eslintrc.js @@ -3,6 +3,7 @@ module.exports = { parserOptions: { ecmaVersion: 2020, - project: ['./tsconfig.json'] + project: ['./tsconfig.json'], + tsconfigRootDir: __dirname }, extends: [ 'standard', diff --git a/creator-node/compose/env/base.env b/creator-node/compose/env/base.env index 596ce05aa50..c90935fc737 100644 --- a/creator-node/compose/env/base.env +++ b/creator-node/compose/env/base.env @@ -13,6 +13,9 @@ redisPort=6379 # Can be overriden. creatorNodeIsDebug=true +# Locally we run 4 CNs so we don't want to use too many processes for each +expressAppConcurrency=2 + WAIT_HOSTS= # Rate limiting diff --git a/creator-node/package-lock.json b/creator-node/package-lock.json index 53e52724b62..a3642b7368a 100644 --- a/creator-node/package-lock.json +++ b/creator-node/package-lock.json @@ -1770,6 +1770,11 @@ "resolved": "https://registry.npmjs.org/@improbable-eng/grpc-web-node-http-transport/-/grpc-web-node-http-transport-0.15.0.tgz", "integrity": "sha512-HLgJfVolGGpjc9DWPhmMmXJx8YGzkek7jcCFO1YYkSOoO81MWRZentPOd/JiKiZuU08wtc4BG+WNuGzsQB5jZA==" }, + "@ioredis/commands": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ioredis/commands/-/commands-1.2.0.tgz", + "integrity": "sha512-Sx1pU8EM64o2BrqNpEO1CNLtKQwyhuXuqyfH7oGKCk+1a33d2r5saW8zNwm3j6BTExtjrv2BxTgzzkMwts6vGg==" + }, "@ipld/dag-pb": { "version": "2.1.17", "resolved": "https://registry.npmjs.org/@ipld/dag-pb/-/dag-pb-2.1.17.tgz", @@ -2100,6 +2105,42 @@ "core-js": "^2.5.7" } }, + "@msgpackr-extract/msgpackr-extract-darwin-arm64": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-darwin-arm64/-/msgpackr-extract-darwin-arm64-2.1.2.tgz", + "integrity": "sha512-TyVLn3S/+ikMDsh0gbKv2YydKClN8HaJDDpONlaZR+LVJmsxLFUgA+O7zu59h9+f9gX1aj/ahw9wqa6rosmrYQ==", + "optional": true + }, + "@msgpackr-extract/msgpackr-extract-darwin-x64": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-darwin-x64/-/msgpackr-extract-darwin-x64-2.1.2.tgz", + "integrity": "sha512-YPXtcVkhmVNoMGlqp81ZHW4dMxK09msWgnxtsDpSiZwTzUBG2N+No2bsr7WMtBKCVJMSD6mbAl7YhKUqkp/Few==", + "optional": true + }, + "@msgpackr-extract/msgpackr-extract-linux-arm": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-arm/-/msgpackr-extract-linux-arm-2.1.2.tgz", + "integrity": "sha512-42R4MAFeIeNn+L98qwxAt360bwzX2Kf0ZQkBBucJ2Ircza3asoY4CDbgiu9VWklq8gWJVSJSJBwDI+c/THiWkA==", + "optional": true + }, + "@msgpackr-extract/msgpackr-extract-linux-arm64": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-arm64/-/msgpackr-extract-linux-arm64-2.1.2.tgz", + "integrity": "sha512-vHZ2JiOWF2+DN9lzltGbhtQNzDo8fKFGrf37UJrgqxU0yvtERrzUugnfnX1wmVfFhSsF8OxrfqiNOUc5hko1Zg==", + "optional": true + }, + "@msgpackr-extract/msgpackr-extract-linux-x64": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-linux-x64/-/msgpackr-extract-linux-x64-2.1.2.tgz", + "integrity": "sha512-RjRoRxg7Q3kPAdUSC5EUUPlwfMkIVhmaRTIe+cqHbKrGZ4M6TyCA/b5qMaukQ/1CHWrqYY2FbKOAU8Hg0pQFzg==", + "optional": true + }, + "@msgpackr-extract/msgpackr-extract-win32-x64": { + "version": "2.1.2", + "resolved":
"https://registry.npmjs.org/@msgpackr-extract/msgpackr-extract-win32-x64/-/msgpackr-extract-win32-x64-2.1.2.tgz", + "integrity": "sha512-rIZVR48zA8hGkHIK7ED6+ZiXsjRCcAVBJbm8o89OKAMTmEAQ2QvoOxoiu3w2isAaWwzgtQIOFIqHwvZDyLKCvw==", + "optional": true + }, "@multiformats/murmur3": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/@multiformats/murmur3/-/murmur3-1.1.3.tgz", @@ -2867,7 +2908,7 @@ "@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=" }, "@protobufjs/base64": { "version": "1.1.2", @@ -2882,12 +2923,12 @@ "@protobufjs/eventemitter": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=" }, "@protobufjs/fetch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=", "requires": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" @@ -2896,27 +2937,27 @@ "@protobufjs/float": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=" }, "@protobufjs/inquire": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=" }, "@protobufjs/path": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=" }, "@protobufjs/pool": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=" }, "@protobufjs/utf8": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=" }, "@sindresorhus/is": { "version": "0.14.0", @@ -3364,16 +3405,6 @@ "@types/node": "*" } }, - "@types/bull": { - "version": "3.15.8", - "resolved": "https://registry.npmjs.org/@types/bull/-/bull-3.15.8.tgz", - "integrity": "sha512-8DbSPMSsZH5PWPnGEkAZLYgJEH4ghHJNKF7LB6Wr5R0/v6g+Vs+JoaA7kcvLtHE936xg2WpFPkaoaJgExOmKDw==", - "dev": true, - "requires": { - "@types/ioredis": "*", - "@types/redis": "^2.8.0" - } - }, "@types/bunyan": { "version": "1.8.8", "resolved": "https://registry.npmjs.org/@types/bunyan/-/bunyan-1.8.8.tgz", @@ -3531,15 +3562,6 @@ "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz", 
"integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw==" }, - "@types/redis": { - "version": "2.8.32", - "resolved": "https://registry.npmjs.org/@types/redis/-/redis-2.8.32.tgz", - "integrity": "sha512-7jkMKxcGq9p242exlbsVzuJb57KqHRhNl4dHoQu2Y5v9bCAbtIXXH0R3HleSQW4CTOqpHIYUW3t6tpUj4BVQ+w==", - "dev": true, - "requires": { - "@types/node": "*" - } - }, "@types/secp256k1": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/@types/secp256k1/-/secp256k1-4.0.2.tgz", @@ -4050,7 +4072,7 @@ "strict-uri-encode": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-2.0.0.tgz", - "integrity": "sha512-QwiXZgpRcKkhTj2Scnn++4PKtWsH0kpzZ62L2R6c/LUVYv7hVnZqcg2+sMuT6R7Jusu1vviK/MFsu6kNJfWlEQ==" + "integrity": "sha1-ucczDHBChi9rFC3CdLvMWGbONUY=" } } }, @@ -4095,6 +4117,11 @@ "yaeti": "^0.0.6" } }, + "@yarnpkg/lockfile": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", + "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==" + }, "JSONStream": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", @@ -4674,7 +4701,7 @@ "babel-plugin-syntax-jsx": { "version": "6.18.0", "resolved": "https://registry.npmjs.org/babel-plugin-syntax-jsx/-/babel-plugin-syntax-jsx-6.18.0.tgz", - "integrity": "sha512-qrPaCSo9c8RHNRHIotaufGbuOBN8rtdC4QrrFFc43vyWCCz7Kl7GL1PGaXtMGQZUXrkCjNEgxDfmAuAabr/rlw==" + "integrity": "sha1-CvMqmm4Tyno/1QaeYtew9Y0NiUY=" }, "balanced-match": { "version": "1.0.2", @@ -4873,7 +4900,7 @@ "bmp-js": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/bmp-js/-/bmp-js-0.1.0.tgz", - "integrity": "sha512-vHdS19CnY3hwiNdkaqk93DvjVLfbEcI8mys4UjuWrlX1haDmroo8o4xCzh4wD6DGV6HxRCyauwhHRqMTfERtjw==" + "integrity": "sha1-4Fpj95amwf8l9Hcex62twUjAcjM=" }, "bn.js": { "version": "4.12.0", @@ -5131,7 +5158,7 @@ "buffer-equal": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/buffer-equal/-/buffer-equal-0.0.1.tgz", - "integrity": "sha512-RgSV6InVQ9ODPdLWJ5UAqBqJBOg370Nz6ZQtRzpt6nUjc8v0St97uJ4PYC6NztqIScrAXafKM3mZPMygSe1ggA==" + "integrity": "sha1-kbx0sR6kBbyRa8aqkI+q+ltKrEs=" }, "buffer-layout": { "version": "1.2.2", @@ -5168,22 +5195,38 @@ } } }, - "bull": { - "version": "4.8.2", - "resolved": "https://registry.npmjs.org/bull/-/bull-4.8.2.tgz", - "integrity": "sha512-S7CNIL9+vsbLKwOGkUI6mawY5iABKQJLZn5a7KPnxAZrDhFXkrxsHHXLCKUR/+Oqys3Vk5ElWdj0SLtK84b1Nw==", + "bullmq": { + "version": "1.91.1", + "resolved": "https://registry.npmjs.org/bullmq/-/bullmq-1.91.1.tgz", + "integrity": "sha512-u7dat9I8ZwouZ651AMZkBSvB6NVUPpnAjd4iokd9DM41whqIBnDjuL11h7+kEjcpiDKj6E+wxZiER00FqirZQg==", "requires": { - "cron-parser": "^4.2.1", - "debuglog": "^1.0.0", - "get-port": "^5.1.1", - "ioredis": "^4.28.5", + "cron-parser": "^4.6.0", + "get-port": "6.1.2", + "glob": "^8.0.3", + "ioredis": "^5.2.2", "lodash": "^4.17.21", - "msgpackr": "^1.5.2", - "p-timeout": "^3.2.0", - "semver": "^7.3.2", - "uuid": "^8.3.0" + "msgpackr": "^1.6.2", + "semver": "^7.3.7", + "tslib": "^2.0.0", + "uuid": "^9.0.0" }, "dependencies": { + "brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "requires": { + "balanced-match": "^1.0.0" + } + }, + "cron-parser": { + "version": "4.6.0", + 
"resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.6.0.tgz", + "integrity": "sha512-guZNLMGUgg6z4+eGhmHGw7ft+v6OQeuHzd1gcLxCo9Yg/qoxmG3nindp2/uwGCLizEisf2H0ptqeVXeoCpP6FA==", + "requires": { + "luxon": "^3.0.1" + } + }, "debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", @@ -5192,35 +5235,83 @@ "ms": "2.1.2" } }, + "denque": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/denque/-/denque-2.1.0.tgz", + "integrity": "sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==" + }, + "get-port": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/get-port/-/get-port-6.1.2.tgz", + "integrity": "sha512-BrGGraKm2uPqurfGVj/z97/zv8dPleC6x9JBNRTrDNtCkkRF4rPwrQXFgL7+I+q8QSdU4ntLQX2D7KIxSy8nGw==" + }, + "glob": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.0.3.tgz", + "integrity": "sha512-ull455NHSHI/Y1FqGaaYFaLGkNMMJbavMrEGFXG/PGrg6y7sutWHUHrz6gy6WEBH6akM1M414dWKCNs+IhKdiQ==", + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + } + }, "ioredis": { - "version": "4.28.5", - "resolved": "https://registry.npmjs.org/ioredis/-/ioredis-4.28.5.tgz", - "integrity": "sha512-3GYo0GJtLqgNXj4YhrisLaNNvWSNwSS2wS4OELGfGxH8I69+XfNdnmV1AyN+ZqMh0i7eX+SWjrwFKDBDgfBC1A==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/ioredis/-/ioredis-5.2.3.tgz", + "integrity": "sha512-gQNcMF23/NpvjCaa1b5YycUyQJ9rBNH2xP94LWinNpodMWVUPP5Ai/xXANn/SM7gfIvI62B5CCvZxhg5pOgyMw==", "requires": { + "@ioredis/commands": "^1.1.1", "cluster-key-slot": "^1.1.0", - "debug": "^4.3.1", - "denque": "^1.1.0", + "debug": "^4.3.4", + "denque": "^2.0.1", "lodash.defaults": "^4.2.0", - "lodash.flatten": "^4.4.0", "lodash.isarguments": "^3.1.0", - "p-map": "^2.1.0", - "redis-commands": "1.7.0", "redis-errors": "^1.2.0", "redis-parser": "^3.0.0", "standard-as-callback": "^2.1.0" } }, + "luxon": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.0.3.tgz", + "integrity": "sha512-+EfHWnF+UT7GgTnq5zXg3ldnTKL2zdv7QJgsU5bjjpbH17E3qi/puMhQyJVYuCq+FRkogvB5WB6iVvUr+E4a7w==" + }, + "minimatch": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.0.tgz", + "integrity": "sha512-9TPBGGak4nHfGZsPBohm9AWg6NoT7QTCehS3BIJABslyZbzxfV78QM2Y6+i741OPZIafFAaiiEMh5OyIrJPgtg==", + "requires": { + "brace-expansion": "^2.0.1" + } + }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, - "p-timeout": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", - "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", + "msgpackr": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.6.3.tgz", + "integrity": "sha512-Wtwnt2W06wNOLzV3N0XLLAJCxpwlCfFpvSZAXsu+xf71X7KuqBEDhDSjAy9nwNhQ2aK74Rd1RiRln+62tffoXw==", + "requires": { + "msgpackr-extract": "^2.0.2" + } + }, + "msgpackr-extract": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/msgpackr-extract/-/msgpackr-extract-2.1.2.tgz", + "integrity": "sha512-cmrmERQFb19NX2JABOGtrKdHMyI6RUyceaPBQ2iRz9GnDkjBWFjNJC0jyyoOfZl2U/LZE3tQCCQc4dlRyA8mcA==", + "optional": true, "requires": { - "p-finally": "^1.0.0" + 
"@msgpackr-extract/msgpackr-extract-darwin-arm64": "2.1.2", + "@msgpackr-extract/msgpackr-extract-darwin-x64": "2.1.2", + "@msgpackr-extract/msgpackr-extract-linux-arm": "2.1.2", + "@msgpackr-extract/msgpackr-extract-linux-arm64": "2.1.2", + "@msgpackr-extract/msgpackr-extract-linux-x64": "2.1.2", + "@msgpackr-extract/msgpackr-extract-win32-x64": "2.1.2", + "node-gyp-build-optional-packages": "5.0.3" } }, "semver": { @@ -5232,9 +5323,9 @@ } }, "uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", + "integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==" } } }, @@ -5402,7 +5493,7 @@ "camelize": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/camelize/-/camelize-1.0.0.tgz", - "integrity": "sha512-W2lPwkBkMZwFlPCXhIlYgxu+7gC/NUlCtdK652DAJ1JdgV0sTrvuPFshNPrFa1TY2JOkLhgdeEBplB4ezEa+xg==" + "integrity": "sha1-FkpUg+Yw+kMh5a8HAg5TGDGyYJs=" }, "caniuse-lite": { "version": "1.0.30001241", @@ -6010,14 +6101,6 @@ "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==" }, - "cron-parser": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.4.0.tgz", - "integrity": "sha512-TrE5Un4rtJaKgmzPewh67yrER5uKM0qI9hGLDBfWb8GGRe9pn/SDkhVrdHa4z7h0SeyeNxnQnogws/H+AQANQA==", - "requires": { - "luxon": "^1.28.0" - } - }, "cross-fetch": { "version": "3.1.5", "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", @@ -6097,7 +6180,7 @@ "css-color-keywords": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/css-color-keywords/-/css-color-keywords-1.0.0.tgz", - "integrity": "sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg==" + "integrity": "sha1-/qJhbcZ2spYmhrOvjb2+GAskTgU=" }, "css-to-react-native": { "version": "3.0.0", @@ -6145,11 +6228,6 @@ "integrity": "sha1-IwdjLUwEOCuN+KMvcLiVBG1SdF8=", "dev": true }, - "debuglog": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/debuglog/-/debuglog-1.0.1.tgz", - "integrity": "sha1-qiT/uaw9+aI1GDfPstJ5NgzXhJI=" - }, "decamelize": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", @@ -8046,7 +8124,7 @@ "fast-stable-stringify": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fast-stable-stringify/-/fast-stable-stringify-1.0.0.tgz", - "integrity": "sha512-wpYMUmFu5f00Sm0cj2pfivpmawLZ0NKdviQ4w9zJeR8JVtOpOxHmLaJuj0vxvGqMJQWyP/COUkF75/57OKyRag==" + "integrity": "sha1-XFVDRisiru79NtBbNOUceMuG0xM=" }, "fastq": { "version": "1.13.0", @@ -8206,6 +8284,59 @@ "path-exists": "^4.0.0" } }, + "find-yarn-workspace-root": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz", + "integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==", + "requires": { + "micromatch": "^4.0.2" + }, + "dependencies": { + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "requires": 
{ + "fill-range": "^7.0.1" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" + }, + "micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "requires": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + } + }, + "picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==" + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "requires": { + "is-number": "^7.0.0" + } + } + } + }, "fix-hmr": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/fix-hmr/-/fix-hmr-1.0.2.tgz", @@ -8646,11 +8777,6 @@ "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", "dev": true }, - "get-port": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/get-port/-/get-port-5.1.1.tgz", - "integrity": "sha512-g/Q1aTSDOxFpchXC4i8ZWvxA1lnPqx/JHqcpIw0/LX9T8x/GBbi6YnlN5nhaKIFkT8oFsscUKgDJYxfwfS6QsQ==" - }, "get-stdin": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-6.0.0.tgz", @@ -9414,6 +9540,11 @@ } } }, + "is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==" + }, "is-extendable": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", @@ -9768,6 +9899,14 @@ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==", "dev": true }, + "is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "requires": { + "is-docker": "^2.0.0" + } + }, "isarray": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", @@ -9776,8 +9915,7 @@ "isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", - "dev": true + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" }, "isobject": { "version": "3.0.1", @@ -10370,6 +10508,14 @@ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", "dev": true }, + "klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + 
"requires": { + "graceful-fs": "^4.1.11" + } + }, "latest-version": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-3.1.0.tgz", @@ -10564,11 +10710,6 @@ "es5-ext": "~0.10.2" } }, - "luxon": { - "version": "1.28.0", - "resolved": "https://registry.npmjs.org/luxon/-/luxon-1.28.0.tgz", - "integrity": "sha512-TfTiyvZhwBYM/7QdAVDh+7dBTBA29v4ik0Ce9zda3Mnf8on1S5KJI8P2jKFZ8+5C0jhmr0KwJEO/Wdpm0VeWJQ==" - }, "make-dir": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", @@ -10927,73 +11068,6 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" }, - "msgpackr": { - "version": "1.5.7", - "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.5.7.tgz", - "integrity": "sha512-Hsa80i8W4BiObSMHslfnwC+CC1CYHZzoXJZn0+3EvoCEOgt3c5QlXhdcjgFk2aZxMgpV8aUFZqJyQUCIp4UrzA==", - "requires": { - "msgpackr-extract": "^1.1.4" - } - }, - "msgpackr-extract": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/msgpackr-extract/-/msgpackr-extract-1.1.4.tgz", - "integrity": "sha512-WQbHvsThprXh+EqZYy+SQFEs7z6bNM7a0vgirwUfwUcphWGT2mdPcpyLCNiRsN6w5q5VKJUMblHY+tNEyceb9Q==", - "optional": true, - "requires": { - "msgpackr-extract-darwin-arm64": "1.1.0", - "msgpackr-extract-darwin-x64": "1.1.0", - "msgpackr-extract-linux-arm": "1.1.0", - "msgpackr-extract-linux-arm64": "1.1.0", - "msgpackr-extract-linux-x64": "1.1.0", - "msgpackr-extract-win32-x64": "1.1.0", - "node-gyp-build-optional-packages": "^4.3.2" - }, - "dependencies": { - "node-gyp-build-optional-packages": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-4.3.2.tgz", - "integrity": "sha512-P5Ep3ISdmwcCkZIaBaQamQtWAG0facC89phWZgi5Z3hBU//J6S48OIvyZWSPPf6yQMklLZiqoosWAZUj7N+esA==", - "optional": true - } - } - }, - "msgpackr-extract-darwin-arm64": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/msgpackr-extract-darwin-arm64/-/msgpackr-extract-darwin-arm64-1.1.0.tgz", - "integrity": "sha512-s1kHoT12tS2cCQOv+Wl3I+/cYNJXBPtwQqGA+dPYoXmchhXiE0Nso+BIfvQ5PxbmAyjj54Q5o7PnLTqVquNfZA==", - "optional": true - }, - "msgpackr-extract-darwin-x64": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/msgpackr-extract-darwin-x64/-/msgpackr-extract-darwin-x64-1.1.0.tgz", - "integrity": "sha512-yx/H/i12IKg4eWGu/eKdKzJD4jaYvvujQSaVmeOMCesbSQnWo5X6YR9TFjoiNoU9Aexk1KufzL9gW+1DozG1yw==", - "optional": true - }, - "msgpackr-extract-linux-arm": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/msgpackr-extract-linux-arm/-/msgpackr-extract-linux-arm-1.1.0.tgz", - "integrity": "sha512-0VvSCqi12xpavxl14gMrauwIzHqHbmSChUijy/uo3mpjB1Pk4vlisKpZsaOZvNJyNKj0ACi5jYtbWnnOd7hYGw==", - "optional": true - }, - "msgpackr-extract-linux-arm64": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/msgpackr-extract-linux-arm64/-/msgpackr-extract-linux-arm64-1.1.0.tgz", - "integrity": "sha512-AxFle3fHNwz2V4CYDIGFxI6o/ZuI0lBKg0uHI8EcCMUmDE5mVAUWYge5WXmORVvb8sVWyVgFlmi3MTu4Ve6tNQ==", - "optional": true - }, - "msgpackr-extract-linux-x64": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/msgpackr-extract-linux-x64/-/msgpackr-extract-linux-x64-1.1.0.tgz", - "integrity": "sha512-O+XoyNFWpdB8oQL6O/YyzffPpmG5rTNrr1nKLW70HD2ENJUhcITzbV7eZimHPzkn8LAGls1tBaMTHQezTBpFOw==", - "optional": true - }, - "msgpackr-extract-win32-x64": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/msgpackr-extract-win32-x64/-/msgpackr-extract-win32-x64-1.1.0.tgz", - "integrity": "sha512-6AJdM5rNsL4yrskRfhujVSPEd6IBpgvsnIT/TPowKNLQ62iIdryizPY2PJNFiW3AJcY249AHEiDBXS1cTDPxzA==", - "optional": true - }, "multer": { "version": "1.4.2", "resolved": "https://registry.npmjs.org/multer/-/multer-1.4.2.tgz", @@ -11222,8 +11296,7 @@ "nice-try": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", - "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==", - "dev": true + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" }, "nise": { "version": "1.5.3", @@ -11314,6 +11387,12 @@ "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==" }, + "node-gyp-build-optional-packages": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/node-gyp-build-optional-packages/-/node-gyp-build-optional-packages-5.0.3.tgz", + "integrity": "sha512-k75jcVzk5wnnc/FMxsf4udAoTEUv2jY3ycfdSd3yWu6Cnd1oee6/CfZJApyscA4FJOmdoixWwiwOyf16RzD5JA==", + "optional": true + }, "node-localstorage": { "version": "1.3.1", "resolved": "https://registry.npmjs.org/node-localstorage/-/node-localstorage-1.3.1.tgz", @@ -11726,6 +11805,15 @@ "mimic-fn": "^1.0.0" } }, + "open": { + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", + "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", + "requires": { + "is-docker": "^2.0.0", + "is-wsl": "^2.1.1" + } + }, "optionator": { "version": "0.9.1", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", @@ -11743,8 +11831,7 @@ "os-tmpdir": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", - "dev": true + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=" }, "p-cancelable": { "version": "1.1.0", @@ -11946,6 +12033,102 @@ "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=", "dev": true }, + "patch-package": { + "version": "6.4.7", + "resolved": "https://registry.npmjs.org/patch-package/-/patch-package-6.4.7.tgz", + "integrity": "sha512-S0vh/ZEafZ17hbhgqdnpunKDfzHQibQizx9g8yEf5dcVk3KOflOfdufRXQX8CSEkyOQwuM/bNz1GwKvFj54kaQ==", + "requires": { + "@yarnpkg/lockfile": "^1.1.0", + "chalk": "^2.4.2", + "cross-spawn": "^6.0.5", + "find-yarn-workspace-root": "^2.0.0", + "fs-extra": "^7.0.1", + "is-ci": "^2.0.0", + "klaw-sync": "^6.0.0", + "minimist": "^1.2.0", + "open": "^7.4.2", + "rimraf": "^2.6.3", + "semver": "^5.6.0", + "slash": "^2.0.0", + "tmp": "^0.0.33" + }, + "dependencies": { + "ci-info": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", + "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==" + }, + "cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "requires": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + } + }, + "fs-extra": { + "version": "7.0.1", + "resolved": 
"https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", + "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", + "requires": { + "graceful-fs": "^4.1.2", + "jsonfile": "^4.0.0", + "universalify": "^0.1.0" + } + }, + "glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "is-ci": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", + "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", + "requires": { + "ci-info": "^2.0.0" + } + }, + "minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "requires": { + "glob": "^7.1.3" + } + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + }, + "slash": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", + "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==" + } + } + }, "path-dirname": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz", @@ -11972,8 +12155,7 @@ "path-key": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "dev": true + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" }, "path-parse": { "version": "1.0.7", @@ -12441,7 +12623,7 @@ "qr.js": { "version": "0.0.0", "resolved": "https://registry.npmjs.org/qr.js/-/qr.js-0.0.0.tgz", - "integrity": "sha512-c4iYnWb+k2E+vYpRimHqSu575b1/wKl4XFeJGpFmrJQz5I88v9aY2czh7s0w36srfCM1sXgC/xpoJz5dJfq+OQ==" + "integrity": "sha1-ys6GOG9ZoNuAUPqQ2baw6IoeNk8=" }, "qrcode.react": { "version": "1.0.1", @@ -13506,7 +13688,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dev": true, "requires": { "shebang-regex": "^1.0.0" } @@ -13514,8 +13695,7 @@ "shebang-regex": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "dev": true + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" }, "shimmer": { "version": "1.2.1", @@ -13658,7 +13838,7 @@ "slide": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/slide/-/slide-1.1.6.tgz", - "integrity": "sha512-NwrtjCg+lZoqhFU8fOwl4ay2ei8PaqCBOUV3/ektPY9trO1yQ1oXEfmHAhKArUVUr/hOHvy5f6AdP17dCM0zMw==" + "integrity": "sha1-VusCfWW00tzmyy4tMsTUr8nh1wc=" }, "snake-case": { "version": "3.0.4", @@ -14894,7 +15074,6 @@ "version": 
"0.0.33", "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dev": true, "requires": { "os-tmpdir": "~1.0.2" } @@ -15868,7 +16047,6 @@ "version": "1.3.1", "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dev": true, "requires": { "isexe": "^2.0.0" } @@ -16046,7 +16224,7 @@ "wif": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/wif/-/wif-2.0.6.tgz", - "integrity": "sha512-HIanZn1zmduSF+BQhkE+YXIbEiH0xPr1012QbFEGB0xsKqJii0/SqJjyn8dFv6y36kOznMgMB+LGcbZTJ1xACQ==", + "integrity": "sha1-CNP1IFbGZnkplyb63g1DKudLRwQ=", "requires": { "bs58check": "<3.0.0" } @@ -16162,7 +16340,7 @@ "write-file-atomic": { "version": "1.3.4", "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-1.3.4.tgz", - "integrity": "sha512-SdrHoC/yVBPpV0Xq/mUZQIpW2sWXAShb/V4pomcJXh92RuaO+f3UTWItiR3Px+pLnV2PvC2/bfn5cwr5X6Vfxw==", + "integrity": "sha1-+Aek8LHZ6ROuekgRLmzDrxmRtF8=", "requires": { "graceful-fs": "^4.1.11", "imurmurhash": "^0.1.4", @@ -16243,7 +16421,7 @@ "xmlhttprequest": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/xmlhttprequest/-/xmlhttprequest-1.8.0.tgz", - "integrity": "sha512-58Im/U0mlVBLM38NdZjHyhuMtCqa61469k2YP/AaPbvCoV9aQGUpbJBj1QRm2ytRiVQBD/fsw7L2bJGDVQswBA==" + "integrity": "sha1-Z/4HXFwk/vOfnWX197f+dRcZaPw=" }, "xtend": { "version": "4.0.2", diff --git a/creator-node/package.json b/creator-node/package.json index 8ee2abb0c43..a3118e2a615 100644 --- a/creator-node/package.json +++ b/creator-node/package.json @@ -17,7 +17,8 @@ "test:coverage:ci": "nyc --reporter=lcov --reporter=text npm run test:ci && nyc report --reporter=text-lcov | coveralls", "lint:fix": "eslint --fix --ext=js,ts src", "lint": "eslint --ext=js,ts src", - "build": "./node_modules/.bin/tsc --project tsconfig.build.json" + "build": "./node_modules/.bin/tsc --project tsconfig.build.json", + "postinstall": "patch-package" }, "keywords": [], "author": "", @@ -48,7 +49,7 @@ "bl": "^4.1.0", "body-parser": "^1.18.3", "buffer": "5.4.2", - "bull": "4.8.2", + "bullmq": "^1.91.1", "bunyan": "^1.8.15", "cids": "0.8.0", "commander": "^6.2.1", @@ -77,6 +78,7 @@ "jimp": "^0.6.1", "lodash": "4.17.21", "multer": "^1.4.0", + "patch-package": "^6.4.7", "pg": "^8.0.3", "prettier-config-standard": "^4.0.0", "prom-client": "^14.0.1", @@ -92,12 +94,12 @@ "web3": "1.2.8" }, "devDependencies": { - "@types/bull": "^3.15.8", "@types/bunyan": "^1.8.8", "@types/chai": "^4.3.3", "@types/eth-sig-util": "^2.1.1", "@types/express": "4.17.12", "@types/fs-extra": "^9.0.13", + "@types/ioredis": "^4.28.10", "@types/lodash": "^4.14.182", "@types/mocha": "^9.1.1", "@types/node": "^18.7.9", @@ -133,6 +135,7 @@ "lodash": "Vuln in < 4.17.13, fixed by https://github.com/lodash/lodash/pull/4336" }, "scriptsComments": { + "start": "Runs multiple processes using cluster. Starts as primary since process.env.NODE_UNIQUE_ID=undefined", "coverage": "Runs nyc on tests/ dir and outputs results in ./nyc_output. Can be used for vscode extensions.", "report": "Generates static html files representing code coverage per test file and outputs them into /coverage." 
} diff --git a/creator-node/patches/README.md b/creator-node/patches/README.md new file mode 100644 index 00000000000..ab4fd409680 --- /dev/null +++ b/creator-node/patches/README.md @@ -0,0 +1,2 @@ +This folder contains patch files auto-generated by [patch-package](https://www.npmjs.com/package/patch-package). +They are applied during `npm run postinstall` to modify dependencies in node_modules. \ No newline at end of file diff --git a/creator-node/patches/bullmq+1.91.1.patch b/creator-node/patches/bullmq+1.91.1.patch new file mode 100644 index 00000000000..c2610af5ae3 --- /dev/null +++ b/creator-node/patches/bullmq+1.91.1.patch @@ -0,0 +1,30 @@ +diff --git a/node_modules/bullmq/dist/cjs/classes/child-pool.js b/node_modules/bullmq/dist/cjs/classes/child-pool.js +index 2e47bee..0369e22 100644 +--- a/node_modules/bullmq/dist/cjs/classes/child-pool.js ++++ b/node_modules/bullmq/dist/cjs/classes/child-pool.js +@@ -8,6 +8,16 @@ const process_utils_1 = require("./process-utils"); + const interfaces_1 = require("../interfaces"); + const utils_1 = require("../utils"); + const CHILD_KILL_TIMEOUT = 30000; ++ ++const getFreePort = async () => { ++ return new Promise((res) => { ++ const srv = require("net").createServer(); ++ srv.listen(0, () => { ++ const port = srv.address().port; ++ srv.close((err) => res(port)); ++ }); ++ }); ++}; + const convertExecArgv = async (execArgv) => { + const standard = []; + const convertedArgs = []; +@@ -18,7 +28,7 @@ const convertExecArgv = async (execArgv) => { + } + else { + const argName = arg.split('=')[0]; +- const port = await (await Promise.resolve().then(() => require('get-port'))).default(); ++ const port = await getFreePort(); + convertedArgs.push(`${argName}=${port}`); + } + } diff --git a/creator-node/scripts/start.sh b/creator-node/scripts/start.sh index a9d6265f815..b2c4aa72d8d 100755 --- a/creator-node/scripts/start.sh +++ b/creator-node/scripts/start.sh @@ -65,6 +65,10 @@ if [[ "$contentCacheLayerEnabled" == "true" ]]; then openresty -p /usr/local/openresty -c /usr/local/openresty/conf/nginx.conf fi +# Postinstall applies patches in the patches/ folder via patch-package. This fixes a bug in BullMQ: +# https://github.com/taskforcesh/bullmq/issues/1424 +npm run postinstall +# index.js runs multiple processes using cluster. Starts as primary since process.env.NODE_UNIQUE_ID=undefined if [[ "$devMode" == "true" ]]; then if [ "$link_libs" = true ]; then cd ../audius-libs
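The start.sh comment above refers to Node's cluster module: when the script launches index.js, process.env.NODE_UNIQUE_ID is unset, so the process starts as the cluster primary and forks the workers that each run the Express app. A minimal sketch of that pattern, for illustration only (the real logic lives in src/index.ts and src/utils, which this diff only shows in part; getNumWorkers is a hypothetical stand-in, while expressAppConcurrency and getConcurrencyPerWorker mirror names used below):

const cluster = require('cluster')
const os = require('os')

// Stand-ins for the real src/config.js and the startApp() defined in src/index.ts
const config = { get: (key) => (key === 'expressAppConcurrency' ? 0 : undefined) }
const startApp = () => console.log(`worker ${process.pid} serving a Content Node`)

// expressAppConcurrency=0 falls back to one process per core (see config.js below)
const getNumWorkers = () => {
  const configured = config.get('expressAppConcurrency')
  return configured > 0 ? configured : os.cpus().length
}

// Plausible shape of clusterUtils.getConcurrencyPerWorker: split a queue's
// global concurrency across the workers so the node-wide total stays the same
const getConcurrencyPerWorker = (globalConcurrency) =>
  Math.max(1, Math.floor(globalConcurrency / getNumWorkers()))

if (cluster.isPrimary) {
  // cluster only sets NODE_UNIQUE_ID in forked children, so the process
  // started by start.sh takes this branch (cluster.isMaster on Node < 16)
  for (let i = 0; i < getNumWorkers(); i++) {
    cluster.fork()
  }
} else {
  // Each worker runs its own Express app; cluster shares the listening port
  console.log(`queue concurrency per worker: ${getConcurrencyPerWorker(4)}`)
  startApp()
}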
diff --git a/creator-node/src/AsyncProcessingQueue.js b/creator-node/src/AsyncProcessingQueue.js index 562f16f7e72..b334f660f4f 100644 --- a/creator-node/src/AsyncProcessingQueue.js +++ b/creator-node/src/AsyncProcessingQueue.js @@ -1,7 +1,8 @@ -const Bull = require('bull') +const { Queue, Worker } = require('bullmq') const { logger: genericLogger } = require('./logging') const config = require('./config') const redisClient = require('./redis') +const { clusterUtils } = require('./utils') // Processing fns const { @@ -27,6 +28,7 @@ const PROCESS_STATES = Object.freeze({ DONE: 'DONE', FAILED: 'FAILED' }) +const QUEUE_NAME = 'async-processing' const ASYNC_PROCESSING_QUEUE_HISTORY = 500 @@ -39,48 +41,55 @@ const ASYNC_PROCESSING_QUEUE_HISTORY = 500 class AsyncProcessingQueue { constructor(libs, prometheusRegistry) { - this.queue = new Bull('asyncProcessing', { - redis: { - host: config.get('redisHost'), - port: config.get('redisPort') - }, + const connection = { + host: config.get('redisHost'), + port: config.get('redisPort') + } + this.queue = new Queue(QUEUE_NAME, { + connection, defaultJobOptions: { removeOnComplete: ASYNC_PROCESSING_QUEUE_HISTORY, removeOnFail: ASYNC_PROCESSING_QUEUE_HISTORY } }) - prometheusRegistry.startQueueMetrics(this.queue) - this.libs = libs const untracedProcessTask = this.processTask - this.queue.process(MAX_CONCURRENCY, async (job, done) => { - const { logContext, parentSpanContext, task } = job.data - const processTask = instrumentTracing({ - name: `AsyncProcessingQueue.process ${task}`, - fn: untracedProcessTask, - context: this, - options: { - // if a parentSpanContext is provided - // reference it so the async queue job can remember - // who enqueued it - links: parentSpanContext - ? [ - { - context: parentSpanContext - } - ] - : [], - attributes: { - requestID: logContext.requestID, - [tracing.CODE_FILEPATH]: __filename + const worker = new Worker( + QUEUE_NAME, + async (job) => { + const { logContext, parentSpanContext, task } = job.data + const processTask = instrumentTracing({ + name: `AsyncProcessingQueue.process ${task}`, + fn: untracedProcessTask, + context: this, + options: { + // if a parentSpanContext is provided + // reference it so the async queue job can remember + // who enqueued it + links: parentSpanContext + ? [ + { + context: parentSpanContext + } + ] + : [], + attributes: { + requestID: logContext.requestID, + [tracing.CODE_FILEPATH]: __filename + } } - } - }) + }) - await processTask(job, done) - }) + await processTask(job) + }, + { + connection, + concurrency: clusterUtils.getConcurrencyPerWorker(MAX_CONCURRENCY) + } + ) + prometheusRegistry.startQueueMetrics(this.queue, worker) this.PROCESS_NAMES = PROCESS_NAMES this.PROCESS_STATES = PROCESS_STATES @@ -90,7 +99,7 @@ class AsyncProcessingQueue { this.constructProcessKey = this.constructAsyncProcessingKey.bind(this) } - async processTask(job, done) { + async processTask(job) { const { logContext, task } = job.data const func = this.getFn(task) @@ -107,7 +116,6 @@ class AsyncProcessingQueue { logContext, req: job.data.req }) - done(null, {}) } else { this.logStatus( `Succesfully handed off transcoding and segmenting to sp=${sp}.
Wrapping up remainder of track association..` ) await func({ logContext, req: { ...job.data.req, transcodeFilePath, segmentFileNames } }) - done(null, { response: { transcodeFilePath, segmentFileNames } }) + return { response: { transcodeFilePath, segmentFileNames } } } } else { try { const response = await this.monitorProgress(task, func, job.data) - done(null, { response }) + return { response } } catch (e) { tracing.recordException(e) this.logError( @@ -131,7 +139,7 @@ }: ${e.toString()}`, logContext ) - done(e.toString()) + return e.toString() } } } @@ -185,12 +193,12 @@ async addTask(params) { const { logContext, task } = params - this.logStatus( + await this.logStatus( `Adding ${task} task! uuid=${logContext.requestID}}`, logContext ) - const job = await this.queue.add(params) + const job = await this.queue.add(QUEUE_NAME, params) return job } diff --git a/creator-node/src/ImageProcessingQueue.js b/creator-node/src/ImageProcessingQueue.js index 8a2e9ef807b..1bbde9e995d 100644 --- a/creator-node/src/ImageProcessingQueue.js +++ b/creator-node/src/ImageProcessingQueue.js @@ -1,7 +1,11 @@ -const Bull = require('bull') +const { Queue, QueueEvents, Worker } = require('bullmq') +const path = require('path') const os = require('os') + const config = require('./config') const { logger: genericLogger } = require('./logging') +const { clusterUtils } = require('./utils') +const resizeImage = require('./resizeImage') const imageProcessingMaxConcurrency = config.get( 'imageProcessingMaxConcurrency' ) @@ -22,34 +26,34 @@ const IMAGE_PROCESSING_QUEUE_HISTORY = 500 class ImageProcessingQueue { constructor(prometheusRegistry = null) { - this.queue = new Bull('image-processing-queue', { - redis: { - port: config.get('redisPort'), - host: config.get('redisHost') - }, + const connection = { + host: config.get('redisHost'), + port: config.get('redisPort') + } + this.queue = new Queue('image-processing-queue', { + connection, defaultJobOptions: { removeOnComplete: IMAGE_PROCESSING_QUEUE_HISTORY, removeOnFail: IMAGE_PROCESSING_QUEUE_HISTORY } }) + // Process jobs sandboxed - https://docs.bullmq.io/guide/workers/sandboxed-processors + const processorFile = path.join(__dirname, 'resizeImage.js') + const worker = new Worker('image-processing-queue', processorFile, { + connection, + concurrency: clusterUtils.getConcurrencyPerWorker(MAX_CONCURRENCY) + }) if (prometheusRegistry !== null && prometheusRegistry !== undefined) { - prometheusRegistry.startQueueMetrics(this.queue) + prometheusRegistry.startQueueMetrics(this.queue, worker) } - /** - * Queue will process tasks concurrently if provided a concurrency number and a - * path to file containing job processor function - * https://github.com/OptimalBits/bull/tree/013c51942e559517c57a117c27a550a0fb583aa8#separate-processes - */ - this.queue.process( - PROCESS_NAMES.resizeImage /** job processor name */, - MAX_CONCURRENCY /** job processor concurrency */, - `${__dirname}/resizeImage.js` /** path to job processor function */ - ) - this.logStatus = this.logStatus.bind(this) this.resizeImage = this.resizeImage.bind(this) + + this.queueEvents = new QueueEvents('image-processing-queue', { + connection + }) } /** @@ -96,7 +100,8 @@ square, logContext }) - const result = await job.finished() + + const result = await job.waitUntilFinished(this.queueEvents) return result } }
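For readers following the Bull to BullMQ conversion in the two queues above: Bull used one Queue object for both producing and consuming, with queue.process() plus a done callback, and job.finished() to await results. BullMQ splits this into Queue (producer), Worker (consumer: resolve a job by returning, fail it by throwing), and QueueEvents, a separate Redis subscriber that job.waitUntilFinished() requires. A minimal sketch of the round trip, assuming only a local Redis on the default port:

const { Queue, QueueEvents, Worker } = require('bullmq')

const connection = { host: 'localhost', port: 6379 }

// Producer: BullMQ's add() takes an explicit job name as its first argument
const queue = new Queue('example-queue', { connection })

// Event subscriber: required for job.waitUntilFinished() to resolve
const queueEvents = new QueueEvents('example-queue', { connection })

// Consumer: replaces Bull's queue.process(concurrency, fn); the return
// value becomes the job result, and a thrown error fails the job
const worker = new Worker(
  'example-queue',
  async (job) => ({ echoed: job.data }),
  { connection, concurrency: 2 }
)

async function main() {
  const job = await queue.add('echo', { hello: 'world' })
  const result = await job.waitUntilFinished(queueEvents)
  console.log(result) // { echoed: { hello: 'world' } }
  await Promise.all([worker.close(), queueEvents.close(), queue.close()])
}

main().catch(console.error)

ImageProcessingQueue opts into the sandboxed variant instead: passing a file path rather than a function to new Worker() makes BullMQ run the processor in a child process, which is why the patched child-pool.js above comes into play.
diff --git a/creator-node/src/TranscodingQueue.js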
b/creator-node/src/TranscodingQueue.js index fb6fa07abdf..e1caa9f9877 100644 --- a/creator-node/src/TranscodingQueue.js +++ b/creator-node/src/TranscodingQueue.js @@ -1,9 +1,10 @@ -const Bull = require('bull') +const { Queue, QueueEvents, Worker } = require('bullmq') const os = require('os') const config = require('./config') const ffmpeg = require('./ffmpeg') const { logger: genericLogger } = require('./logging') +const { clusterUtils } = require('./utils') const TRANSCODING_MAX_CONCURRENCY = config.get('transcodingMaxConcurrency') const MAX_ACTIVE_JOBS = config.get('maximumTranscodingActiveJobs') @@ -25,85 +26,91 @@ const TRANSCODING_QUEUE_HISTORY = 500 class TranscodingQueue { constructor() { - this.queue = new Bull('transcoding-queue', { - redis: { - port: config.get('redisPort'), - host: config.get('redisHost') - }, + const connection = { + host: config.get('redisHost'), + port: config.get('redisPort') + } + this.queue = new Queue('transcoding-queue', { + connection, defaultJobOptions: { removeOnComplete: TRANSCODING_QUEUE_HISTORY, removeOnFail: TRANSCODING_QUEUE_HISTORY } }) + this.queueEvents = new QueueEvents('transcoding-queue', { + connection + }) this.logStatus('Initialized TranscodingQueue') - // NOTE: Specifying max concurrency here dictates the max concurrency for - // *any* process fn below - // See https://github.com/OptimalBits/bull/blob/develop/REFERENCE.md#queueprocess - this.queue.process( - PROCESS_NAMES.segment, - MAX_CONCURRENCY, - async (job, done) => { - const start = Date.now() - const { fileDir, fileName, logContext } = job.data - - try { - this.logStatus(`Segmenting ${fileDir} ${fileName}`, logContext) - - const response = await ffmpeg.segmentFile(fileDir, fileName, { - logContext - }) - this.logStatus( - `Successfully completed segment job ${fileDir} ${fileName} in duration ${ - Date.now() - start - }ms`, - logContext - ) - done(null, response) - } catch (e) { - this.logStatus( - `Segment Job Error ${e} in duration ${Date.now() - start}ms`, - logContext - ) - done(e) - } - } - ) - - this.queue.process( - PROCESS_NAMES.transcode320, - /* inherited */ 0, - async (job, done) => { - const start = Date.now() - const { fileDir, fileName, logContext } = job.data - - try { - this.logStatus( - `transcoding to 320kbps ${fileDir} ${fileName}`, - logContext - ) - - const transcodeFilePath = await ffmpeg.transcodeFileTo320( - fileDir, - fileName, - { - logContext + const worker = new Worker( + 'transcoding-queue', + async (job) => { + switch (job.name) { + case PROCESS_NAMES.segment: { + const start = Date.now() + const { fileDir, fileName, logContext } = job.data + + try { + await this.logStatus( + `Segmenting ${fileDir} ${fileName}`, + logContext + ) + + const response = await ffmpeg.segmentFile(fileDir, fileName, { + logContext + }) + await this.logStatus( + `Successfully completed segment job ${fileDir} ${fileName} in duration ${ + Date.now() - start + }ms`, + logContext + ) + return response + } catch (e) { + await this.logStatus( + `Segment Job Error ${e} in duration ${Date.now() - start}ms`, + logContext + ) + return e } - ) - this.logStatus( - `Successfully completed Transcode320 job ${fileDir} ${fileName} in duration ${ - Date.now() - start - }ms`, - logContext - ) - done(null, { transcodeFilePath }) - } catch (e) { - this.logStatus( - `Transcode320 Job Error ${e} in duration ${Date.now() - start}`, - logContext - ) - done(e) + } + case PROCESS_NAMES.transcode320: { + const start = Date.now() + const { fileDir, fileName, logContext } = job.data + + try { + 
this.logStatus( + `transcoding to 320kbps ${fileDir} ${fileName}`, + logContext + ) + + const transcodeFilePath = await ffmpeg.transcodeFileTo320( + fileDir, + fileName, + { + logContext + } + ) + this.logStatus( + `Successfully completed Transcode320 job ${fileDir} ${fileName} in duration ${ + Date.now() - start + }ms`, + logContext + ) + return { transcodeFilePath } + } catch (e) { + this.logStatus( + `Transcode320 Job Error ${e} in duration ${Date.now() - start}`, + logContext + ) + return e + } + } } + }, + { + connection, + concurrency: clusterUtils.getConcurrencyPerWorker(MAX_CONCURRENCY) } ) @@ -172,7 +179,7 @@ class TranscodingQueue { logContext ) - const result = await job.finished() + const result = await job.waitUntilFinished(this.queueEvents) this.logStatus( `Segment job successful, fileDir=${fileDir}, fileName=${fileName}`, logContext @@ -202,7 +209,7 @@ class TranscodingQueue { logContext ) - const result = await job.finished() + const result = await job.waitUntilFinished(this.queueEvents) this.logStatus( `Transcode320 job successful, fileDir=${fileDir}, fileName=${fileName}`, logContext diff --git a/creator-node/src/blacklistManager.js b/creator-node/src/blacklistManager.js index 32a52437049..af3efb11ee8 100644 --- a/creator-node/src/blacklistManager.js +++ b/creator-node/src/blacklistManager.js @@ -2,6 +2,7 @@ const { logger } = require('./logging') const models = require('./models') const redis = require('./redis') const config = require('./config') +const { clusterUtils } = require('./utils') const CID_WHITELIST = new Set(config.get('cidWhitelist').split(',')) @@ -27,26 +28,29 @@ class BlacklistManager { static async init() { try { - this.log('Initializing BlacklistManager...') - - const { trackIdsToBlacklist, userIdsToBlacklist, segmentsToBlacklist } = - await this.getDataToBlacklist() - await this.fetchCIDsAndAddToRedis({ - trackIdsToBlacklist, - userIdsToBlacklist, - segmentsToBlacklist - }) + this._log('Initializing BlacklistManager...') + + // Adding to redis only needs to be done once, but multiple workers all run the app with their own BlacklistManager instance + if (clusterUtils.isThisWorkerInit()) { + const { trackIdsToBlacklist, userIdsToBlacklist, segmentsToBlacklist } = + await this._getDataToBlacklist() + await this._fetchCIDsAndAddToRedis({ + trackIdsToBlacklist, + userIdsToBlacklist, + segmentsToBlacklist + }) + } this.initialized = true - this.log('Initialized BlacklistManager') + this._log('Initialized BlacklistManager') } catch (e) { throw new Error(`Could not init BlacklistManager: ${e.message}`) } } /** Return list of trackIds, userIds, and CIDs to be blacklisted. */ - static async getDataToBlacklist() { + static async _getDataToBlacklist() { // CBL = ContentBlacklist const tracksFromCBL = await models.ContentBlacklist.findAll({ attributes: ['value'], @@ -86,13 +90,13 @@ class BlacklistManager { * 2. Add the trackIds and userIds to redis as sets. * 3. Create mapping of explicitly blacklisted tracks with the structure in redis. 
*/ - static async fetchCIDsAndAddToRedis({ + static async _fetchCIDsAndAddToRedis({ trackIdsToBlacklist = [], userIdsToBlacklist = [], segmentsToBlacklist = [] }) { // Get all tracks from users and combine with explicit trackIds to BL - const tracksFromUsers = await this.getTracksFromUsers(userIdsToBlacklist) + const tracksFromUsers = await this._getTracksFromUsers(userIdsToBlacklist) const allTrackIdsToBlacklist = trackIdsToBlacklist.concat( tracksFromUsers.map((track) => track.blockchainId) ) @@ -101,12 +105,12 @@ class BlacklistManager { const allTrackIdsToBlacklistSet = new Set(allTrackIdsToBlacklist) try { - await this.addToRedis( + await this._addToRedis( REDIS_SET_BLACKLIST_TRACKID_KEY, allTrackIdsToBlacklist ) - await this.addToRedis(REDIS_SET_BLACKLIST_USERID_KEY, userIdsToBlacklist) - await this.addToRedis( + await this._addToRedis(REDIS_SET_BLACKLIST_USERID_KEY, userIdsToBlacklist) + await this._addToRedis( REDIS_SET_BLACKLIST_SEGMENTCID_KEY, segmentsToBlacklist ) @@ -140,18 +144,18 @@ class BlacklistManager { i + PROCESS_TRACKS_BATCH_SIZE ) - this.logDebug( + this._logDebug( `[addAggregateCIDsToRedis] - tracks slice size: ${tracksSlice.length}` ) const segmentsFromTrackIdsToBlacklist = - await BlacklistManager.getCIDsToBlacklist(tracksSlice, transaction) + await BlacklistManager._getCIDsToBlacklist(tracksSlice, transaction) - this.logDebug( + this._logDebug( `[addAggregateCIDsToRedis] - number of segments: ${segmentsFromTrackIdsToBlacklist.length}` ) - await BlacklistManager.addToRedis( + await BlacklistManager._addToRedis( REDIS_SET_BLACKLIST_SEGMENTCID_KEY, segmentsFromTrackIdsToBlacklist ) @@ -172,13 +176,13 @@ class BlacklistManager { * Given trackIds and userIds to remove from blacklist, fetch all segmentCIDs. * Also remove the trackIds, userIds, and segmentCIDs from redis blacklist sets to prevent future interaction. 
*/ - static async fetchCIDsAndRemoveFromRedis({ + static async _fetchCIDsAndRemoveFromRedis({ trackIdsToRemove = [], userIdsToRemove = [], segmentsToRemove = [] }) { // Get all tracks from users and combine with explicit trackIds to BL - const tracksFromUsers = await this.getTracksFromUsers(userIdsToRemove) + const tracksFromUsers = await this._getTracksFromUsers(userIdsToRemove) const allTrackIdsToBlacklist = trackIdsToRemove.concat( tracksFromUsers.map((track) => track.blockchainId) ) @@ -187,7 +191,7 @@ class BlacklistManager { const allTrackIdsToBlacklistSet = new Set(allTrackIdsToBlacklist) // Retrieves CIDs from deduped trackIds - const segmentsFromTrackIds = await this.getCIDsToBlacklist([ + const segmentsFromTrackIds = await this._getCIDsToBlacklist([ ...allTrackIdsToBlacklistSet ]) @@ -196,15 +200,15 @@ class BlacklistManager { segmentCIDsToRemove = [...segmentCIDsToRemoveSet] try { - await this.removeFromRedis( + await this._removeFromRedis( REDIS_SET_BLACKLIST_TRACKID_KEY, allTrackIdsToBlacklist ) - await this.removeFromRedis( + await this._removeFromRedis( REDIS_SET_BLACKLIST_USERID_KEY, userIdsToRemove ) - await this.removeFromRedis( + await this._removeFromRedis( REDIS_SET_BLACKLIST_SEGMENTCID_KEY, segmentCIDsToRemove ) @@ -217,7 +221,7 @@ class BlacklistManager { * Retrieves track objects from specified users * @param {int[]} userIdsBlacklist */ - static async getTracksFromUsers(userIdsBlacklist) { + static async _getTracksFromUsers(userIdsBlacklist) { let tracks = [] if (userIdsBlacklist.length > 0) { @@ -239,7 +243,7 @@ class BlacklistManager { * @param {Object} transaction sequelize transaction object * @returns {Object[]} array of track model objects from table */ - static async getAllCIDsFromTrackIdsInDb(trackIds, transaction) { + static async _getAllCIDsFromTrackIdsInDb(trackIds, transaction) { const queryConfig = { where: { blockchainId: trackIds } } if (transaction) { queryConfig.transaction = transaction @@ -254,8 +258,8 @@ class BlacklistManager { * @param {Object} transaction sequelize transaction object * @returns {string[]} all CIDs that are blacklisted from input track ids */ - static async getCIDsToBlacklist(inputTrackIds, transaction) { - const tracks = await this.getAllCIDsFromTrackIdsInDb( + static async _getCIDsToBlacklist(inputTrackIds, transaction) { + const tracks = await this._getAllCIDsFromTrackIdsInDb( inputTrackIds, transaction ) @@ -301,40 +305,40 @@ class BlacklistManager { } static async add({ values, type }) { - await this.addToDb({ values, type }) + await this._addToDb({ values, type }) // add to redis switch (type) { case 'USER': // add user ids to redis under userid key + its associated track segments - await this.fetchCIDsAndAddToRedis({ userIdsToBlacklist: values }) + await this._fetchCIDsAndAddToRedis({ userIdsToBlacklist: values }) break case 'TRACK': // add track ids to redis under trackid key + its associated track segments - await this.fetchCIDsAndAddToRedis({ trackIdsToBlacklist: values }) + await this._fetchCIDsAndAddToRedis({ trackIdsToBlacklist: values }) break case 'CID': // add segments to redis under segment key - await this.fetchCIDsAndAddToRedis({ segmentsToBlacklist: values }) + await this._fetchCIDsAndAddToRedis({ segmentsToBlacklist: values }) break } } static async remove({ values, type }) { - await this.removeFromDb({ values, type }) + await this._removeFromDb({ values, type }) switch (type) { case 'USER': // Remove user ids from redis under userid key + its associated track segments - await 
this.fetchCIDsAndRemoveFromRedis({ userIdsToRemove: values }) + await this._fetchCIDsAndRemoveFromRedis({ userIdsToRemove: values }) break case 'TRACK': // Remove track ids from redis under trackid key + its associated track segments - await this.fetchCIDsAndRemoveFromRedis({ trackIdsToRemove: values }) + await this._fetchCIDsAndRemoveFromRedis({ trackIdsToRemove: values }) break case 'CID': // Remove segments from redis under segment key - await this.fetchCIDsAndRemoveFromRedis({ segmentsToRemove: values }) + await this._fetchCIDsAndRemoveFromRedis({ segmentsToRemove: values }) break } } @@ -344,7 +348,7 @@ class BlacklistManager { * @param {number} id user or track id * @param {'USER'|'TRACK'|'CID'} type */ - static async addToDb({ values, type }) { + static async _addToDb({ values, type }) { try { await models.ContentBlacklist.bulkCreate( values.map((value) => ({ @@ -357,7 +361,7 @@ class BlacklistManager { throw new Error(`Error with adding to ContentBlacklist: ${e}`) } - this.log( + this._log( `Sucessfully added entries with type (${type}) and values (${values}) to the ContentBlacklist table!` ) return { type, values } @@ -368,7 +372,7 @@ class BlacklistManager { * @param {number} id user or track id * @param {'USER'|'TRACK'|'CID'} type */ - static async removeFromDb({ values, type }) { + static async _removeFromDb({ values, type }) { let numRowsDestroyed try { numRowsDestroyed = await models.ContentBlacklist.destroy({ @@ -384,13 +388,13 @@ class BlacklistManager { } if (numRowsDestroyed > 0) { - this.logDebug( + this._logDebug( `Removed entry with type [${type}] and values [${values.toString()}] to the ContentBlacklist table!` ) return { type, values } } - this.logDebug( + this._logDebug( `Entry with type [${type}] and id [${values.toString()}] does not exist in ContentBlacklist.` ) return null @@ -406,14 +410,14 @@ class BlacklistManager { static async _addToRedisChunkHelper(redisKey, data) { const redisAddMaxItemsSize = 100000 try { - this.logDebug( + this._logDebug( `About to call _addToRedisChunkHelper for ${redisKey} with data of length ${data.length}` ) for (let i = 0; i < data.length; i += redisAddMaxItemsSize) { await redis.sadd(redisKey, data.slice(i, i + redisAddMaxItemsSize)) } } catch (e) { - this.logError( + this._logError( `Unable to call _addToRedisChunkHelper for ${redisKey}: ${e.message}` ) } @@ -425,7 +429,7 @@ class BlacklistManager { * @param {number[] | string[] | Object} data either array of userIds, trackIds, CIDs, or * @param {number?} expirationSec number of seconds for entry in redis to expire */ - static async addToRedis(redisKey, data, expirationSec = null) { + static async _addToRedis(redisKey, data, expirationSec = null) { switch (redisKey) { case REDIS_MAP_TRACKID_TO_SEGMENTCIDS_KEY: { // Add "MAP.TRACKID.SEGMENTCIDS:::" to set of cids into redis @@ -446,7 +450,7 @@ class BlacklistManager { } if (errors.length > 0) { - this.logWarn(errors.toString()) + this._logWarn(errors.toString()) } break } @@ -458,7 +462,7 @@ class BlacklistManager { if (!data || data.length === 0) return try { await this._addToRedisChunkHelper(redisKey, data) - this.logDebug(`redis set add ${redisKey} successful`) + this._logDebug(`redis set add ${redisKey} successful`) } catch (e) { throw new Error(`Unable to add ${redisKey}:${data}: ${e.toString()}`) } @@ -472,7 +476,7 @@ class BlacklistManager { * @param {string} redisKey type of value * @param {number[] | string[] | Object} data either array of userIds, trackIds, CIDs, or */ - static async removeFromRedis(redisKey, data) { + 
static async _removeFromRedis(redisKey, data) { switch (redisKey) { case REDIS_SET_BLACKLIST_SEGMENTCID_KEY: case REDIS_SET_BLACKLIST_TRACKID_KEY: @@ -481,7 +485,7 @@ class BlacklistManager { if (!data || data.length === 0) return try { const resp = await redis.srem(redisKey, data) - this.logDebug(`redis set remove ${redisKey} response ${resp}`) + this._logDebug(`redis set remove ${redisKey} response ${resp}`) } catch (e) { throw new Error( `Unable to remove ${redisKey}:${data}: ${e.toString()}` @@ -518,7 +522,7 @@ class BlacklistManager { trackId = parseInt(trackId) // Check to see if CID belongs to input trackId from redis. - let cidsOfInputTrackId = await this.getAllCIDsFromTrackIdInRedis(trackId) + let cidsOfInputTrackId = await this._getAllCIDsFromTrackIdInRedis(trackId) // If nothing is found, check redis to see if track is valid. // If valid, add the mapping redis for quick lookup later. @@ -532,11 +536,11 @@ class BlacklistManager { } // Check the db for the segments - const track = (await this.getAllCIDsFromTrackIdsInDb([trackId]))[0] + const track = (await this._getAllCIDsFromTrackIdsInDb([trackId]))[0] // If segments are not found, add to invalid trackIds set if (!track) { - await this.addToRedis( + await this._addToRedis( REDIS_SET_INVALID_TRACKIDS_KEY, [trackId], // Set expiry in case track with this trackId eventually gets uploaded to CN @@ -551,7 +555,7 @@ class BlacklistManager { (s) => s.multihash ) - await this.addToRedis( + await this._addToRedis( REDIS_MAP_TRACKID_TO_SEGMENTCIDS_KEY, { [trackId]: cidsOfInputTrackId }, SEGMENTCID_TO_TRACKID_EXPIRATION_SECONDS @@ -568,14 +572,14 @@ class BlacklistManager { return false } catch (e) { // Error in checking CID. Default to false. - this.logError( + this._logError( `Error in checking if CID=${cid} is servable: ${e.toString()}` ) return false } } - static getTypes() { + static _getTypes() { return types } @@ -651,26 +655,26 @@ class BlacklistManager { * @param {number} trackId * @returns {string[]} cids associated with trackId */ - static async getAllCIDsFromTrackIdInRedis(trackId) { + static async _getAllCIDsFromTrackIdInRedis(trackId) { const redisKey = this.getRedisTrackIdToCIDsKey(trackId) return redis.smembers(redisKey) } - // Logger wrapper methods + // logger wrapper methods - static logDebug(msg) { + static _logDebug(msg) { logger.debug(`BlacklistManager DEBUG: ${msg}`) } - static log(msg) { + static _log(msg) { logger.info(`BlacklistManager: ${msg}`) } - static logWarn(msg) { + static _logWarn(msg) { logger.warn(`BlacklistManager WARNING: ${msg}`) } - static logError(msg) { + static _logError(msg) { logger.error(`BlacklistManager ERROR: ${msg}`) } } diff --git a/creator-node/src/components/replicaSet/replicaSetController.js b/creator-node/src/components/replicaSet/replicaSetController.js index 822f41c40a0..482f0d03f3e 100644 --- a/creator-node/src/components/replicaSet/replicaSetController.js +++ b/creator-node/src/components/replicaSet/replicaSetController.js @@ -221,7 +221,7 @@ const mergePrimaryAndSecondaryController = async (req, res) => { } } - await recurringSyncQueue.add({ + await recurringSyncQueue.add('recurring-sync', { syncType, syncMode, syncRequestParameters diff --git a/creator-node/src/config.js b/creator-node/src/config.js index ad84e1f3e25..b19097a0a9e 100644 --- a/creator-node/src/config.js +++ b/creator-node/src/config.js @@ -264,6 +264,12 @@ const config = convict({ env: 'printSequelizeLogs', default: false }, + expressAppConcurrency: { + doc: 'Number of processes to spawn, where each process runs 
its own Content Node. Default 0 to run one process per core (auto-detected)', + format: 'nat', + env: 'expressAppConcurrency', + default: 0 + }, // Transcoding settings transcodingMaxConcurrency: { diff --git a/creator-node/src/index.ts b/creator-node/src/index.ts index a33b27074e7..0cdb098dad7 100644 --- a/creator-node/src/index.ts +++ b/creator-node/src/index.ts @@ -1,5 +1,9 @@ 'use strict' +import type { Cluster } from 'cluster' +import { clusterUtils } from './utils' +const cluster: Cluster = require('cluster') + const { setupTracing } = require('./tracer') setupTracing('content-node') @@ -21,53 +25,8 @@ const exitWithError = (...msg: any[]) => { process.exit(1) } -const verifyDBConnection = async () => { - try { - logger.info('Verifying DB connection...') - await sequelize.authenticate() // runs SELECT 1+1 AS result to check db connection - logger.info('DB connected successfully!') - } catch (connectionError) { - exitWithError('Error connecting to DB:', connectionError) - } -} - -const runDBMigrations = async () => { - try { - logger.info('Executing database migrations...') - await runMigrations() - logger.info('Migrations completed successfully') - } catch (migrationError) { - exitWithError('Error in migrations:', migrationError) - } -} - -const connectToDBAndRunMigrations = async () => { - await verifyDBConnection() - await clearRunningQueries() - await runDBMigrations() -} - -/** - * Setting a different port is necessary for OpenResty to work. If OpenResty - * is enabled, have the app run on port 3000. Else, run on its configured port. - * @returns the port number to configure the Content Node app - */ -const getPort = () => { - const contentCacheLayerEnabled = config.get('contentCacheLayerEnabled') - - if (contentCacheLayerEnabled) { - return 3000 - } - - return config.get('port') -} - -const startApp = async () => { - logger.info('Configuring service...') - +const verifyConfigAndDb = async () => { await config.asyncConfig() - - // fail if delegateOwnerWallet & delegatePrivateKey not present const delegateOwnerWallet = config.get('delegateOwnerWallet') const delegatePrivateKey = config.get('delegatePrivateKey') const creatorNodeEndpoint = config.get('creatorNodeEndpoint') @@ -78,7 +37,7 @@ const startApp = async () => { ) } - // fail if delegateOwnerWallet doesn't derive from delegatePrivateKey + // Fail if delegateOwnerWallet doesn't derive from delegatePrivateKey const privateKeyBuffer = Buffer.from( config.get('delegatePrivateKey').replace('0x', ''), 'hex' @@ -88,16 +47,6 @@ const startApp = async () => { if (walletAddress !== config.get('delegateOwnerWallet').toLowerCase()) { throw new Error('Invalid delegatePrivateKey/delegateOwnerWallet pair') } - - const trustedNotifierEnabled = !!config.get('trustedNotifierID') - const nodeOperatorEmailAddress = config.get('nodeOperatorEmailAddress') - - if (!trustedNotifierEnabled && !nodeOperatorEmailAddress) { - exitWithError( - 'Cannot startup without a trustedNotifierID or nodeOperatorEmailAddress' - ) - } - try { const solDelegateKeypair = Keypair.fromSeed(privateKeyBuffer) const solDelegatePrivateKey = solDelegateKeypair.secretKey @@ -111,14 +60,37 @@ const startApp = async () => { ) } - await connectToDBAndRunMigrations() + // Fail if Trusted Notifier isn't configured properly + const trustedNotifierEnabled = !!config.get('trustedNotifierID') + const nodeOperatorEmailAddress = config.get('nodeOperatorEmailAddress') + if (!trustedNotifierEnabled && !nodeOperatorEmailAddress) { + exitWithError( + 'Cannot startup without a 
trustedNotifierID or nodeOperatorEmailAddress' + ) + } - const nodeMode = config.get('devMode') ? 'Dev Mode' : 'Production Mode' - await serviceRegistry.initServices() - logger.info(`Initialized services (Node running in ${nodeMode})`) + try { + logger.info('Verifying DB connection...') + await sequelize.authenticate() // runs SELECT 1+1 AS result to check db connection + logger.info('DB connected successfully!') + } catch (connectionError) { + exitWithError('Error connecting to DB:', connectionError) + } +} - const appInfo = initializeApp(getPort(), serviceRegistry) - logger.info('Initialized app and server') +// The primary process performs one-time validation and spawns worker processes that each run the Express app +const startAppForPrimary = async () => { + logger.info(`Primary process with pid=${process.pid} is running`) + + await verifyConfigAndDb() + await clearRunningQueries() + try { + logger.info('Executing database migrations...') + await runMigrations() + logger.info('Migrations completed successfully') + } catch (migrationError) { + exitWithError('Error in migrations:', migrationError) + } // Clear all redis locks try { @@ -127,14 +99,71 @@ const startApp = async () => { logger.warn(`Could not clear write locks. Skipping..: ${e.message}`) } - // Initialize services that do not require the server, but do not need to be awaited. - serviceRegistry.initServicesAsynchronously() + const numWorkers = clusterUtils.getNumWorkers() + logger.info(`Spawning ${numWorkers} processes to run the Express app...`) + const firstWorker = cluster.fork() + // Wait for the first worker to perform one-time init logic before spawning other workers + firstWorker.on('message', (msg) => { + if (msg?.cmd === 'initComplete') { + for (let i = 0; i < numWorkers - 1; i++) { + cluster.fork() + } + } + }) - // Some Services cannot start until server is up. Start them now - // No need to await on this as this process can take a while and can run in the background - serviceRegistry.initServicesThatRequireServer(appInfo.app) + for (const worker of Object.values(cluster.workers || {})) { + worker?.on('message', (msg) => { + if (msg?.cmd === 'setSpecialWorkerId') { + clusterUtils.specialWorkerId = msg?.val + } + }) + } - // when app terminates, close down any open DB connections gracefully + // Respawn workers and update each worker's knowledge of who the special worker is. + // The primary process doesn't need to be respawned because the whole app stops if the primary stops (since the workers are child processes of the primary) + cluster.on('exit', (worker, code, signal) => { + logger.info( + `Worker process with pid=${worker.process.pid} died because ${ + signal || code + }. Respawning...` + ) + const newWorker = cluster.fork() + if (clusterUtils.specialWorkerId === worker.id) { + logger.info( + 'The worker that died was the special worker. Setting a new special worker...' + ) + clusterUtils.specialWorkerId = newWorker.id + for (const worker of Object.values(cluster.workers || {})) { + worker?.send({ cmd: 'setSpecialWorkerId', val: newWorker.id }) + } + } + }) +} + +// Workers don't share memory, so each one is its own Express instance with its own version of objects like serviceRegistry +const startAppForWorker = async () => { + /** + * Setting a different port is necessary for OpenResty to work. If OpenResty + * is enabled, have the app run on port 3000. Else, run on its configured port. 
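+ * Note: under cluster mode every worker calls this and binds the same
+ * port; Node's cluster module shares the listening socket across workers,
+ * with the primary distributing incoming connections.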
+ * @returns the port number to configure the Content Node app + */ + const getPort = () => { + const contentCacheLayerEnabled = config.get('contentCacheLayerEnabled') + + if (contentCacheLayerEnabled) { + return 3000 + } + + return config.get('port') + } + + logger.info( + `Worker process with pid=${process.pid} and worker ID=${cluster.worker?.id} is running` + ) + + await verifyConfigAndDb() + + // When app terminates, close down any open DB connections gracefully ON_DEATH((signal: any, error: any) => { // NOTE: log messages emitted here may be swallowed up if using the bunyan CLI (used by // default in `npm start` command). To see messages emitted after a kill signal, do not @@ -145,5 +174,33 @@ const startApp = async () => { appInfo.server.close() } }) + + const nodeMode = config.get('devMode') ? 'Dev Mode' : 'Production Mode' + + await serviceRegistry.initServices() + logger.info(`Initialized services (Node running in ${nodeMode})`) + const appInfo = initializeApp(getPort(), serviceRegistry) + logger.info('Initialized app and server') + + // Make the first worker wait for some services to be fully up before spinning up other workers + serviceRegistry.initServicesAsynchronously() + if (clusterUtils.isThisWorkerInit()) { + await serviceRegistry.initServicesThatRequireServer(appInfo.app) + } else { + serviceRegistry.initServicesThatRequireServer(appInfo.app) + } + + if (clusterUtils.isThisWorkerInit() && process.send) { + process.send({ cmd: 'initComplete' }) + } +} + +if (cluster.isMaster) { + // eslint-disable-next-line @typescript-eslint/no-floating-promises + startAppForPrimary() +} else if (cluster.isWorker) { + // eslint-disable-next-line @typescript-eslint/no-floating-promises + startAppForWorker() +} else { + throw new Error("Can't determine if process is primary or worker in cluster") } -startApp() diff --git a/creator-node/src/monitors/MonitoringQueue.js b/creator-node/src/monitors/MonitoringQueue.js index 40de792694c..f04a24aeefe 100644 --- a/creator-node/src/monitors/MonitoringQueue.js +++ b/creator-node/src/monitors/MonitoringQueue.js @@ -1,8 +1,10 @@ -const Bull = require('bull') +const { Queue, Worker } = require('bullmq') + const redis = require('../redis') const config = require('../config') const { MONITORS, getMonitorRedisKey } = require('./monitors') const { logger } = require('../logging') +const { clusterUtils } = require('../utils') const QUEUE_INTERVAL_MS = 60 * 1000 @@ -23,28 +25,29 @@ const MONITORING_QUEUE_HISTORY = 500 */ class MonitoringQueue { constructor() { - this.queue = new Bull('monitoring-queue', { - redis: { - port: config.get('redisPort'), - host: config.get('redisHost') - }, + const connection = { + host: config.get('redisHost'), + port: config.get('redisPort') + } + this.queue = new Queue('monitoring-queue', { + connection, defaultJobOptions: { removeOnComplete: MONITORING_QUEUE_HISTORY, removeOnFail: MONITORING_QUEUE_HISTORY } }) - // Clean up anything that might be still stuck in the queue on restart - this.queue.empty() - - this.seedInitialValues() + // Clean up anything that might be still stuck in the queue on restart and run once instantly + if (clusterUtils.isThisWorkerInit()) { + this.queue.drain(true) + this.seedInitialValues() + } - this.queue.process( - PROCESS_NAMES.monitor, - /* concurrency */ 1, - async (job, done) => { + const worker = new Worker( + 'monitoring-queue', + async (job) => { try { - this.logStatus('Starting') + await this.logStatus('Starting') // Iterate over each monitor and set a new value if the cached // value is not 
fresh. @@ -55,13 +58,11 @@ class MonitoringQueue { this.logStatus(`Error on ${monitor.name} ${e}`) } }) - - done(null, {}) } catch (e) { this.logStatus(`Error ${e}`) - done(e) } - } + }, + { connection } ) } @@ -84,16 +85,16 @@ class MonitoringQueue { if (isFresh) return const value = await monitor.func() - this.logStatus(`Computed value for ${monitor.name} ${value}`) + await this.logStatus(`Computed value for ${monitor.name} ${value}`) // Set the value - redis.set(key, value) + await redis.set(key, value) if (monitor.ttl) { // Set a TTL (in seconds) key to track when this value needs refreshing. // We store a separate TTL key rather than expiring the value itself // so that in the case of an error, the current value can still be read - redis.set(ttlKey, 1, 'EX', monitor.ttl) + await redis.set(ttlKey, 1, 'EX', monitor.ttl) } } @@ -115,12 +116,12 @@ class MonitoringQueue { async start() { try { // Run the job immediately - await this.queue.add(PROCESS_NAMES.monitor) + await this.queue.add(PROCESS_NAMES.monitor, {}) // Then enqueue the job to run on a regular interval setInterval(async () => { try { - await this.queue.add(PROCESS_NAMES.monitor) + await this.queue.add(PROCESS_NAMES.monitor, {}) } catch (e) { this.logStatus('Failed to enqueue!') } diff --git a/creator-node/src/serviceRegistry.js b/creator-node/src/serviceRegistry.js index fcda30cd450..6499828e0a9 100644 --- a/creator-node/src/serviceRegistry.js +++ b/creator-node/src/serviceRegistry.js @@ -106,9 +106,9 @@ class ServiceRegistry { // If error occurs in initializing these services, do not continue with app start up. try { - await this.blacklistManager.init() await this.monitoringQueue.start() await this.sessionExpirationQueue.start() + await this.blacklistManager.init() } catch (e) { this.logError(e.message) // eslint-disable-next-line no-process-exit diff --git a/creator-node/src/services/SessionExpirationQueue.js b/creator-node/src/services/SessionExpirationQueue.js index 44914ff03ea..2f2dc1bc817 100644 --- a/creator-node/src/services/SessionExpirationQueue.js +++ b/creator-node/src/services/SessionExpirationQueue.js @@ -1,9 +1,11 @@ -const Bull = require('bull') +const { Queue, Worker } = require('bullmq') const Sequelize = require('sequelize') + const sessionManager = require('../sessionManager') const config = require('../config') const { logger } = require('../logging') const { SessionToken } = require('../models') +const { clusterUtils } = require('../utils') const RUN_INTERVAL = 60 * 1000 * 60 * 24 // daily run const SESSION_EXPIRATION_AGE = 60 * 1000 * 60 * 24 * 14 // 2 weeks @@ -21,28 +23,59 @@ class SessionExpirationQueue { this.sessionExpirationAge = SESSION_EXPIRATION_AGE this.batchSize = BATCH_SIZE this.runInterval = RUN_INTERVAL - this.queue = new Bull('session-expiration-queue', { - redis: { - port: config.get('redisPort'), - host: config.get('redisHost') - }, + this.logStatus = this.logStatus.bind(this) + this.expireSessions = this.expireSessions.bind(this) + const connection = { + host: config.get('redisHost'), + port: config.get('redisPort') + } + this.queue = new Queue('session-expiration-queue', { + connection, defaultJobOptions: { removeOnComplete: true, removeOnFail: true } }) - this.logStatus = this.logStatus.bind(this) - this.expireSessions = this.expireSessions.bind(this) + } + + async expireSessions(sessionExpiredCondition) { + const sessionsToDelete = await SessionToken.findAll( + Object.assign(sessionExpiredCondition, { limit: this.batchSize }) + ) + await 
sessionManager.deleteSessions(sessionsToDelete) + } + + /** + * Logs a status message and includes current queue info + * @param {string} message + */ + async logStatus(message) { + const { waiting, active, completed, failed, delayed } = + await this.queue.getJobCounts() + logger.info( + `Session Expiration Queue: ${message} || active: ${active}, waiting: ${waiting}, failed ${failed}, delayed: ${delayed}, completed: ${completed} ` + ) + } + + /** + * Starts the session expiration queue on a daily cron. + */ + async start() { + const connection = { + host: config.get('redisHost'), + port: config.get('redisPort') + } // Clean up anything that might be still stuck in the queue on restart - this.queue.empty() + if (clusterUtils.isThisWorkerInit()) { + await this.queue.drain(true) + } - this.queue.process( - PROCESS_NAMES.expire_sessions, - /* concurrency */ 1, - async (job, done) => { + const worker = new Worker( + 'session-expiration-queue', + async (job) => { try { - this.logStatus('Starting') + await this.logStatus('Starting') let progress = 0 const SESSION_EXPIRED_CONDITION = { where: { @@ -56,7 +89,7 @@ class SessionExpirationQueue { const numExpiredSessions = await SessionToken.count( SESSION_EXPIRED_CONDITION ) - this.logStatus( + await this.logStatus( `${numExpiredSessions} expired sessions ready for deletion.` ) @@ -64,55 +97,34 @@ class SessionExpirationQueue { while (sessionsToDelete > 0) { await this.expireSessions(SESSION_EXPIRED_CONDITION) progress += (this.batchSize / numExpiredSessions) * 100 - job.progress(progress) + job.updateProgress(progress) sessionsToDelete -= this.batchSize } - done(null, {}) + return {} } catch (e) { - this.logStatus(`Error ${e}`) - done(e) + await this.logStatus(`Error ${e}`) + return e } - } - ) - } - - async expireSessions(sessionExpiredCondition) { - const sessionsToDelete = await SessionToken.findAll( - Object.assign(sessionExpiredCondition, { limit: this.batchSize }) - ) - await sessionManager.deleteSessions(sessionsToDelete) - } - - /** - * Logs a status message and includes current queue info - * @param {string} message - */ - async logStatus(message) { - const { waiting, active, completed, failed, delayed } = - await this.queue.getJobCounts() - logger.info( - `Session Expiration Queue: ${message} || active: ${active}, waiting: ${waiting}, failed ${failed}, delayed: ${delayed}, completed: ${completed} ` + }, + { connection } ) - } - /** - * Starts the session expiration queue on a daily cron. 
- */ - async start() { try { - // Run the job immediately - await this.queue.add(PROCESS_NAMES.expire_sessions) + if (clusterUtils.isThisWorkerSpecial()) { + // Run the job immediately + await this.queue.add(PROCESS_NAMES.expire_sessions, {}) - // Then enqueue the job to run on a regular interval - setInterval(async () => { - try { - await this.queue.add(PROCESS_NAMES.expire_sessions) - } catch (e) { - this.logStatus('Failed to enqueue!') - } - }, this.runInterval) + // Then enqueue the job to run on a regular interval + setInterval(async () => { + try { + await this.queue.add(PROCESS_NAMES.expire_sessions, {}) + } catch (e) { + await this.logStatus('Failed to enqueue!') + } + }, this.runInterval) + } } catch (e) { - this.logStatus('Startup failed!') + await this.logStatus('Startup failed!') } } } diff --git a/creator-node/src/services/prometheusMonitoring/prometheusRegistry.ts b/creator-node/src/services/prometheusMonitoring/prometheusRegistry.ts index 04e04b227d2..1319b18bb4d 100644 --- a/creator-node/src/services/prometheusMonitoring/prometheusRegistry.ts +++ b/creator-node/src/services/prometheusMonitoring/prometheusRegistry.ts @@ -1,4 +1,4 @@ -import type { Job, Queue } from 'bull' +import type { Job, Queue, Worker } from 'bullmq' import { NAMESPACE_PREFIX, @@ -9,6 +9,8 @@ import { } from './prometheus.constants' import * as PrometheusClient from 'prom-client' +const cluster = require('cluster') + /** * See `prometheusMonitoring/README.md` for usage details */ @@ -103,49 +105,28 @@ export class PrometheusRegistry { /** * @param queue the bull queue to collect metrics on - * @param useGlobal whether to search jobs via global callbacks or not + * @param worker the bull worker to collect metrics on * * This function is used to collect prometheus metrics on bull queues * by registering callbacks when jobs fail, wait, or complete */ - public startQueueMetrics(queue: Queue, useGlobal = false) { + public startQueueMetrics(queue: Queue, worker: Worker) { const labels = { queue_name: queue.name } - if (useGlobal) { - queue.on('global:completed', async (jobId: number) => { - const job = await queue.getJob(jobId) - const job_name = job?.data?.task || job?.name || '' - this.recordJobMetrics( - { job_name, ...labels }, - JOB_STATUS.COMPLETED, - job! - ) - }) - queue.on('global:failed', async (jobId: number) => { - const job = await queue.getJob(jobId) - const job_name = job?.data?.task || job?.name || '' - this.recordJobMetrics({ job_name, ...labels }, JOB_STATUS.FAILED, job!) 
- }) - } else { - queue.on('completed', (job: Job) => { - const job_name = job?.data?.task || job.name - this.recordJobMetrics( - { job_name, ...labels }, - JOB_STATUS.COMPLETED, - job - ) - }) - queue.on('failed', (job: Job) => { - const job_name = job?.data?.task || job.name - this.recordJobMetrics({ job_name, ...labels }, JOB_STATUS.FAILED, job) - }) - } + worker.on('completed', (job: Job, result: any, prev: string) => { + const job_name = job?.data?.task || job.name + this.recordJobMetrics({ job_name, ...labels }, JOB_STATUS.COMPLETED, job) + }) + worker.on('failed', (job: Job, error: Error, prev: string) => { + const job_name = job?.data?.task || job.name + this.recordJobMetrics({ job_name, ...labels }, JOB_STATUS.FAILED, job) + }) const metricInterval = setInterval(() => { queue - .getJobCounts() + .getJobCounts('completed', 'failed', 'delayed', 'active', 'waiting') .then(({ completed, failed, delayed, active, waiting }) => { this.getMetric(this.metricNames.JOBS_COMPLETED_TOTAL_GAUGE).set( labels, diff --git a/creator-node/src/services/stateMachineManager/index.js b/creator-node/src/services/stateMachineManager/index.js index 3fbbe759fca..f04d68e70ad 100644 --- a/creator-node/src/services/stateMachineManager/index.js +++ b/creator-node/src/services/stateMachineManager/index.js @@ -1,4 +1,5 @@ const _ = require('lodash') +const { QueueEvents } = require('bullmq') const config = require('../../config') const { logger: baseLogger } = require('../../logging') @@ -13,6 +14,8 @@ const { } = require('./stateMachineConstants') const makeOnCompleteCallback = require('./makeOnCompleteCallback') const { updateContentNodeChainInfo } = require('../ContentNodeInfoManager') +const SyncRequestDeDuplicator = require('./stateReconciliation/SyncRequestDeDuplicator') +const { clusterUtils } = require('../../utils') /** * Manages the queue for monitoring the state of Content Nodes and @@ -49,54 +52,71 @@ class StateMachineManager { prometheusRegistry ) - // Upon completion, make queue jobs record metrics and enqueue other jobs as necessary - const queueNameToQueueMap = { - [QUEUE_NAMES.MONITOR_STATE]: { - queue: monitorStateQueue, - maxWaitingJobs: 10 - }, - [QUEUE_NAMES.FIND_SYNC_REQUESTS]: { - queue: findSyncRequestsQueue, - maxWaitingJobs: 10 - }, - [QUEUE_NAMES.FIND_REPLICA_SET_UPDATES]: { - queue: findReplicaSetUpdatesQueue, - maxWaitingJobs: 10 - }, - [QUEUE_NAMES.MANUAL_SYNC]: { - queue: manualSyncQueue, - maxWaitingJobs: 1000 - }, - [QUEUE_NAMES.RECURRING_SYNC]: { - queue: recurringSyncQueue, - maxWaitingJobs: 100000 - }, - [QUEUE_NAMES.UPDATE_REPLICA_SET]: { - queue: updateReplicaSetQueue, - maxWaitingJobs: 10000 - }, - [QUEUE_NAMES.RECOVER_ORPHANED_DATA]: { - queue: recoverOrphanedDataQueue, - maxWaitingJobs: 10 - } + if (clusterUtils.isThisWorkerInit()) { + await SyncRequestDeDuplicator.clear() } - for (const [queueName, { queue }] of Object.entries(queueNameToQueueMap)) { - queue.on( - 'global:completed', - makeOnCompleteCallback( - queueName, - queueNameToQueueMap, - prometheusRegistry - ).bind(this) - ) + if (clusterUtils.isThisWorkerSpecial()) { + // Upon completion, make queue jobs record metrics and enqueue other jobs as necessary + const queueNameToQueueMap = { + [QUEUE_NAMES.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP]: { + queue: cNodeEndpointToSpIdMapQueue, + maxWaitingJobs: 10 + }, + [QUEUE_NAMES.MONITOR_STATE]: { + queue: monitorStateQueue, + maxWaitingJobs: 10 + }, + [QUEUE_NAMES.FIND_SYNC_REQUESTS]: { + queue: findSyncRequestsQueue, + maxWaitingJobs: 10 + }, + 
[QUEUE_NAMES.FIND_REPLICA_SET_UPDATES]: { + queue: findReplicaSetUpdatesQueue, + maxWaitingJobs: 10 + }, + [QUEUE_NAMES.MANUAL_SYNC]: { + queue: manualSyncQueue, + maxWaitingJobs: 1000 + }, + [QUEUE_NAMES.RECURRING_SYNC]: { + queue: recurringSyncQueue, + maxWaitingJobs: 100000 + }, + [QUEUE_NAMES.UPDATE_REPLICA_SET]: { + queue: updateReplicaSetQueue, + maxWaitingJobs: 10000 + }, + [QUEUE_NAMES.RECOVER_ORPHANED_DATA]: { + queue: recoverOrphanedDataQueue, + maxWaitingJobs: 10 + } + } + for (const queueName of Object.keys(queueNameToQueueMap)) { + const queueEvents = new QueueEvents(queueName, { + connection: { + host: config.get('redisHost'), + port: config.get('redisPort') + } + }) + queueEvents.on( + 'completed', + makeOnCompleteCallback( + queueName, + queueNameToQueueMap, + prometheusRegistry + ).bind(this) + ) + + // Update the mapping in this StateMachineManager whenever a job successfully fetches it + if (queueName === QUEUE_NAMES.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP) { + queueEvents.on( + 'completed', + this.updateMapOnMapFetchJobComplete.bind(this) + ) + } + } } - // Update the mapping in this StateMachineManager whenever a job successfully fetches it - cNodeEndpointToSpIdMapQueue.on( - 'global:completed', - this.updateMapOnMapFetchJobComplete.bind(this) - ) - return { monitorStateQueue, findSyncRequestsQueue, @@ -114,16 +134,20 @@ class StateMachineManager { * - enabled (to the highest enabled mode configured) if the job fetched the mapping successfully * - disabled if the job encountered an error fetching the mapping * @param {number} jobId the ID of the job that completed - * @param {string} resultString the stringified JSON of the job's returnValue + * @param {string} returnvalue the stringified JSON of the job's returnValue */ - async updateMapOnMapFetchJobComplete(jobId, resultString) { + updateMapOnMapFetchJobComplete({ jobId, returnvalue }, id) { // Bull serializes the job result into redis, so we have to deserialize it into JSON let jobResult = {} try { - jobResult = JSON.parse(resultString) || {} + if (typeof returnvalue === 'string' || returnvalue instanceof String) { + jobResult = JSON.parse(returnvalue) || {} + } else { + jobResult = returnvalue || {} + } } catch (e) { baseLogger.warn( - `Failed to parse cNodeEndpoint->spId map jobId ${jobId} result string: ${resultString}` + `Failed to parse cNodeEndpoint->spId map jobId ${jobId} result string: ${returnvalue}` ) return } diff --git a/creator-node/src/services/stateMachineManager/makeOnCompleteCallback.ts b/creator-node/src/services/stateMachineManager/makeOnCompleteCallback.ts index bead9deeeed..194e643f751 100644 --- a/creator-node/src/services/stateMachineManager/makeOnCompleteCallback.ts +++ b/creator-node/src/services/stateMachineManager/makeOnCompleteCallback.ts @@ -1,4 +1,5 @@ import type Logger from 'bunyan' +import type { Queue } from 'bullmq' import type { AnyJobParams, QueueNameToQueueMap, @@ -8,7 +9,6 @@ import type { import type { UpdateReplicaSetJobParams } from './stateReconciliation/types' import type { TQUEUE_NAMES } from './stateMachineConstants' -import { Queue } from 'bull' import { instrumentTracing, tracing } from '../../tracer' const { logger: baseLogger, createChildLogger } = require('../../logging') @@ -53,7 +53,13 @@ function makeOnCompleteCallback( queueNameToQueueMap: QueueNameToQueueMap, prometheusRegistry: any ) { - return async function (jobId: string, resultString: string) { + return async function ( + { + jobId, + returnvalue + }: { jobId: string; returnvalue: string | 
AnyDecoratedJobReturnValue }, + id: string + ) { // Create a logger so that we can filter logs by the tags `queue` and `jobId` const logger = createChildLogger(baseLogger, { queue: nameOfQueueWithCompletedJob, @@ -75,8 +81,12 @@ // Bull serializes the job result into redis, so we have to deserialize it into JSON let jobResult: AnyDecoratedJobReturnValue try { - logger.info(`Job successfully completed. Parsing result: ${resultString}`) - jobResult = JSON.parse(resultString) || {} + logger.info(`Job successfully completed. Parsing result`) + if (typeof returnvalue === 'string' || returnvalue instanceof String) { + jobResult = JSON.parse(returnvalue as string) || {} + } else { + jobResult = returnvalue || {} + } } catch (e: any) { logger.error(`Failed to parse job result string: ${e.message}`) return @@ -155,6 +165,7 @@ const enqueueJobs = async ( const bulkAddResult = await queueToAddTo.addBulk( jobs.map((job) => { return { + name: 'defaultName', data: { enqueuedBy: `${triggeredByQueueName}#${triggeredByJobId}`, ...job diff --git a/creator-node/src/services/stateMachineManager/stateMachineUtils.js b/creator-node/src/services/stateMachineManager/stateMachineUtils.js index 073bb3da14d..608aeb17824 100644 --- a/creator-node/src/services/stateMachineManager/stateMachineUtils.js +++ b/creator-node/src/services/stateMachineManager/stateMachineUtils.js @@ -1,4 +1,4 @@ -const BullQueue = require('bull') +const { Queue, Worker, QueueScheduler } = require('bullmq') const { libs } = require('@audius/sdk') const CreatorNode = libs.CreatorNode @@ -11,7 +11,7 @@ const { METRIC_LABELS } = require('../../services/prometheusMonitoring/prometheus.constants') const config = require('../../config') -const { logger, createChildLogger } = require('../../logging') +const { logger: baseLogger, createChildLogger } = require('../../logging') const { generateTimestampAndSignature } = require('../../apiSigning') const { BATCH_CLOCK_STATUS_REQUEST_TIMEOUT, @@ -19,6 +19,7 @@ const { MAX_USER_BATCH_CLOCK_FETCH_RETRIES } = require('./stateMachineConstants') const { instrumentTracing, tracing } = require('../../tracer') +const { clusterUtils } = require('../../utils') const MAX_BATCH_CLOCK_STATUS_BATCH_SIZE = config.get( 'maxBatchClockStatusBatchSize' ) @@ -89,7 +90,7 @@ const retrieveUserInfoFromReplicaSet = async (replicaToWalletMap) => { // If failed to get response after all attempts, add replica to `unhealthyPeers` list for reconfig if (errorMsg) { - logger.error( + baseLogger.error( `[retrieveUserInfoFromReplicaSet] Could not fetch clock values from replica ${replica}: ${errorMsg.toString()}` ) unhealthyPeers.add(replica) @@ -267,57 +268,68 @@ const makeMetricToRecord = ( const makeQueue = ({ name, + processor, + logger, removeOnComplete, removeOnFail, - lockDuration, - prometheusRegistry = null, - limiter = null + prometheusRegistry, + globalConcurrency = 1, + limiter = null, + onFailCallback = null }) => { - // Settings config from https://github.com/OptimalBits/bull/blob/develop/REFERENCE.md#advanced-settings - const queue = new BullQueue(name, { - redis: { - host: config.get('redisHost'), - port: config.get('redisPort') - }, + const connection = { + host: config.get('redisHost'), + port: config.get('redisPort') + } + const queue = new Queue(name, { + connection, defaultJobOptions: { removeOnComplete, removeOnFail - }, - settings: { - // Should be sufficiently larger than expected job runtime - lockDuration, - // We never want to re-process stalled jobs - maxStalledCount: 0 - }, + } 
+ }) + + const worker = new Worker(name, processor, { + connection, + concurrency: clusterUtils.getConcurrencyPerWorker(globalConcurrency), limiter }) + if (limiter) { + const scheduler = new QueueScheduler(name, { connection }) + } + + _registerQueueEvents(worker, logger) + queue.on( + 'failed', + onFailCallback || + ((job, error, prev) => { + const loggerWithId = createChildLogger(logger, { + jobId: job?.id || 'unknown' + }) + loggerWithId.error( + `Job failed to complete. ID=${job?.id}. Error=${error}` + ) + }) + ) if (prometheusRegistry !== null && prometheusRegistry !== undefined) { - prometheusRegistry.startQueueMetrics(queue) + prometheusRegistry.startQueueMetrics(queue, worker) } - return queue + return { queue, worker, logger } } -const registerQueueEvents = (queue, queueLogger) => { - queue.on('global:waiting', (jobId) => { - const logger = createChildLogger(queueLogger, { jobId }) - logger.info('Job waiting') - }) - queue.on('global:active', (jobId, jobPromise) => { - const logger = createChildLogger(queueLogger, { jobId }) +const _registerQueueEvents = (worker, queueLogger) => { + worker.on('active', (job, prev) => { + const logger = createChildLogger(queueLogger, { jobId: job.id }) logger.info('Job active') }) - queue.on('global:lock-extension-failed', (jobId, err) => { - const logger = createChildLogger(queueLogger, { jobId }) - logger.error(`Job lock extension failed. Error: ${err}`) + worker.on('error', (error) => { + queueLogger.error(`Job error - ${error}`) }) - queue.on('global:stalled', (jobId) => { + worker.on('stalled', (jobId, prev) => { const logger = createChildLogger(queueLogger, { jobId }) - logger.error('Job stalled') - }) - queue.on('global:error', (error) => { - queueLogger.error(`Queue Job Error - ${error}`) + logger.info('Job stalled') }) } @@ -327,6 +339,5 @@ module.exports = { makeGaugeIncToRecord, makeGaugeSetToRecord, retrieveUserInfoFromReplicaSet, - makeQueue, - registerQueueEvents + makeQueue } diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/fetchCNodeEndpointToSpIdMap.jobProcessor.ts b/creator-node/src/services/stateMachineManager/stateMonitoring/fetchCNodeEndpointToSpIdMap.jobProcessor.ts index a3a624fd329..b747e6bbd12 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/fetchCNodeEndpointToSpIdMap.jobProcessor.ts +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/fetchCNodeEndpointToSpIdMap.jobProcessor.ts @@ -10,6 +10,7 @@ import { getMapOfSpIdToChainInfo } from '../../ContentNodeInfoManager' import { stringifyMap } from '../../../utils' +import { QUEUE_NAMES } from '../stateMachineConstants' /** * Processes a job to update the cNodeEndpoint->spId map by reading the chain. 
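The `makeQueue` rewrite in stateMachineUtils.js above is the heart of the Bull→BullMQ migration: the producer (`Queue`) and consumer (`Worker`) are now separate objects, delayed and rate-limited jobs need a `QueueScheduler`, and per-job events hang off the `Worker` (or a `QueueEvents` instance for cross-process listeners, as in stateMachineManager/index.js) instead of Bull's `global:*` queue events. A minimal sketch of that pattern, assuming BullMQ 1.x APIs (where `QueueScheduler` still exists as a separate class) and an illustrative queue name:

```ts
import { Queue, Worker, QueueScheduler, Job } from 'bullmq'

// One shared connection config; BullMQ opens its own redis clients from it
const connection = { host: 'localhost', port: 6379 }

// Producer: only enqueues. BullMQ requires an explicit job name as the first arg
const queue = new Queue('example-queue', {
  connection,
  defaultJobOptions: { removeOnComplete: 100, removeOnFail: 100 }
})

// Consumer: the processor lives on a Worker, unlike Bull's queue.process()
const worker = new Worker(
  'example-queue',
  async (job: Job) => {
    // The return value is serialized to redis and surfaced in 'completed' events
    return { processed: job.id }
  },
  { connection, concurrency: 1, limiter: { max: 1, duration: 60_000 } }
)

// Required in BullMQ 1.x for delayed, stalled, and rate-limited jobs to advance
const _scheduler = new QueueScheduler('example-queue', { connection })

// Events are per-worker now, replacing Bull's 'completed'/'global:completed' split
worker.on('completed', (job: Job) => console.log(`job ${job.id} completed`))
worker.on('failed', (job: Job, err: Error) =>
  console.error(`job ${job?.id} failed: ${err.message}`)
)

const enqueue = async () => queue.add('example-job', { some: 'data' })
```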
@@ -25,7 +26,10 @@ async function fetchCNodeEndpointToSpIdMap({ > { await updateContentNodeChainInfo(logger) return { - cNodeEndpointToSpIdMap: stringifyMap(await getMapOfSpIdToChainInfo(logger)) + cNodeEndpointToSpIdMap: stringifyMap(await getMapOfSpIdToChainInfo(logger)), + jobsToEnqueue: { + [QUEUE_NAMES.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP]: [{}] + } } } diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.ts b/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.ts index 938a85eb147..96c29dcaeb3 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.ts +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/findSyncRequests.jobProcessor.ts @@ -283,13 +283,14 @@ async function _findSyncsForUser( syncMode === SYNC_MODES.MergePrimaryAndSecondary ) { try { - const { duplicateSyncReq, syncReqToEnqueue } = getNewOrExistingSyncReq({ - userWallet: wallet, - primaryEndpoint: thisContentNodeEndpoint, - secondaryEndpoint: secondary, - syncType: SyncType.Recurring, - syncMode - }) + const { duplicateSyncReq, syncReqToEnqueue } = + await getNewOrExistingSyncReq({ + userWallet: wallet, + primaryEndpoint: thisContentNodeEndpoint, + secondaryEndpoint: secondary, + syncType: SyncType.Recurring, + syncMode + }) if (!_.isEmpty(syncReqToEnqueue)) { result = 'new_sync_request_enqueued' diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/index.js b/creator-node/src/services/stateMachineManager/stateMonitoring/index.js index 67bf8247f8c..ee564f1fd26 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/index.js +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/index.js @@ -1,16 +1,15 @@ -const BullQueue = require('bull') const _ = require('lodash') const config = require('../../../config') const { QUEUE_HISTORY, QUEUE_NAMES, - MAX_QUEUE_RUNTIMES, STATE_MONITORING_QUEUE_INIT_DELAY_MS } = require('../stateMachineConstants') -const { makeQueue, registerQueueEvents } = require('../stateMachineUtils') +const { makeQueue } = require('../stateMachineUtils') const processJob = require('../processJob') const { logger: baseLogger, createChildLogger } = require('../../../logging') +const { clusterUtils } = require('../../../utils') const { getLatestUserIdFromDiscovery } = require('./stateMonitoringUtils') const monitorStateJobProcessor = require('./monitorState.jobProcessor') const findSyncRequestsJobProcessor = require('./findSyncRequests.jobProcessor') @@ -38,75 +37,94 @@ const cNodeEndpointToSpIdMapQueueLogger = createChildLogger(baseLogger, { */ class StateMonitoringManager { async init(discoveryNodeEndpoint, prometheusRegistry) { - // Create and start queue to fetch cNodeEndpoint->spId mapping - const cNodeEndpointToSpIdMapQueue = makeQueue({ + // Create queue to fetch cNodeEndpoint->spId mapping + const { queue: cNodeEndpointToSpIdMapQueue } = makeQueue({ name: QUEUE_NAMES.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP, + processor: this.makeProcessJob( + fetchCNodeEndpointToSpIdMapJobProcessor, + cNodeEndpointToSpIdMapQueueLogger, + prometheusRegistry + ).bind(this), + logger: cNodeEndpointToSpIdMapQueueLogger, removeOnComplete: QUEUE_HISTORY.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP, removeOnFail: QUEUE_HISTORY.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP, - lockDuration: MAX_QUEUE_RUNTIMES.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP, prometheusRegistry, limiter: { max: 1, duration: config.get('fetchCNodeEndpointToSpIdMapIntervalMs') + }, 
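+ // The success path re-enqueues via jobsToEnqueue in the job processor above,
+ // and this failure callback re-enqueues too, so the mapping keeps refreshing
+ // regardless of how each fetch ends.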
+ onFailCallback: (job, error, prev) => { + cNodeEndpointToSpIdMapQueueLogger.error( + `Queue Job Failed - ID ${job?.id} - Error ${error}` + ) + cNodeEndpointToSpIdMapQueue.add('retry-after-fail', {}) } }) - await this.startEndpointToSpIdMapQueue( - cNodeEndpointToSpIdMapQueue, - prometheusRegistry - ) // Create queue to slice through batches of users and gather data to be passed to find-sync and find-replica-set-update jobs - const monitorStateQueue = makeQueue({ + const { queue: monitorStateQueue } = makeQueue({ name: QUEUE_NAMES.MONITOR_STATE, + processor: this.makeProcessJob( + monitorStateJobProcessor, + monitorStateLogger, + prometheusRegistry + ).bind(this), + logger: monitorStateLogger, removeOnComplete: QUEUE_HISTORY.MONITOR_STATE, removeOnFail: QUEUE_HISTORY.MONITOR_STATE, - lockDuration: MAX_QUEUE_RUNTIMES.MONITOR_STATE, prometheusRegistry, limiter: { // Bull doesn't allow either of these to be set to 0, so we'll pause the queue later if the jobs per interval is 0 max: config.get('stateMonitoringQueueRateLimitJobsPerInterval') || 1, duration: config.get('stateMonitoringQueueRateLimitInterval') || 1 + }, + onFailCallback: (job, error, prev) => { + const logger = createChildLogger(monitorStateLogger, { + jobId: job?.id || 'unknown' + }) + logger.error(`Job failed to complete. ID=${job?.id}. Error=${error}`) + this.enqueueMonitorStateJobAfterFailure(monitorStateQueue, job) } }) // Create queue to find sync requests - const findSyncRequestsQueue = makeQueue({ + const { queue: findSyncRequestsQueue } = makeQueue({ name: QUEUE_NAMES.FIND_SYNC_REQUESTS, + processor: this.makeProcessJob( + findSyncRequestsJobProcessor, + findSyncRequestsLogger, + prometheusRegistry + ).bind(this), + logger: findSyncRequestsLogger, removeOnComplete: QUEUE_HISTORY.FIND_SYNC_REQUESTS, removeOnFail: QUEUE_HISTORY.FIND_SYNC_REQUESTS, - lockDuration: MAX_QUEUE_RUNTIMES.FIND_SYNC_REQUESTS, prometheusRegistry }) // Create queue to find replica set updates - const findReplicaSetUpdatesQueue = makeQueue({ + const { queue: findReplicaSetUpdatesQueue } = makeQueue({ name: QUEUE_NAMES.FIND_REPLICA_SET_UPDATES, + processor: this.makeProcessJob( + findReplicaSetUpdatesJobProcessor, + findReplicaSetUpdatesLogger, + prometheusRegistry + ).bind(this), + logger: findReplicaSetUpdatesLogger, removeOnComplete: QUEUE_HISTORY.FIND_REPLICA_SET_UPDATES, removeOnFail: QUEUE_HISTORY.FIND_REPLICA_SET_UPDATES, - lockDuration: MAX_QUEUE_RUNTIMES.FIND_REPLICA_SET_UPDATES, prometheusRegistry }) - this.registerMonitoringQueueEventHandlersAndJobProcessors({ - monitorStateQueue, - findSyncRequestsQueue, - findReplicaSetUpdatesQueue, - cNodeEndpointToSpIdMapQueue, - monitorStateJobFailureCallback: this.enqueueMonitorStateJobAfterFailure, - processMonitorStateJob: - this.makeProcessMonitorStateJob(prometheusRegistry).bind(this), - processFindSyncRequestsJob: - this.makeProcessFindSyncRequestsJob(prometheusRegistry).bind(this), - processFindReplicaSetUpdatesJob: - this.makeProcessFindReplicaSetUpdatesJob(prometheusRegistry).bind(this) - }) - // Clear any old state if redis was running but the rest of the server restarted - await monitorStateQueue.obliterate({ force: true }) - await findSyncRequestsQueue.obliterate({ force: true }) - await findReplicaSetUpdatesQueue.obliterate({ force: true }) + if (clusterUtils.isThisWorkerInit()) { + await cNodeEndpointToSpIdMapQueue.obliterate({ force: true }) + await monitorStateQueue.obliterate({ force: true }) + await findSyncRequestsQueue.obliterate({ force: true }) + await 
findReplicaSetUpdatesQueue.obliterate({ force: true }) + } - // Enqueue first monitor-state job + // Start recurring queues that need an initial job to get started + await this.startEndpointToSpIdMapQueue(cNodeEndpointToSpIdMapQueue) await this.startMonitorStateQueue(monitorStateQueue, discoveryNodeEndpoint) return { @@ -117,96 +135,6 @@ class StateMonitoringManager { } } - /** - * Registers event handlers for logging and job success/failure. - * @param {Object} params - * @param {Object} params.monitoringStateQueue the monitor-state queue to register events for - * @param {Object} params.findSyncRequestsQueue the find-sync-requests queue to register events for - * @param {Object} params.findReplicaSetUpdatesQueue the find-replica-set-updates queue to register events for - * @param {Object} params.cNodeEndpointToSpIdMapQueue the queue that fetches the cNodeEndpoint->spId map - * @param {Function} params.monitorStateJobFailureCallback the function to call when a monitorState job fails - * @param {Function} params.processMonitorStateJob the function to call when processing a job from the queue to monitor state - * @param {Function} params.processFindSyncRequestsJob the function to call when processing a job from the queue to find sync requests that potentially need to be issued - * @param {Function} params.processFindReplicaSetUpdatesJob the function to call when processing a job from the queue to find users' replica sets that are unhealthy and need to be updated - */ - registerMonitoringQueueEventHandlersAndJobProcessors({ - monitorStateQueue, - findSyncRequestsQueue, - findReplicaSetUpdatesQueue, - cNodeEndpointToSpIdMapQueue, - monitorStateJobFailureCallback, - processMonitorStateJob, - processFindSyncRequestsJob, - processFindReplicaSetUpdatesJob - }) { - // Add handlers for logging - registerQueueEvents(monitorStateQueue, monitorStateLogger) - registerQueueEvents(findSyncRequestsQueue, findSyncRequestsLogger) - registerQueueEvents(findReplicaSetUpdatesQueue, findReplicaSetUpdatesLogger) - registerQueueEvents( - cNodeEndpointToSpIdMapQueue, - cNodeEndpointToSpIdMapQueueLogger - ) - - // Log when a job fails to complete and re-enqueue another monitoring job - monitorStateQueue.on('failed', (job, err) => { - const logger = createChildLogger(monitorStateLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. Error=${err}`) - monitorStateJobFailureCallback(monitorStateQueue, job) - }) - findSyncRequestsQueue.on('failed', (job, err) => { - const logger = createChildLogger(findSyncRequestsLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. Error=${err}`) - }) - findReplicaSetUpdatesQueue.on('failed', (job, err) => { - const logger = createChildLogger(findReplicaSetUpdatesLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. Error=${err}`) - }) - cNodeEndpointToSpIdMapQueue.on('failed', (job, err) => { - const logger = createChildLogger(cNodeEndpointToSpIdMapQueueLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. 
Error=${err}`) - }) - - // Register the logic that gets executed to process each new job from the queues - monitorStateQueue.process(1 /** concurrency */, processMonitorStateJob) - findSyncRequestsQueue.process( - 1 /** concurrency */, - processFindSyncRequestsJob - ) - findReplicaSetUpdatesQueue.process( - 1 /** concurrency */, - processFindReplicaSetUpdatesJob - ) - } - - /** - * Adds handlers for when a job fails to complete (or completes with an error) or successfully completes. - * Handlers enqueue another job to fetch the cNodeEndpoint->spId map again. - * @param {BullQueue} queue the cNodeToEndpointSpIdMap queue - */ - makeCNodeToEndpointSpIdMapReEnqueueItself(queue) { - queue.on('completed', (job, result) => { - cNodeEndpointToSpIdMapQueueLogger.info( - `Queue Job Completed - ID ${job?.id} - Result ${JSON.stringify(result)}` - ) - queue.add({}) - }) - queue.on('failed', (job, err) => { - cNodeEndpointToSpIdMapQueueLogger.error( - `Queue Job Failed - ID ${job?.id} - Error ${err}` - ) - queue.add({}) - }) - } - /** * Enqueues a job that picks up where the previous failed job left off. * @param monitoringQueue the queue to re-add the job to @@ -217,7 +145,7 @@ class StateMonitoringManager { data: { lastProcessedUserId, discoveryNodeEndpoint } } = failedJob - monitoringQueue.add({ + monitoringQueue.add('retry-after-fail', { lastProcessedUserId, discoveryNodeEndpoint }) @@ -245,33 +173,25 @@ class StateMonitoringManager { const lastProcessedUserId = _.random(0, latestUserId) // Enqueue first monitorState job after a delay. This job requeues itself upon completion or failure - await queue.add( - /** data */ - { - lastProcessedUserId, - discoveryNodeEndpoint - }, - /** opts */ { delay: STATE_MONITORING_QUEUE_INIT_DELAY_MS } - ) + if (clusterUtils.isThisWorkerInit()) { + await queue.add( + 'first-job', + { + lastProcessedUserId, + discoveryNodeEndpoint + }, + { delay: STATE_MONITORING_QUEUE_INIT_DELAY_MS } + ) + } } /** - * Clears the cNodeEndpoint->spId map queue and adds an initial job. + * Adds an initial job to the cNodeEndpoint->spId map queue. * Future jobs are added to the queue as a result of this initial job succeeding/failing. 
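+ * Note: only the init worker (clusterUtils.isThisWorkerInit) enqueues the
+ * first job, so the initial fetch isn't duplicated across cluster workers.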
* @param {Object} queue the cNodeEndpoint->spId map queue to consume jobs from * @param {Object} prometheusRegistry the registry of metrics from src/services/prometheusMonitoring/prometheusRegistry.js */ - async startEndpointToSpIdMapQueue(queue, prometheusRegistry) { - // Clear any old state if redis was running but the rest of the server restarted - await queue.obliterate({ force: true }) - - queue.process( - 1 /** concurrency */, - this.makeProcessFetchCNodeEndpointToSpIdMapJob(prometheusRegistry).bind( - this - ) - ) - + async startEndpointToSpIdMapQueue(queue) { // Since we can't pass 0 to Bull's limiter.max, enforce a rate limit of 0 by // pausing the queue and not enqueuing the first job if (config.get('stateMonitoringQueueRateLimitJobsPerInterval') === 0) { @@ -280,52 +200,13 @@ class StateMonitoringManager { } // Enqueue first job, which requeues itself upon completion or failure - await queue.add({}) - this.makeCNodeToEndpointSpIdMapReEnqueueItself(queue) - } - - /* - * Job processor boilerplate - */ - - makeProcessMonitorStateJob(prometheusRegistry) { - return async (job) => - processJob( - job, - monitorStateJobProcessor, - monitorStateLogger, - prometheusRegistry - ) - } - - makeProcessFindSyncRequestsJob(prometheusRegistry) { - return async (job) => - processJob( - job, - findSyncRequestsJobProcessor, - findSyncRequestsLogger, - prometheusRegistry - ) - } - - makeProcessFindReplicaSetUpdatesJob(prometheusRegistry) { - return async (job) => - processJob( - job, - findReplicaSetUpdatesJobProcessor, - findReplicaSetUpdatesLogger, - prometheusRegistry - ) + if (clusterUtils.isThisWorkerInit()) { + await queue.add('first-job', {}) + } } - makeProcessFetchCNodeEndpointToSpIdMapJob(prometheusRegistry) { - return async (job) => - processJob( - job, - fetchCNodeEndpointToSpIdMapJobProcessor, - cNodeEndpointToSpIdMapQueueLogger, - prometheusRegistry - ) + makeProcessJob(processor, logger, prometheusRegistry) { + return async (job) => processJob(job, processor, logger, prometheusRegistry) } } diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/SyncRequestDeDuplicator.js b/creator-node/src/services/stateMachineManager/stateReconciliation/SyncRequestDeDuplicator.js index b0ba45c01f6..bb9ba3e2c8e 100644 --- a/creator-node/src/services/stateMachineManager/stateReconciliation/SyncRequestDeDuplicator.js +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/SyncRequestDeDuplicator.js @@ -1,3 +1,7 @@ +const _ = require('lodash') + +const redisClient = require('../../../redis') + /** * Ensure a sync request for (syncType, userWallet, secondaryEndpoint) can only be enqueued once * This is used to ensure multiple concurrent sync tasks are not being redundantly used on a single user @@ -7,17 +11,56 @@ * jobs by property and would require a linear iteration over the full job list */ class SyncRequestDeDuplicator { - constructor() { - this.waitingSyncsByUserWalletMap = {} - } - /** Stringify properties to enable storage with a flat map */ _getSyncKey(syncType, userWallet, secondaryEndpoint, immediate = false) { return `${syncType}::${userWallet}::${secondaryEndpoint}::${immediate}` } + /** + * Returns array of all keys in Redis matching pattern, using redis SCAN + * https://github.com/luin/ioredis#streamify-scanning + * + * @returns array | Error + */ + async _getAllKeys() { + const stream = redisClient.scanStream({ + match: this._getPatternForAllKeys() + }) + + const keySet = new Set() + return new Promise((resolve, reject) => { + stream.on('data', (keys 
= []) => { + keys.forEach((key) => { + keySet.add(key) + }) + }) + stream.on('end', () => { + resolve(Array.from(keySet).filter(Boolean)) + }) + stream.on('error', (e) => { + reject(e) + }) + }) + } + + /** + * Builds the redis key pattern that matches every sync de-dup key + * (a wildcard for each component of the key). + * Key pattern string can map to one or multiple keys. + */ + _getPatternForAllKeys() { + return `*::*::*::*` + } + + async clear() { + const keys = await this._getAllKeys() + for (const key of keys) { + await redisClient.del(key) + } + } + /** Return job info of sync with given properties if present else null */ - getDuplicateSyncJobInfo( + async getDuplicateSyncJobInfo( syncType, userWallet, secondaryEndpoint, @@ -30,12 +73,15 @@ immediate ) - const duplicateSyncJobInfo = this.waitingSyncsByUserWalletMap[syncKey] - return duplicateSyncJobInfo || null + const duplicateSyncJobInfo = JSON.parse( + (await redisClient.get(syncKey)) || '{}' + ) + if (_.isEmpty(duplicateSyncJobInfo)) return null + return duplicateSyncJobInfo } /** Record job info for sync with given properties */ - recordSync( + async recordSync( syncType, userWallet, secondaryEndpoint, @@ -49,11 +95,11 @@ immediate ) - this.waitingSyncsByUserWalletMap[syncKey] = jobProps + await redisClient.set(syncKey, JSON.stringify(jobProps)) } /** Remove sync with given properties */ - removeSync(syncType, userWallet, secondaryEndpoint, immediate = false) { + async removeSync(syncType, userWallet, secondaryEndpoint, immediate = false) { const syncKey = this._getSyncKey( syncType, userWallet, @@ -61,7 +107,7 @@ immediate ) - delete this.waitingSyncsByUserWalletMap[syncKey] + await redisClient.del(syncKey) } } diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/index.js b/creator-node/src/services/stateMachineManager/stateReconciliation/index.js index ce013eb1575..3674004e449 100644 --- a/creator-node/src/services/stateMachineManager/stateReconciliation/index.js +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/index.js @@ -1,12 +1,9 @@ const config = require('../../../config') -const { - QUEUE_HISTORY, - QUEUE_NAMES, - MAX_QUEUE_RUNTIMES -} = require('../stateMachineConstants') -const { makeQueue, registerQueueEvents } = require('../stateMachineUtils') +const { QUEUE_HISTORY, QUEUE_NAMES } = require('../stateMachineConstants') +const { makeQueue } = require('../stateMachineUtils') const processJob = require('../processJob') const { logger: baseLogger, createChildLogger } = require('../../../logging') +const { clusterUtils } = require('../../../utils') const handleSyncRequestJobProcessor = require('./issueSyncRequest.jobProcessor') const updateReplicaSetJobProcessor = require('./updateReplicaSet.jobProcessor') const { @@ -34,49 +31,87 @@ const recoverOrphanedDataLogger = createChildLogger(baseLogger, { */ class StateReconciliationManager { async init(discoveryNodeEndpoint, prometheusRegistry) { - const manualSyncQueue = makeQueue({ + const { queue: manualSyncQueue } = makeQueue({ name: QUEUE_NAMES.MANUAL_SYNC, + processor: this.makeProcessJob( + handleSyncRequestJobProcessor, + manualSyncLogger, + prometheusRegistry + ).bind(this), + logger: manualSyncLogger, + globalConcurrency: config.get('maxManualRequestSyncJobConcurrency'), removeOnComplete: QUEUE_HISTORY.MANUAL_SYNC, removeOnFail: QUEUE_HISTORY.MANUAL_SYNC, - lockDuration: MAX_QUEUE_RUNTIMES.MANUAL_SYNC, 
prometheusRegistry }) - const recurringSyncQueue = makeQueue({ + const { queue: recurringSyncQueue } = makeQueue({ name: QUEUE_NAMES.RECURRING_SYNC, + processor: this.makeProcessJob( + handleSyncRequestJobProcessor, + recurringSyncLogger, + prometheusRegistry + ).bind(this), + logger: recurringSyncLogger, + globalConcurrency: config.get('maxRecurringRequestSyncJobConcurrency'), removeOnComplete: QUEUE_HISTORY.RECURRING_SYNC, removeOnFail: QUEUE_HISTORY.RECURRING_SYNC, - lockDuration: MAX_QUEUE_RUNTIMES.RECURRING_SYNC, prometheusRegistry }) - const updateReplicaSetQueue = makeQueue({ + const { queue: updateReplicaSetQueue } = makeQueue({ name: QUEUE_NAMES.UPDATE_REPLICA_SET, + processor: this.makeProcessJob( + updateReplicaSetJobProcessor, + updateReplicaSetLogger, + prometheusRegistry + ).bind(this), + logger: updateReplicaSetLogger, + globalConcurrency: config.get('maxUpdateReplicaSetJobConcurrency'), removeOnComplete: QUEUE_HISTORY.UPDATE_REPLICA_SET, removeOnFail: QUEUE_HISTORY.UPDATE_REPLICA_SET, - lockDuration: MAX_QUEUE_RUNTIMES.UPDATE_REPLICA_SET, prometheusRegistry }) - const recoverOrphanedDataQueue = makeQueue({ + const { queue: recoverOrphanedDataQueue } = makeQueue({ name: QUEUE_NAMES.RECOVER_ORPHANED_DATA, + processor: this.makeProcessJob( + recoverOrphanedDataJobProcessor, + recoverOrphanedDataLogger, + prometheusRegistry + ).bind(this), + logger: recoverOrphanedDataLogger, removeOnComplete: QUEUE_HISTORY.RECOVER_ORPHANED_DATA, removeOnFail: QUEUE_HISTORY.RECOVER_ORPHANED_DATA, - lockDuration: MAX_QUEUE_RUNTIMES.RECOVER_ORPHANED_DATA, prometheusRegistry, limiter: { // Bull doesn't allow either of these to be set to 0, so we'll pause the queue later if the jobs per interval is 0 max: config.get('recoverOrphanedDataQueueRateLimitJobsPerInterval') || 1, duration: config.get('recoverOrphanedDataQueueRateLimitInterval') || 1 + }, + onFailCallback: (job, error, prev) => { + const logger = createChildLogger(recoverOrphanedDataLogger, { + jobId: job?.id || 'unknown' + }) + logger.error(`Job failed to complete. ID=${job?.id}. 
Error=${error}`) + // This is a recurring job that re-enqueues itself on success, so we want to also re-enqueue on failure + const { + data: { discoveryNodeEndpoint } + } = job + recoverOrphanedDataQueue.add('retry-after-fail', { + discoveryNodeEndpoint + }) } }) // Clear any old state if redis was running but the rest of the server restarted - await manualSyncQueue.obliterate({ force: true }) - await recurringSyncQueue.obliterate({ force: true }) - await updateReplicaSetQueue.obliterate({ force: true }) - await recoverOrphanedDataQueue.obliterate({ force: true }) + if (clusterUtils.isThisWorkerInit()) { + await manualSyncQueue.obliterate({ force: true }) + await recurringSyncQueue.obliterate({ force: true }) + await updateReplicaSetQueue.obliterate({ force: true }) + await recoverOrphanedDataQueue.obliterate({ force: true }) + } // Queue the first recoverOrphanedData job, which will re-enqueue itself await this.startRecoverOrphanedDataQueue( @@ -84,21 +119,6 @@ class StateReconciliationManager { discoveryNodeEndpoint ) - this.registerQueueEventHandlersAndJobProcessors({ - manualSyncQueue, - recurringSyncQueue, - updateReplicaSetQueue, - recoverOrphanedDataQueue, - processManualSync: - this.makeProcessManualSyncJob(prometheusRegistry).bind(this), - processRecurringSync: - this.makeProcessRecurringSyncJob(prometheusRegistry).bind(this), - processUpdateReplicaSet: - this.makeProcessUpdateReplicaSetJob(prometheusRegistry).bind(this), - recoverOrphanedData: - this.makeRecoverOrphanedDataJob(prometheusRegistry).bind(this) - }) - return { manualSyncQueue, recurringSyncQueue, @@ -107,81 +127,6 @@ class StateReconciliationManager { } } - /** - * Registers event handlers for logging and job success/failure. - * @param {Object} params.queue the queue to register events for - * @param {Object} params.manualSyncQueue the manual sync queue - * @param {Object} params.recurringSyncQueue the recurring sync queue - * @param {Object} params.updateReplicaSetQueue the updateReplicaSetQueue queue - * @param {Object} params.recoverOrphanedDataQueue the recoverOrphanedDataQueue queue - * @param {Function} params.processManualSync the function to call when processing a manual sync job from the queue - * @param {Function} params.processRecurringSync the function to call when processing a recurring sync job from the queue - * @param {Function} params.processUpdateReplicaSet the function to call when processing an update-replica-set job from the queue - * @param {Function} params.recoverOrphanedData the function to call when processing a recover-orphaned-data job from the queue - */ - registerQueueEventHandlersAndJobProcessors({ - manualSyncQueue, - recurringSyncQueue, - updateReplicaSetQueue, - recoverOrphanedDataQueue, - processManualSync, - processRecurringSync, - processUpdateReplicaSet, - recoverOrphanedData - }) { - // Add handlers for logging - registerQueueEvents(manualSyncQueue, manualSyncLogger) - registerQueueEvents(recurringSyncQueue, recurringSyncLogger) - registerQueueEvents(updateReplicaSetQueue, updateReplicaSetLogger) - registerQueueEvents(recoverOrphanedDataQueue, recoverOrphanedDataLogger) - - // Log when a job fails to complete - manualSyncQueue.on('failed', (job, err) => { - const logger = createChildLogger(manualSyncLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. 
Error=${err}`) - }) - recurringSyncQueue.on('failed', (job, err) => { - const logger = createChildLogger(recurringSyncLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. Error=${err}`) - }) - updateReplicaSetQueue.on('failed', (job, err) => { - const logger = createChildLogger(updateReplicaSetLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. Error=${err}`) - }) - recoverOrphanedDataQueue.on('failed', (job, err) => { - const logger = createChildLogger(recoverOrphanedDataLogger, { - jobId: job?.id || 'unknown' - }) - logger.error(`Job failed to complete. ID=${job?.id}. Error=${err}`) - // This is a recurring job that re-enqueues itself on success, so we want to also re-enqueue on failure - const { - data: { discoveryNodeEndpoint } - } = job - recoverOrphanedDataQueue.add({ discoveryNodeEndpoint }) - }) - - // Register the logic that gets executed to process each new job from the queues - manualSyncQueue.process( - config.get('maxManualRequestSyncJobConcurrency'), - processManualSync - ) - recurringSyncQueue.process( - config.get('maxRecurringRequestSyncJobConcurrency'), - processRecurringSync - ) - updateReplicaSetQueue.process( - config.get('maxUpdateReplicaSetJobConcurrency'), - processUpdateReplicaSet - ) - recoverOrphanedDataQueue.process(1 /** concurrency */, recoverOrphanedData) - } - /** * Adds a job that will find+reconcile data on nodes outside of a user's replica set. * Future jobs are added to the queue as a result of this initial job succeeding or failing to complete. @@ -197,51 +142,13 @@ class StateReconciliationManager { } // Enqueue first recoverOrphanedData job after a delay. This job requeues itself upon completion or failure - await queue.add({ discoveryNodeEndpoint }) - } - - /* - * Job processor boilerplate - */ - - makeProcessManualSyncJob(prometheusRegistry) { - return async (job) => - processJob( - job, - handleSyncRequestJobProcessor, - manualSyncLogger, - prometheusRegistry - ) - } - - makeProcessRecurringSyncJob(prometheusRegistry) { - return async (job) => - processJob( - job, - handleSyncRequestJobProcessor, - recurringSyncLogger, - prometheusRegistry - ) - } - - makeProcessUpdateReplicaSetJob(prometheusRegistry) { - return async (job) => - processJob( - job, - updateReplicaSetJobProcessor, - updateReplicaSetLogger, - prometheusRegistry - ) + if (clusterUtils.isThisWorkerInit()) { + await queue.add('first-job', { discoveryNodeEndpoint }) + } } - makeRecoverOrphanedDataJob(prometheusRegistry) { - return async (job) => - processJob( - job, - recoverOrphanedDataJobProcessor, - recoverOrphanedDataLogger, - prometheusRegistry - ) + makeProcessJob(processor, logger, prometheusRegistry) { + return async (job) => processJob(job, processor, logger, prometheusRegistry) } } diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/issueSyncRequest.jobProcessor.ts b/creator-node/src/services/stateMachineManager/stateReconciliation/issueSyncRequest.jobProcessor.ts index 85ffad794c9..afdb0af36b7 100644 --- a/creator-node/src/services/stateMachineManager/stateReconciliation/issueSyncRequest.jobProcessor.ts +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/issueSyncRequest.jobProcessor.ts @@ -198,7 +198,7 @@ async function _handleIssueSyncRequest({ * It is ok for two identical syncs to be present in Active and Waiting, just not two in Waiting. 
*/ // eslint-disable-next-line node/no-sync - SyncRequestDeDuplicator.removeSync( + await SyncRequestDeDuplicator.removeSync( syncType, userWallet, secondaryEndpoint, diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/stateReconciliationUtils.js b/creator-node/src/services/stateMachineManager/stateReconciliation/stateReconciliationUtils.js index 02991a2d711..9d4c9a79310 100644 --- a/creator-node/src/services/stateMachineManager/stateReconciliation/stateReconciliationUtils.js +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/stateReconciliationUtils.js @@ -22,7 +22,7 @@ const HEALTHY_NODES_CACHE_KEY = 'stateMachineHealthyContentNodes' * syncReqToEnqueue * } */ -const getNewOrExistingSyncReq = ({ +const getNewOrExistingSyncReq = async ({ userWallet, primaryEndpoint, secondaryEndpoint, @@ -45,12 +45,13 @@ const getNewOrExistingSyncReq = ({ * If duplicate sync already exists, do not add and instead return existing sync job info * Ignore syncMode when checking for duplicates, since it doesn't matter */ - const duplicateSyncJobInfo = SyncRequestDeDuplicator.getDuplicateSyncJobInfo( - syncType, - userWallet, - secondaryEndpoint, - immediate - ) + const duplicateSyncJobInfo = + await SyncRequestDeDuplicator.getDuplicateSyncJobInfo( + syncType, + userWallet, + secondaryEndpoint, + immediate + ) if (duplicateSyncJobInfo) { logger.info( `getNewOrExistingSyncReq() Failure - a sync of type ${syncType} is already waiting for user wallet ${userWallet} against secondary ${secondaryEndpoint}` @@ -88,7 +89,7 @@ const getNewOrExistingSyncReq = ({ } // eslint-disable-next-line node/no-sync - SyncRequestDeDuplicator.recordSync( + await SyncRequestDeDuplicator.recordSync( syncType, userWallet, secondaryEndpoint, @@ -113,7 +114,7 @@ const _issueSyncRequestsUntilSynced = async ( queue ) => { // Issue syncRequest before polling secondary for replication - const { duplicateSyncReq, syncReqToEnqueue } = getNewOrExistingSyncReq({ + const { duplicateSyncReq, syncReqToEnqueue } = await getNewOrExistingSyncReq({ userWallet: wallet, secondaryEndpoint: secondaryUrl, primaryEndpoint: primaryUrl, @@ -126,7 +127,7 @@ const _issueSyncRequestsUntilSynced = async ( logger.warn(`Duplicate sync request: ${JSON.stringify(duplicateSyncReq)}`) return } else if (!_.isEmpty(syncReqToEnqueue)) { - await queue.add({ + await queue.add('manual-sync', { enqueuedBy: 'issueSyncRequestsUntilSynced', ...syncReqToEnqueue }) diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.ts b/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.ts index af048631749..09943698fed 100644 --- a/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.ts +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.ts @@ -723,7 +723,7 @@ const _issueUpdateReplicaSetOp = async ( // Enqueue a sync from new primary to new secondary1. If there is no diff, then this is a no-op. 
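// (Illustrative sketch, not part of this patch.) The dedup helpers awaited
// below now round-trip through Redis instead of an in-memory map. Assuming
// _getSyncKey() joins its four arguments with '::' (consistent with the
// `*::*::*::*` pattern in SyncRequestDeDuplicator), the key lifecycle is:
//
//   const syncKey = [syncType, userWallet, secondaryEndpoint, immediate].join('::')
//   await redisClient.set(syncKey, JSON.stringify(jobProps))          // recordSync
//   const dup = JSON.parse((await redisClient.get(syncKey)) || '{}')  // getDuplicateSyncJobInfo
//   await redisClient.del(syncKey)                                    // removeSync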
const { duplicateSyncReq, syncReqToEnqueue: syncToEnqueueToSecondary1 } = - getNewOrExistingSyncReq({ + await getNewOrExistingSyncReq({ userWallet: wallet, primaryEndpoint: newPrimary, secondaryEndpoint: newSecondary1, diff --git a/creator-node/src/services/stateMachineManager/types.ts b/creator-node/src/services/stateMachineManager/types.ts index 97850c7562b..7a74f78f2ae 100644 --- a/creator-node/src/services/stateMachineManager/types.ts +++ b/creator-node/src/services/stateMachineManager/types.ts @@ -1,5 +1,7 @@ import type Logger from 'bunyan' +import type { Queue } from 'bullmq' import type { + FetchCNodeEndpointToSpIdMapJobParams, FindReplicaSetUpdateJobParams, FindReplicaSetUpdatesJobReturnValue, FindSyncRequestsJobParams, @@ -16,8 +18,6 @@ import type { } from './stateReconciliation/types' import type { TQUEUE_NAMES } from './stateMachineConstants' -import { Queue } from 'bull' - export type QueueNameToQueueMap = Record< TQUEUE_NAMES, { @@ -45,12 +45,14 @@ export type AnyJobParams = | FindReplicaSetUpdateJobParams | IssueSyncRequestJobParams | UpdateReplicaSetJobParams + | FetchCNodeEndpointToSpIdMapJobParams export type AnyDecoratedJobParams = | DecoratedJobParams | DecoratedJobParams | DecoratedJobParams | DecoratedJobParams | DecoratedJobParams + | DecoratedJobParams /** * Job return values (outputs) @@ -61,6 +63,7 @@ export type ParamsForJobsToEnqueue = | FindReplicaSetUpdateJobParams | IssueSyncRequestJobParams | UpdateReplicaSetJobParamsWithoutEnabledReconfigModes + | FetchCNodeEndpointToSpIdMapJobParams export type JobsToEnqueue = Partial< Record > diff --git a/creator-node/src/services/sync/skippedCIDsRetryService.js b/creator-node/src/services/sync/skippedCIDsRetryService.js index ce5f7523682..fe1b5d94ffd 100644 --- a/creator-node/src/services/sync/skippedCIDsRetryService.js +++ b/creator-node/src/services/sync/skippedCIDsRetryService.js @@ -1,8 +1,9 @@ -const Bull = require('bull') +const { Queue, Worker } = require('bullmq') const models = require('../../models') const { logger } = require('../../logging') const utils = require('../../utils') +const { clusterUtils } = require('../../utils') const { saveFileForMultihashToFS } = require('../../fileManager') const LogPrefix = '[SkippedCIDsRetryQueue]' @@ -17,41 +18,20 @@ class SkippedCIDsRetryQueue { if (!nodeConfig || !libs) { throw new Error(`${LogPrefix} Cannot start without nodeConfig, libs`) } - - this.queue = new Bull('skipped-cids-retry-queue', { - redis: { - port: nodeConfig.get('redisPort'), - host: nodeConfig.get('redisHost') - }, + this.nodeConfig = nodeConfig + this.libs = libs + const connection = { + host: nodeConfig.get('redisHost'), + port: nodeConfig.get('redisPort') + } + this.queue = new Queue('skipped-cids-retry-queue', { + connection, defaultJobOptions: { // these required since completed/failed jobs data set can grow infinitely until memory exhaustion removeOnComplete: RETRY_QUEUE_HISTORY, removeOnFail: RETRY_QUEUE_HISTORY } }) - - // Clean up anything that might be still stuck in the queue on restart - this.queue.empty() - - const SkippedCIDsRetryQueueJobIntervalMs = nodeConfig.get( - 'skippedCIDsRetryQueueJobIntervalMs' - ) - const CIDMaxAgeMs = - nodeConfig.get('skippedCIDRetryQueueMaxAgeHr') * 60 * 60 * 1000 // convert from Hr to Ms - - this.queue.process(async (job, done) => { - try { - await this.process(CIDMaxAgeMs, libs) - } catch (e) { - this.logError(`Failed to process job || Error: ${e.message}`) - } - - // Re-enqueue job after some interval - await 
utils.timeout(SkippedCIDsRetryQueueJobIntervalMs, false) - await this.queue.add({ startTime: Date.now() }) - - done() - }) } logInfo(msg) { @@ -62,10 +42,42 @@ class SkippedCIDsRetryQueue { logger.error(`${LogPrefix} ${msg}`) } - // Add first job to queue async init() { try { - await this.queue.add({ startTime: Date.now() }) + const connection = { + host: this.nodeConfig.get('redisHost'), + port: this.nodeConfig.get('redisPort') + } + + // Clean up anything that might be still stuck in the queue on restart + if (clusterUtils.isThisWorkerInit()) { + await this.queue.drain(true) + } + + const SkippedCIDsRetryQueueJobIntervalMs = this.nodeConfig.get( + 'skippedCIDsRetryQueueJobIntervalMs' + ) + const CIDMaxAgeMs = + this.nodeConfig.get('skippedCIDRetryQueueMaxAgeHr') * 60 * 60 * 1000 // convert from Hr to Ms + + const worker = new Worker( + 'skipped-cids-retry-queue', + async (job) => { + try { + await this.process(CIDMaxAgeMs, this.libs) + } catch (e) { + this.logError(`Failed to process job || Error: ${e.message}`) + } + + // Re-enqueue job after some interval + await utils.timeout(SkippedCIDsRetryQueueJobIntervalMs, false) + await this.queue.add('skipped-cids-retry', { startTime: Date.now() }) + }, + { connection } + ) + + // Add first job to queue + await this.queue.add('skipped-cids-retry', { startTime: Date.now() }) this.logInfo(`Successfully initialized and enqueued initial job.`) } catch (e) { this.logError(`Failed to start`) diff --git a/creator-node/src/services/sync/syncImmediateQueue.js b/creator-node/src/services/sync/syncImmediateQueue.js index ac294b0d7bd..975947831c1 100644 --- a/creator-node/src/services/sync/syncImmediateQueue.js +++ b/creator-node/src/services/sync/syncImmediateQueue.js @@ -1,6 +1,7 @@ -const Bull = require('bull') -const { instrumentTracing, tracing } = require('../../tracer') +const { Queue, QueueEvents, Worker } = require('bullmq') +const { clusterUtils } = require('../../utils') +const { instrumentTracing, tracing } = require('../../tracer') const { logger, logInfoWithDuration, @@ -10,7 +11,6 @@ const { const secondarySyncFromPrimary = require('./secondarySyncFromPrimary') const SYNC_QUEUE_HISTORY = 500 -const LOCK_DURATION = 1000 * 60 * 5 // 5 minutes /** * SyncImmediateQueue - handles enqueuing and processing of immediate manual Sync jobs on secondary @@ -29,56 +29,61 @@ class SyncImmediateQueue { this.redis = redis this.serviceRegistry = serviceRegistry - this.queue = new Bull('sync-immediate-processing-queue', { - redis: { - host: this.nodeConfig.get('redisHost'), - port: this.nodeConfig.get('redisPort') - }, + const connection = { + host: nodeConfig.get('redisHost'), + port: nodeConfig.get('redisPort') + } + this.queue = new Queue('sync-immediate-processing-queue', { + connection, defaultJobOptions: { removeOnComplete: SYNC_QUEUE_HISTORY, removeOnFail: SYNC_QUEUE_HISTORY - }, - settings: { - lockDuration: LOCK_DURATION, - // We never want to re-process stalled jobs - maxStalledCount: 0 } }) + this.queueEvents = new QueueEvents('sync-immediate-processing-queue', { + connection + }) - const jobProcessorConcurrency = this.nodeConfig.get( - 'syncQueueMaxConcurrency' - ) - this.queue.process(jobProcessorConcurrency, async (job) => { - // Get the `parentSpanContext` from the job data - // so the job can reference what span enqueued it - const { parentSpanContext } = job.data + const worker = new Worker( + 'sync-immediate-processing-queue', + async (job) => { + // Get the `parentSpanContext` from the job data + // so the job can reference what span 
enqueued it + const { parentSpanContext } = job.data - const untracedProcessTask = this.processTask - const processTask = instrumentTracing({ - name: 'syncImmediateQueue.process', - fn: untracedProcessTask, - options: { - // if a parentSpanContext is provided - // reference it so the async queue job can remember - // who enqueued it - links: parentSpanContext - ? [ - { - context: parentSpanContext - } - ] - : [], - attributes: { - [tracing.CODE_FILEPATH]: __filename + const untracedProcessTask = this.processTask + const processTask = instrumentTracing({ + name: 'syncImmediateQueue.process', + fn: untracedProcessTask, + options: { + // if a parentSpanContext is provided + // reference it so the async queue job can remember + // who enqueued it + links: parentSpanContext + ? [ + { + context: parentSpanContext + } + ] + : [], + attributes: { + [tracing.CODE_FILEPATH]: __filename + } } - } - }) + }) - // `processTask()` on longer has access to `this` after going through the tracing wrapper - so to mitigate that, we're manually adding `this.serviceRegistry` to the job data - job.data = { ...job.data, serviceRegistry: this.serviceRegistry } - return await processTask(job) - }) + // `processTask()` no longer has access to `this` after going through the tracing wrapper + // so to mitigate that, we're manually adding `this.serviceRegistry` to the job data + job.data = { ...job.data, serviceRegistry: this.serviceRegistry } + return await processTask(job) + }, + { + connection, + concurrency: clusterUtils.getConcurrencyPerWorker( + this.nodeConfig.get('syncQueueMaxConcurrency') + ) + } + ) } async processTask(job) { @@ -123,14 +128,14 @@ logContext, parentSpanContext }) { - const job = await this.queue.add({ + const job = await this.queue.add('process-sync-immediate', { wallet, creatorNodeEndpoint, forceResyncConfig, logContext, parentSpanContext }) - const result = await job.finished() + const result = await job.waitUntilFinished(this.queueEvents) return result } } diff --git a/creator-node/src/services/sync/syncQueue.js b/creator-node/src/services/sync/syncQueue.js index 456d063881a..40c6ed0ee3c 100644 --- a/creator-node/src/services/sync/syncQueue.js +++ b/creator-node/src/services/sync/syncQueue.js @@ -1,6 +1,7 @@ -const Bull = require('bull') -const { instrumentTracing, tracing } = require('../../tracer') +const { Queue, Worker } = require('bullmq') +const { clusterUtils } = require('../../utils') +const { instrumentTracing, tracing } = require('../../tracer') const { logger, logInfoWithDuration, @@ -10,7 +11,6 @@ const { const secondarySyncFromPrimary = require('./secondarySyncFromPrimary') const SYNC_QUEUE_HISTORY = 500 -const LOCK_DURATION = 1000 * 60 * 30 // 30 minutes /** * SyncQueue - handles enqueuing and processing of Sync jobs on secondary @@ -27,19 +27,15 @@ class SyncQueue { this.redis = redis this.serviceRegistry = serviceRegistry - this.queue = new Bull('sync-processing-queue', { - redis: { - host: this.nodeConfig.get('redisHost'), - port: this.nodeConfig.get('redisPort') - }, + const connection = { + host: nodeConfig.get('redisHost'), + port: nodeConfig.get('redisPort') + } + this.queue = new Queue('sync-processing-queue', { + connection, defaultJobOptions: { removeOnComplete: SYNC_QUEUE_HISTORY, removeOnFail: SYNC_QUEUE_HISTORY - }, - settings: { - lockDuration: LOCK_DURATION, - // We never want to re-process stalled jobs - maxStalledCount: 0 } }) @@ -51,34 +47,40 @@ class SyncQueue { * * @dev TODO - consider recording failures in redis */ - const
jobProcessorConcurrency = this.nodeConfig.get( - 'syncQueueMaxConcurrency' - ) - this.queue.process(jobProcessorConcurrency, async (job) => { - const { parentSpanContext } = job.data - const untracedProcessTask = this.processTask - const processTask = instrumentTracing({ - name: 'syncQueue.process', - fn: untracedProcessTask, - options: { - links: parentSpanContext - ? [ - { - context: parentSpanContext - } - ] - : [], - attributes: { - [tracing.CODE_FILEPATH]: __filename + const worker = new Worker( + 'sync-processing-queue', + async (job) => { + const { parentSpanContext } = job.data + const untracedProcessTask = this.processTask + const processTask = instrumentTracing({ + name: 'syncQueue.process', + fn: untracedProcessTask, + options: { + links: parentSpanContext + ? [ + { + context: parentSpanContext + } + ] + : [], + attributes: { + [tracing.CODE_FILEPATH]: __filename + } } - } - }) + }) - // `processTask()` on longer has access to `this` after going through the tracing wrapper - so to mitigate that, we're manually adding `this.serviceRegistry` to the job data - job.data = { ...job.data, serviceRegistry: this.serviceRegistry } - return await processTask(job) - }) + // `processTask()` no longer has access to `this` after going through the tracing wrapper + // so to mitigate that, we're manually adding `this.serviceRegistry` to the job data + job.data = { ...job.data, serviceRegistry: this.serviceRegistry } + return await processTask(job) + }, + { + connection, + concurrency: clusterUtils.getConcurrencyPerWorker( + this.nodeConfig.get('syncQueueMaxConcurrency') + ) + } + ) } async processTask(job) { @@ -129,7 +131,7 @@ logContext, parentSpanContext }) { - const job = await this.queue.add({ + const job = await this.queue.add('process-sync', { wallet, creatorNodeEndpoint, blockNumber, diff --git a/creator-node/src/snapbackSM/snapbackSM.js b/creator-node/src/snapbackSM/snapbackSM.js index 18a1598a4ed..88ce4d63362 100644 --- a/creator-node/src/snapbackSM/snapbackSM.js +++ b/creator-node/src/snapbackSM/snapbackSM.js @@ -1,4 +1,3 @@ -const Bull = require('bull') const axios = require('axios') const _ = require('lodash') const retry = require('async-retry') @@ -339,21 +338,7 @@ class SnapbackSM { } // Initialize bull queue instance with provided name and settings - createBullQueue(queueName, settings = {}, limiter = null) { - return new Bull(queueName, { - redis: { - port: this.nodeConfig.get('redisPort'), - host: this.nodeConfig.get('redisHost') - }, - defaultJobOptions: { - // removeOnComplete is required since the completed jobs data set will grow infinitely until memory exhaustion - removeOnComplete: SNAPBACK_QUEUE_HISTORY, - removeOnFail: SNAPBACK_QUEUE_HISTORY - }, - settings, - limiter - }) - } + createBullQueue(queueName, settings = {}, limiter = null) {} // Randomly select an initial slice randomStartingSlice() { diff --git a/creator-node/src/utils/clusterUtils.ts b/creator-node/src/utils/clusterUtils.ts new file mode 100644 index 00000000000..6dd5c6ac858 --- /dev/null +++ b/creator-node/src/utils/clusterUtils.ts @@ -0,0 +1,62 @@ +import type { Cluster } from 'cluster' +import type { CpuInfo } from 'os' +const cluster: Cluster = require('cluster') +const { cpus }: { cpus: () => CpuInfo[] } = require('os') + +const config = require('../config') + +/** + * Some tasks are only done on one worker, which is determined by this util.
Those tasks are: + * - listen for state machine jobs to complete and run onComplete callbacks + * - regularly add jobs to the session expiration queue on an interval + */ +class ClusterUtils { + private _specialWorkerId = 1 + get specialWorkerId(): number { + return this._specialWorkerId + } + + set specialWorkerId(specialWorkerId: number) { + this._specialWorkerId = specialWorkerId + } + + /** + * Returns true if this current worker process is the first worker, which performs + * some special initialization logic that other workers don't need to duplicate. + */ + isThisWorkerInit() { + return cluster.worker?.id === 1 + } + + isThisWorkerSpecial() { + return cluster.worker?.id === this._specialWorkerId + } + + getNumWorkers() { + // This is called `cpus()` but it actually returns the # of logical cores, which is possibly higher than # of physical cores if there's hyperthreading + const logicalCores = cpus().length + return config.get('expressAppConcurrency') || logicalCores + } + + getConcurrencyForEnvVar(envVar: string) { + const globalConcurrency = config.get(envVar) + return this.getConcurrencyPerWorker(globalConcurrency) + } + + /** + * Calculates the concurrency that each worker should have to achieve the given global concurrency. + * Note that a global concurrency of 1 is not possible with multiple workers, as per the docs: + * https://docs.bullmq.io/guide/workers/concurrency + * This means that if the global concurrency given is set to 1, it will have to be 1 per worker not 1 globally. + * @param globalConcurrency the global concurrency to achieve by splitting concurrency across workers + * @returns concurrency that each worker process on this machine needs to achieve the desired global concurrency + */ + getConcurrencyPerWorker(globalConcurrency: number) { + const numWorkers = this.getNumWorkers() + const concurrencyPerWorker = Math.floor(globalConcurrency / numWorkers) + return concurrencyPerWorker || 1 + } +} + +const clusterUtils = new ClusterUtils() +export { clusterUtils } diff --git a/creator-node/src/utils/index.ts b/creator-node/src/utils/index.ts index a9c8b066e7b..ec1820a5ab6 100644 --- a/creator-node/src/utils/index.ts +++ b/creator-node/src/utils/index.ts @@ -29,6 +29,7 @@ import { currentNodeShouldHandleTranscode, getAllRegisteredCNodes } from './contentNodeUtils' +import { clusterUtils } from './clusterUtils' export type { ReplicaSet } from './strToReplicaSet' export { @@ -47,7 +48,8 @@ export { validateAssociatedWallets, validateMetadata, strToReplicaSet, - stringifyMap + stringifyMap, + clusterUtils } module.exports = { @@ -68,5 +70,6 @@ module.exports = { strToReplicaSet, stringifyMap, verifyCIDMatchesExpected, - EMPTY_FILE_CID + EMPTY_FILE_CID, + clusterUtils } diff --git a/creator-node/test/StateMonitoringManager.test.js b/creator-node/test/StateMonitoringManager.test.js index a1af2a8cc77..78ef406e572 100644 --- a/creator-node/test/StateMonitoringManager.test.js +++ b/creator-node/test/StateMonitoringManager.test.js @@ -4,16 +4,12 @@ const chai = require('chai') const sinon = require('sinon') const { expect } = chai const proxyquire = require('proxyquire') -const BullQueue = require('bull') const { getApp } = require('./lib/app') const { getLibsMock } = require('./lib/libsMock') const config = require('../src/config') const StateMonitoringManager = require('../src/services/stateMachineManager/stateMonitoring') -const { - QUEUE_NAMES -} = require('../src/services/stateMachineManager/stateMachineConstants') chai.use(require('sinon-chai')) 
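// (Illustrative sketch, not part of this patch.) A worked example of the
// clusterUtils concurrency split introduced above, assuming getNumWorkers()
// resolves to 4 (e.g. expressAppConcurrency is set to 4):
//
//   clusterUtils.getConcurrencyPerWorker(10) // floor(10 / 4) = 2 per worker
//   clusterUtils.getConcurrencyPerWorker(4)  // floor(4 / 4)  = 1 per worker
//   clusterUtils.getConcurrencyPerWorker(1)  // floor(1 / 4)  = 0, bumped to 1,
//                                            // so the effective global concurrency is 4, not 1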
chai.use(require('chai-as-promised')) @@ -50,90 +46,6 @@ describe('test StateMonitoringManager initialization, events, and re-enqueuing', return prometheusRegistry } - function getProcessJobMock() { - const loggerStub = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } - const createChildLogger = sandbox.stub().returns(loggerStub) - const processJobMock = proxyquire( - '../src/services/stateMachineManager/processJob.ts', - { - '../../logging': { - createChildLogger - }, - '../../redis': { - set: sandbox.stub() - } - } - ) - return { processJobMock, loggerStub } - } - - it('creates the queue and registers its event handlers', async function () { - // Mock the latest userId, which is used during init as an upper bound - // to start the monitoring queue at a random user - const discoveryNodeEndpoint = 'https://discoveryNodeEndpoint.co' - nock(discoveryNodeEndpoint).get('/latest/user').reply(200, { data: 0 }) - - // Initialize StateMonitoringManager and spy on its registerMonitoringQueueEventHandlersAndJobProcessors function - const stateMonitoringManager = new StateMonitoringManager() - sandbox.spy( - stateMonitoringManager, - 'registerMonitoringQueueEventHandlersAndJobProcessors' - ) - const { - monitorStateQueue, - findSyncRequestsQueue, - findReplicaSetUpdatesQueue, - cNodeEndpointToSpIdMapQueue - } = await stateMonitoringManager.init( - discoveryNodeEndpoint, - getPrometheusRegistry() - ) - - // Verify that the queue was successfully initialized and that its event listeners were registered - expect(monitorStateQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect(findSyncRequestsQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect(findReplicaSetUpdatesQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect(cNodeEndpointToSpIdMapQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect( - stateMonitoringManager.registerMonitoringQueueEventHandlersAndJobProcessors - ).to.have.been.calledOnce - expect( - stateMonitoringManager.registerMonitoringQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('monitorStateQueue') - .that.has.deep.property('name', QUEUE_NAMES.MONITOR_STATE) - expect( - stateMonitoringManager.registerMonitoringQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('findSyncRequestsQueue') - .that.has.deep.property('name', QUEUE_NAMES.FIND_SYNC_REQUESTS) - expect( - stateMonitoringManager.registerMonitoringQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('findReplicaSetUpdatesQueue') - .that.has.deep.property('name', QUEUE_NAMES.FIND_REPLICA_SET_UPDATES) - expect( - stateMonitoringManager.registerMonitoringQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('cNodeEndpointToSpIdMapQueue') - .that.has.deep.property( - 'name', - QUEUE_NAMES.FETCH_C_NODE_ENDPOINT_TO_SP_ID_MAP - ) - }) - it('kicks off an initial job when initting', async function () { // Mock the latest userId, which is used during init as an upper bound // to start the monitoring queue at a random user @@ -144,6 +56,11 @@ describe('test StateMonitoringManager initialization, events, and re-enqueuing', const MockStateMonitoringManager = proxyquire( '../src/services/stateMachineManager/stateMonitoring/index.js', { + '../../../utils': { + clusterUtils: { + isThisWorkerInit: () => true + } + }, '../../../config': config } ) @@ -154,6 +71,7 @@ describe('test StateMonitoringManager initialization, events, and re-enqueuing', discoveryNodeEndpoint, 
getPrometheusRegistry() ) + await monitorStateQueue.getJobs('delayed') // Verify that the queue has the correct initial job in it return expect(monitorStateQueue.getJobs('delayed')) @@ -192,124 +110,6 @@ describe('test StateMonitoringManager initialization, events, and re-enqueuing', .fulfilled.and.be.empty }) - it('processes monitorState jobs with expected data and returns the expected results', async function () { - // Mock StateMonitoringManager to have monitorState job processor return dummy data and mocked processJob util - const expectedResult = { test: 'test' } - const processStateMonitoringJobStub = sandbox - .stub() - .resolves(expectedResult) - const { processJobMock, loggerStub } = getProcessJobMock() - const MockStateMonitoringManager = proxyquire( - '../src/services/stateMachineManager/stateMonitoring/index.js', - { - './monitorState.jobProcessor': processStateMonitoringJobStub, - '../processJob': processJobMock - } - ) - - // Verify that StateMonitoringManager returns our dummy data - const job = { - id: 9, - data: { - lastProcessedUserId: 2, - discoveryNodeEndpoint: 'http://test_endpoint.co' - } - } - await expect( - new MockStateMonitoringManager().makeProcessMonitorStateJob( - getPrometheusRegistry() - )(job) - ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) - expect(processStateMonitoringJobStub).to.have.been.calledOnceWithExactly({ - logger: loggerStub, - lastProcessedUserId: job.data.lastProcessedUserId, - discoveryNodeEndpoint: job.data.discoveryNodeEndpoint - }) - }) - - it('processes findSyncRequests jobs with expected data and returns the expected results', async function () { - // Mock StateMonitoringManager to have findSyncRequests job processor return dummy data and mocked processJob util - const expectedResult = { test: 'test' } - const processFindSyncRequestsJobStub = sandbox - .stub() - .resolves(expectedResult) - const { processJobMock, loggerStub } = getProcessJobMock() - const MockStateMonitoringManager = proxyquire( - '../src/services/stateMachineManager/stateMonitoring/index.js', - { - './findSyncRequests.jobProcessor': processFindSyncRequestsJobStub, - '../processJob': processJobMock - } - ) - - // Verify that StateMonitoringManager returns our dummy data - const job = { - id: 9, - data: { - users: [], - unhealthyPeers: [], - replicaSetNodesToUserClockStatusesMap: {}, - userSecondarySyncMetricsMap: {} - } - } - await expect( - new MockStateMonitoringManager().makeProcessFindSyncRequestsJob( - getPrometheusRegistry() - )(job) - ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) - expect(processFindSyncRequestsJobStub).to.have.been.calledOnceWithExactly({ - logger: loggerStub, - users: job.data.users, - unhealthyPeers: job.data.unhealthyPeers, - replicaSetNodesToUserClockStatusesMap: - job.data.replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap: job.data.userSecondarySyncMetricsMap - }) - }) - - it('processes findReplicaSetUpdates jobs with expected data and returns the expected results', async function () { - // Mock StateMonitoringManager to have findReplicaSetUpdates job processor return dummy data and mocked processJob util - const expectedResult = { test: 'test' } - const processfindReplicaSetUpdatesJobStub = sandbox - .stub() - .resolves(expectedResult) - const { processJobMock, loggerStub } = getProcessJobMock() - const MockStateMonitoringManager = proxyquire( - '../src/services/stateMachineManager/stateMonitoring/index.js', - { - './findReplicaSetUpdates.jobProcessor': - processfindReplicaSetUpdatesJobStub, - 
'../processJob': processJobMock - } - ) - - // Verify that StateMonitoringManager returns our dummy data - const job = { - id: 9, - data: { - users: [], - unhealthyPeers: [], - replicaSetNodesToUserClockStatusesMap: {}, - userSecondarySyncMetricsMap: {} - } - } - await expect( - new MockStateMonitoringManager().makeProcessFindReplicaSetUpdatesJob( - getPrometheusRegistry() - )(job) - ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) - expect( - processfindReplicaSetUpdatesJobStub - ).to.have.been.calledOnceWithExactly({ - logger: loggerStub, - users: job.data.users, - unhealthyPeers: job.data.unhealthyPeers, - replicaSetNodesToUserClockStatusesMap: - job.data.replicaSetNodesToUserClockStatusesMap, - userSecondarySyncMetricsMap: job.data.userSecondarySyncMetricsMap - }) - }) - it('re-enqueues a new job with the correct data after a job fails', async function () { // Initialize StateMonitoringManager and stubbed queue.add() const discoveryNodeEndpoint = 'http://test_dn.co' @@ -334,7 +134,7 @@ describe('test StateMonitoringManager initialization, events, and re-enqueuing', ) // Verify that the queue has the correct initial job in it - expect(queueAdd).to.have.been.calledOnceWithExactly({ + expect(queueAdd).to.have.been.calledOnceWithExactly('retry-after-fail', { lastProcessedUserId: prevJobProcessedUserId, discoveryNodeEndpoint: discoveryNodeEndpoint }) diff --git a/creator-node/test/StateReconciliationManager.test.js b/creator-node/test/StateReconciliationManager.test.js deleted file mode 100644 index d80ad2e7cd1..00000000000 --- a/creator-node/test/StateReconciliationManager.test.js +++ /dev/null @@ -1,277 +0,0 @@ -/* eslint-disable no-unused-expressions */ -const nock = require('nock') -const chai = require('chai') -const sinon = require('sinon') -const { expect } = chai -const proxyquire = require('proxyquire') -const BullQueue = require('bull') - -const { getApp } = require('./lib/app') -const { getLibsMock } = require('./lib/libsMock') - -const config = require('../src/config') -const StateReconciliationManager = require('../src/services/stateMachineManager/stateReconciliation') -const { - QUEUE_NAMES, - SyncType -} = require('../src/services/stateMachineManager/stateMachineConstants') - -chai.use(require('sinon-chai')) -chai.use(require('chai-as-promised')) - -describe('test StateReconciliationManager initialization, events, and job processors', function () { - let server, sandbox - beforeEach(async function () { - const appInfo = await getApp(getLibsMock()) - await appInfo.app.get('redisClient').flushdb() - server = appInfo.server - sandbox = sinon.createSandbox() - - nock.disableNetConnect() - }) - - afterEach(async function () { - await server.close() - nock.cleanAll() - nock.enableNetConnect() - sandbox.restore() - }) - - function getPrometheusRegistry() { - const startTimerStub = sandbox.stub().returns(() => {}) - const startQueueMetricsStub = sandbox.stub().returns(() => {}) - const getMetricStub = sandbox.stub().returns({ - startTimer: startTimerStub - }) - const prometheusRegistry = { - getMetric: getMetricStub, - metricNames: {}, - startQueueMetrics: startQueueMetricsStub - } - return prometheusRegistry - } - - function getProcessJobMock() { - const loggerStub = { - info: sandbox.stub(), - warn: sandbox.stub(), - error: sandbox.stub() - } - const createChildLogger = sandbox.stub().returns(loggerStub) - const processJobMock = proxyquire( - '../src/services/stateMachineManager/processJob.ts', - { - '../../logging': { - createChildLogger - }, - '../../redis': { - set: 
sandbox.stub() - } - } - ) - return { processJobMock, loggerStub } - } - - it('creates the queues and registers their event handlers', async function () { - // Initialize StateReconciliationManager and spy on its registerQueueEventHandlersAndJobProcessors function - const stateReconciliationManager = new StateReconciliationManager() - sandbox.spy( - stateReconciliationManager, - 'registerQueueEventHandlersAndJobProcessors' - ) - const discoveryNodeEndpoint = 'https://dn1.co' - const { - manualSyncQueue, - recurringSyncQueue, - updateReplicaSetQueue, - recoverOrphanedDataQueue - } = await stateReconciliationManager.init( - discoveryNodeEndpoint, - getPrometheusRegistry() - ) - - // Verify that the queues were successfully initialized and that their event listeners were registered - expect(manualSyncQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect(recurringSyncQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect(updateReplicaSetQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect(recoverOrphanedDataQueue).to.exist.and.to.be.instanceOf(BullQueue) - expect( - stateReconciliationManager.registerQueueEventHandlersAndJobProcessors - ).to.have.been.calledOnce - expect( - stateReconciliationManager.registerQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('manualSyncQueue') - .that.has.deep.property('name', QUEUE_NAMES.MANUAL_SYNC) - expect( - stateReconciliationManager.registerQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('recurringSyncQueue') - .that.has.deep.property('name', QUEUE_NAMES.RECURRING_SYNC) - expect( - stateReconciliationManager.registerQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('updateReplicaSetQueue') - .that.has.deep.property('name', QUEUE_NAMES.UPDATE_REPLICA_SET) - expect( - stateReconciliationManager.registerQueueEventHandlersAndJobProcessors.getCall( - 0 - ).args[0] - ) - .to.have.property('recoverOrphanedDataQueue') - .that.has.deep.property('name', QUEUE_NAMES.RECOVER_ORPHANED_DATA) - }) - - it('processes manual sync jobs with expected data and returns the expected results', async function () { - // Mock StateReconciliationManager to have issueSyncRequest job processor return dummy data and mocked processJob util - const expectedResult = { test: 'test' } - const issueSyncReqStub = sandbox.stub().resolves(expectedResult) - const { processJobMock, loggerStub } = getProcessJobMock() - const MockStateReconciliationManager = proxyquire( - '../src/services/stateMachineManager/stateReconciliation/index.js', - { - './issueSyncRequest.jobProcessor': issueSyncReqStub, - '../processJob': processJobMock - } - ) - - // Verify that StateReconciliationManager returns our dummy data - const job = { - id: 9, - data: { - syncType: SyncType.MANUAL, - syncRequestParameters: 'test' - } - } - await expect( - new MockStateReconciliationManager().makeProcessManualSyncJob( - getPrometheusRegistry() - )(job) - ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) - expect(issueSyncReqStub).to.have.been.calledOnceWithExactly({ - logger: loggerStub, - syncType: SyncType.MANUAL, - syncRequestParameters: 'test' - }) - }) - - it('processes recurring sync jobs with expected data and returns the expected results', async function () { - // Mock StateReconciliationManager to have issueSyncRequest job processor return dummy data and mocked processJob util - const expectedResult = { test: 'test' } - const issueSyncReqStub = sandbox.stub().resolves(expectedResult) - const { 
processJobMock, loggerStub } = getProcessJobMock() - const MockStateReconciliationManager = proxyquire( - '../src/services/stateMachineManager/stateReconciliation/index.js', - { - './issueSyncRequest.jobProcessor': issueSyncReqStub, - '../processJob': processJobMock - } - ) - - // Verify that StateReconciliationManager returns our dummy data - const job = { - id: 9, - data: { - syncType: SyncType.RECURRING, - syncRequestParameters: 'test' - } - } - await expect( - new MockStateReconciliationManager().makeProcessRecurringSyncJob( - getPrometheusRegistry() - )(job) - ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) - expect(issueSyncReqStub).to.have.been.calledOnceWithExactly({ - logger: loggerStub, - syncType: SyncType.MANUAL, - syncRequestParameters: 'test' - }) - }) - - it('processes updateReplicaSet jobs with expected data and returns the expected results', async function () { - // Mock StateReconciliationManager to have updateReplicaSet job processor return dummy data and mocked processJob util - const expectedResult = { test: 'test' } - const updateReplicaSetStub = sandbox.stub().resolves(expectedResult) - const { processJobMock, loggerStub } = getProcessJobMock() - const MockStateReconciliationManager = proxyquire( - '../src/services/stateMachineManager/stateReconciliation/index.js', - { - './updateReplicaSet.jobProcessor': updateReplicaSetStub, - '../processJob': processJobMock - } - ) - - // Verify that StateReconciliationManager returns our dummy data - const wallet = '0x123456789' - const userId = 1 - const primary = 'http://cn1.co' - const secondary1 = 'http://cn2.co' - const secondary2 = 'http://cn3.co' - const unhealthyReplicas = ['test'] - const replicaSetNodesToUserClockStatusesMap = { test: 'test' } - const enabledReconfigModes = ['test1'] - const job = { - id: 9, - data: { - wallet, - userId, - primary, - secondary1, - secondary2, - unhealthyReplicas, - replicaSetNodesToUserClockStatusesMap, - enabledReconfigModes - } - } - await expect( - new MockStateReconciliationManager().makeProcessUpdateReplicaSetJob( - getPrometheusRegistry() - )(job) - ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) - expect(updateReplicaSetStub).to.have.been.calledOnceWithExactly({ - logger: loggerStub, - wallet, - userId, - primary, - secondary1, - secondary2, - unhealthyReplicas, - replicaSetNodesToUserClockStatusesMap, - enabledReconfigModes - }) - }) - - it('processes recoverOrphanedData jobs with expected data and returns the expected results', async function () { - // Mock StateReconciliationManager to have recoverOrphanedData job processor return dummy data and mocked processJob util - const expectedResult = { test: 'test' } - const recoverOrphanedDataStub = sandbox.stub().resolves(expectedResult) - const { processJobMock, loggerStub } = getProcessJobMock() - const MockStateReconciliationManager = proxyquire( - '../src/services/stateMachineManager/stateReconciliation/index.js', - { - './recoverOrphanedData.jobProcessor': { - default: recoverOrphanedDataStub - }, - '../processJob': processJobMock - } - ) - - // Verify that StateReconciliationManager returns our dummy data - const job = {} - await expect( - new MockStateReconciliationManager().makeRecoverOrphanedDataJob( - getPrometheusRegistry() - )(job) - ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) - expect(recoverOrphanedDataStub).to.have.been.calledOnceWithExactly({ - logger: loggerStub, - ...job - }) - }) -}) diff --git a/creator-node/test/blacklistManager.test.js 
b/creator-node/test/blacklistManager.test.js index cdbe5fb1182..a348e91dc3a 100644 --- a/creator-node/test/blacklistManager.test.js +++ b/creator-node/test/blacklistManager.test.js @@ -55,7 +55,7 @@ describe('test blacklistManager', () => { }) it('[isServable] if cid is in blacklist and trackId is invalid, do not serve', async () => { - await BlacklistManager.addToRedis( + await BlacklistManager._addToRedis( 'BM.SET.BLACKLIST.SEGMENTCID' /* REDIS_SET_BLACKLIST_SEGMENTCID_KEY */, [DUMMY_CID] ) @@ -92,14 +92,14 @@ describe('test blacklistManager', () => { }) it('[isServable] cid belongs to track from input trackId, and the input trackId is valid + blacklisted, do not serve', async () => { - await BlacklistManager.addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ + await BlacklistManager._addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ DUMMY_CID ]) - await BlacklistManager.addToRedis('BM.SET.BLACKLIST.TRACKID', [1]) - await BlacklistManager.addToRedis('BM.MAP.BLACKLIST.SEGMENTCID.TRACKID', { + await BlacklistManager._addToRedis('BM.SET.BLACKLIST.TRACKID', [1]) + await BlacklistManager._addToRedis('BM.MAP.BLACKLIST.SEGMENTCID.TRACKID', { 1: [DUMMY_CID] }) - await BlacklistManager.addToRedis('BM.MAP.TRACKID.SEGMENTCIDS', { + await BlacklistManager._addToRedis('BM.MAP.TRACKID.SEGMENTCIDS', { 1: [DUMMY_CID] }) @@ -110,10 +110,10 @@ describe('test blacklistManager', () => { }) it('[isServable] cid is in blacklist, cid belongs to track from input trackId with redis check, and the input trackId is valid + not blacklisted, allow serve', async () => { - await BlacklistManager.addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ + await BlacklistManager._addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ DUMMY_CID ]) - await BlacklistManager.addToRedis('BM.MAP.TRACKID.SEGMENTCIDS', { + await BlacklistManager._addToRedis('BM.MAP.TRACKID.SEGMENTCIDS', { 1: [DUMMY_CID] }) @@ -124,10 +124,10 @@ describe('test blacklistManager', () => { }) it('[isServable] cid is in blacklist, cid does not belong to track from input trackId with redis check, and input track is invalid, do not serve', async () => { - await BlacklistManager.addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ + await BlacklistManager._addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ DUMMY_CID ]) - await BlacklistManager.addToRedis('BM.SET.INVALID.TRACKIDS', [1234]) + await BlacklistManager._addToRedis('BM.SET.INVALID.TRACKIDS', [1234]) assert.deepStrictEqual( await BlacklistManager.isServable(DUMMY_CID, 1234), @@ -136,13 +136,13 @@ describe('test blacklistManager', () => { }) it('[isServable] cid is in blacklist, cid does not belong to track from input trackId with redis check, and input track is invalid with db check, do not serve', async () => { - await BlacklistManager.addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ + await BlacklistManager._addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ DUMMY_CID ]) // Mock DB call to return nothing sinon - .stub(BlacklistManager, 'getAllCIDsFromTrackIdsInDb') + .stub(BlacklistManager, '_getAllCIDsFromTrackIdsInDb') .callsFake(async () => { return [] }) @@ -155,13 +155,13 @@ describe('test blacklistManager', () => { }) it('[isServable] cid is in blacklist, cid does not belong to track from input trackId with redis check, and input track is valid with db check, and cid is in track, allow serve', async () => { - await BlacklistManager.addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ + await BlacklistManager._addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ DUMMY_CID ]) // Mock DB call to return proper segment sinon - .stub(BlacklistManager, 
'getAllCIDsFromTrackIdsInDb') + .stub(BlacklistManager, '_getAllCIDsFromTrackIdsInDb') .callsFake(async () => { return [ { @@ -177,19 +177,19 @@ describe('test blacklistManager', () => { true ) assert.deepStrictEqual( - await BlacklistManager.getAllCIDsFromTrackIdInRedis(1), + await BlacklistManager._getAllCIDsFromTrackIdInRedis(1), [DUMMY_CID] ) }).timeout(0) it('[isServable] cid is in blacklist, cid does not belong to track from input trackId with redis check, and input track is valid with db check, and cid is not in track, do not serve', async () => { - await BlacklistManager.addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ + await BlacklistManager._addToRedis('BM.SET.BLACKLIST.SEGMENTCID', [ DUMMY_CID ]) // Mock DB call to return proper segment that is not the same as `CID` sinon - .stub(BlacklistManager, 'getAllCIDsFromTrackIdsInDb') + .stub(BlacklistManager, '_getAllCIDsFromTrackIdsInDb') .callsFake(async () => { return [ { @@ -207,7 +207,7 @@ describe('test blacklistManager', () => { false ) assert.deepStrictEqual( - await BlacklistManager.getAllCIDsFromTrackIdInRedis(1), + await BlacklistManager._getAllCIDsFromTrackIdInRedis(1), ['QmABC_tinashe_and_rei_ami'] ) }) diff --git a/creator-node/test/contentBlacklist.test.js b/creator-node/test/contentBlacklist.test.js index e5f0eb7b662..39cc82e5693 100644 --- a/creator-node/test/contentBlacklist.test.js +++ b/creator-node/test/contentBlacklist.test.js @@ -74,7 +74,7 @@ describe('test ContentBlacklist', function () { const expectedIds = [1, 2, 3, 4, 5, 6, 7] const addTrackData = generateTimestampAndSignature( { - type: BlacklistManager.getTypes().track, + type: BlacklistManager._getTypes().track, values: expectedIds }, DELEGATE_PRIVATE_KEY @@ -83,7 +83,7 @@ describe('test ContentBlacklist', function () { await request(app) .post('/blacklist/add') .query({ - type: BlacklistManager.getTypes().track, + type: BlacklistManager._getTypes().track, 'values[]': expectedIds, signature: addTrackData.signature, timestamp: addTrackData.timestamp @@ -115,7 +115,7 @@ describe('test ContentBlacklist', function () { const ids = [43021] const addUserData = generateTimestampAndSignature( { - type: BlacklistManager.getTypes().user, + type: BlacklistManager._getTypes().user, values: ids }, DELEGATE_PRIVATE_KEY @@ -124,7 +124,7 @@ describe('test ContentBlacklist', function () { await request(app) .post('/blacklist/add') .query({ - type: BlacklistManager.getTypes().user, + type: BlacklistManager._getTypes().user, 'values[]': ids, signature: addUserData.signature, timestamp: addUserData.timestamp @@ -133,7 +133,7 @@ describe('test ContentBlacklist', function () { const addTrackData = generateTimestampAndSignature( { - type: BlacklistManager.getTypes().track, + type: BlacklistManager._getTypes().track, values: ids }, DELEGATE_PRIVATE_KEY @@ -142,7 +142,7 @@ describe('test ContentBlacklist', function () { await request(app) .post('/blacklist/add') .query({ - type: BlacklistManager.getTypes().track, + type: BlacklistManager._getTypes().track, 'values[]': ids, signature: addTrackData.signature, timestamp: addTrackData.timestamp @@ -152,7 +152,7 @@ describe('test ContentBlacklist', function () { const cids = [generateRandomCID()] const addCIDData = generateTimestampAndSignature( { - type: BlacklistManager.getTypes().cid, + type: BlacklistManager._getTypes().cid, values: cids }, DELEGATE_PRIVATE_KEY @@ -161,7 +161,7 @@ describe('test ContentBlacklist', function () { await request(app) .post('/blacklist/add') .query({ - type: BlacklistManager.getTypes().cid, + type: 
BlacklistManager._getTypes().cid, 'values[]': cids, signature: addCIDData.signature, timestamp: addCIDData.timestamp @@ -188,7 +188,7 @@ describe('test ContentBlacklist', function () { const ids = [43021] const addUserData = generateTimestampAndSignature( { - type: BlacklistManager.getTypes().user, + type: BlacklistManager._getTypes().user, values: ids }, trustedNotifierConfig.privateKey @@ -197,7 +197,7 @@ describe('test ContentBlacklist', function () { await request(app) .post('/blacklist/add') .query({ - type: BlacklistManager.getTypes().user, + type: BlacklistManager._getTypes().user, 'values[]': ids, signature: addUserData.signature, timestamp: addUserData.timestamp @@ -206,7 +206,7 @@ describe('test ContentBlacklist', function () { const addTrackData = generateTimestampAndSignature( { - type: BlacklistManager.getTypes().track, + type: BlacklistManager._getTypes().track, values: ids }, trustedNotifierConfig.privateKey @@ -215,7 +215,7 @@ describe('test ContentBlacklist', function () { await request(app) .post('/blacklist/add') .query({ - type: BlacklistManager.getTypes().track, + type: BlacklistManager._getTypes().track, 'values[]': ids, signature: addTrackData.signature, timestamp: addTrackData.timestamp @@ -225,7 +225,7 @@ describe('test ContentBlacklist', function () { const cids = [generateRandomCID()] const addCIDData = generateTimestampAndSignature( { - type: BlacklistManager.getTypes().cid, + type: BlacklistManager._getTypes().cid, values: cids }, trustedNotifierConfig.privateKey @@ -234,7 +234,7 @@ describe('test ContentBlacklist', function () { await request(app) .post('/blacklist/add') .query({ - type: BlacklistManager.getTypes().cid, + type: BlacklistManager._getTypes().cid, 'values[]': cids, signature: addCIDData.signature, timestamp: addCIDData.timestamp @@ -256,7 +256,7 @@ describe('test ContentBlacklist', function () { it('should add user type and id to db and redis', async () => { const ids = [Utils.getRandomInt(MAX_ID)] - const type = BlacklistManager.getTypes().user + const type = BlacklistManager._getTypes().user const { signature, timestamp } = generateTimestampAndSignature( { type, values: ids }, DELEGATE_PRIVATE_KEY @@ -284,7 +284,7 @@ describe('test ContentBlacklist', function () { it('should add track type and id to db and redis', async () => { const ids = [Utils.getRandomInt(MAX_ID)] - const type = BlacklistManager.getTypes().track + const type = BlacklistManager._getTypes().track const { signature, timestamp } = generateTimestampAndSignature( { type, values: ids }, DELEGATE_PRIVATE_KEY @@ -313,7 +313,7 @@ describe('test ContentBlacklist', function () { it('should remove user type and id from db and redis', async () => { const ids = [Utils.getRandomInt(MAX_ID)] - const type = BlacklistManager.getTypes().user + const type = BlacklistManager._getTypes().user const { signature, timestamp } = generateTimestampAndSignature( { type, values: ids }, DELEGATE_PRIVATE_KEY @@ -346,7 +346,7 @@ describe('test ContentBlacklist', function () { it('should remove track type and id from db and redis', async () => { const ids = [Utils.getRandomInt(MAX_ID)] - const type = BlacklistManager.getTypes().track + const type = BlacklistManager._getTypes().track const { signature, timestamp } = generateTimestampAndSignature( { type, values: ids }, DELEGATE_PRIVATE_KEY @@ -379,7 +379,7 @@ describe('test ContentBlacklist', function () { it('should return success when removing a user that does not exist', async () => { const ids = [Utils.getRandomInt(MAX_ID)] - const type = 
BlacklistManager.getTypes().user
+    const type = BlacklistManager._getTypes().user
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -407,7 +407,7 @@ describe('test ContentBlacklist', function () {
 
   it('should return success when removing a track that does not exist', async () => {
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -435,7 +435,7 @@ describe('test ContentBlacklist', function () {
 
   it('should ignore duplicate add for track', async () => {
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -471,7 +471,7 @@ describe('test ContentBlacklist', function () {
 
   it('should ignore duplicate add for user', async () => {
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().user
+    const type = BlacklistManager._getTypes().user
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -508,7 +508,7 @@ describe('test ContentBlacklist', function () {
   it('should only blacklist partial user ids list if only some ids are found', async () => {
     const ids = [Utils.getRandomInt(MAX_ID), Utils.getRandomInt(MAX_ID)]
     libsMock.User.getUsers.returns([{ user_id: ids[0] }]) // only user @ index 0 is found
-    const type = BlacklistManager.getTypes().user
+    const type = BlacklistManager._getTypes().user
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -540,7 +540,7 @@ describe('test ContentBlacklist', function () {
   it('should only blacklist partial track ids list if only some ids are found', async () => {
     const ids = [Utils.getRandomInt(MAX_ID), Utils.getRandomInt(MAX_ID)]
     libsMock.Track.getTracks.returns([{ track_id: ids[0] }]) // only user @ index 0 is found
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -571,7 +571,7 @@ describe('test ContentBlacklist', function () {
 
   it('should add cids to db and redis', async () => {
     const cids = [generateRandomCID()]
-    const type = BlacklistManager.getTypes().cid
+    const type = BlacklistManager._getTypes().cid
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: cids },
       DELEGATE_PRIVATE_KEY
@@ -596,7 +596,7 @@ describe('test ContentBlacklist', function () {
 
   it('should remove cids from db and redis', async () => {
     const cids = [generateRandomCID()]
-    const type = BlacklistManager.getTypes().cid
+    const type = BlacklistManager._getTypes().cid
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: cids },
       DELEGATE_PRIVATE_KEY
@@ -625,7 +625,7 @@ describe('test ContentBlacklist', function () {
 
   it("should throw an error if delegate private key does not match that of the creator node's", async () => {
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().user
+    const type = BlacklistManager._getTypes().user
     const BAD_KEY =
       '0xBADKEY4d4a2412a443c17e1666764d3bba43e89e61129a35f9abc337ec170a5d'
 
@@ -642,7 +642,7 @@ describe('test ContentBlacklist', function () {
 
   it('should throw an error if query params does not contain all necessary keys', async () => {
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
 
     await request(app)
       .post('/blacklist/add')
@@ -682,7 +682,7 @@ describe('test ContentBlacklist', function () {
     const ids = [trackId]
 
     // Blacklist trackId
-    const type = BlacklistManager.getTypes().user
+    const type = BlacklistManager._getTypes().user
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -712,7 +712,7 @@ describe('test ContentBlacklist', function () {
     const ids = [trackId]
 
     // Blacklist trackId
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -737,7 +737,7 @@ describe('test ContentBlacklist', function () {
     const ids = [trackId]
 
     // Blacklist trackId
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -780,7 +780,7 @@ describe('test ContentBlacklist', function () {
     const ids = [trackId]
 
     // Blacklist trackId
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -813,7 +813,7 @@ describe('test ContentBlacklist', function () {
     const ids = [track1.track.blockchainId]
 
     // Blacklist trackId
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -836,7 +836,7 @@ describe('test ContentBlacklist', function () {
 
   it('should throw an error when adding a cid to the blacklist and streaming /ipfs/:CID', async () => {
     const cids = [generateRandomCID()]
-    const type = BlacklistManager.getTypes().cid
+    const type = BlacklistManager._getTypes().cid
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: cids },
       DELEGATE_PRIVATE_KEY
@@ -853,7 +853,7 @@ describe('test ContentBlacklist', function () {
   it('should throw an error if user id does not exist', async () => {
     libsMock.User.getUsers.returns([])
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().user
+    const type = BlacklistManager._getTypes().user
     const resp1 = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -890,7 +890,7 @@ describe('test ContentBlacklist', function () {
   it('should throw an error if track id does not exist', async () => {
     libsMock.Track.getTracks.returns([])
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const resp1 = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -927,7 +927,7 @@ describe('test ContentBlacklist', function () {
   it('should throw an error if disc prov is unable to lookup ids', async () => {
     libsMock.User.getUsers.returns([])
     const ids = [Utils.getRandomInt(MAX_ID)]
-    const type = BlacklistManager.getTypes().user
+    const type = BlacklistManager._getTypes().user
     const resp1 = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
@@ -968,9 +968,9 @@ describe('test ContentBlacklist', function () {
       Utils.getRandomInt(MAX_ID),
       '###%^&'
     ]
-    const type = BlacklistManager.getTypes().cid
+    const type = BlacklistManager._getTypes().cid
     const { timestamp, signature } = generateTimestampAndSignature(
-      { type: BlacklistManager.getTypes().cid, values: cids },
+      { type: BlacklistManager._getTypes().cid, values: cids },
       DELEGATE_PRIVATE_KEY
     )
 
@@ -992,7 +992,7 @@ describe('test ContentBlacklist', function () {
     const ids = [trackId]
 
     // Blacklist trackId
-    const type = BlacklistManager.getTypes().track
+    const type = BlacklistManager._getTypes().track
     const { signature, timestamp } = generateTimestampAndSignature(
       { type, values: ids },
       DELEGATE_PRIVATE_KEY
diff --git a/creator-node/test/lib/genericBullQueueMock.js b/creator-node/test/lib/genericBullQueueMock.js
index f4f25618a57..7d92533258f 100644
--- a/creator-node/test/lib/genericBullQueueMock.js
+++ b/creator-node/test/lib/genericBullQueueMock.js
@@ -1,35 +1,44 @@
 const PrometheusRegistry = require('../../src/services/prometheusMonitoring/prometheusRegistry')
-const Bull = require('bull')
+const { Queue, QueueEvents, Worker } = require('bullmq')
 const config = require('../../src/config')
 
 // Mock monitoring queue that sets monitor values on construction
 class GenericBullQueue {
   constructor() {
-    this.queue = Bull('genericBullQueue', {
-      redis: {
-        host: config.get('redisHost'),
-        port: config.get('redisPort')
-      },
+    const connection = {
+      host: config.get('redisHost'),
+      port: config.get('redisPort')
+    }
+    this.queue = new Queue('genericBullQueue', {
+      connection,
       defaultJobOptions: {
         removeOnComplete: 0,
         removeOnFail: 0
       }
     })
     const prometheusRegistry = new PrometheusRegistry()
-    prometheusRegistry.startQueueMetrics(this.queue)
-    this.queue.process(1, async (job) => {
-      const { timeout } = job.data
-      if (timeout) {
-        console.log(`waiting ${timeout}`)
-        setTimeout(() => console.log(`done ${timeout}`), timeout)
-      }
+    const worker = new Worker(
+      'genericBullQueue',
+      async (job) => {
+        const { timeout } = job.data
+        if (timeout) {
+          console.log(`waiting ${timeout}`)
+          setTimeout(() => console.log(`done ${timeout}`), timeout)
+        }
+      },
+      { connection }
+    )
+    this.queueEvents = new QueueEvents('genericBullQueue', {
+      connection
     })
+
+    prometheusRegistry.startQueueMetrics(this.queue, worker)
   }
 
   async addTask(params) {
-    const job = await this.queue.add(params)
+    const job = await this.queue.add('mock-job', params)
     return job
   }
diff --git a/creator-node/test/prometheus.test.js b/creator-node/test/prometheus.test.js
index b1564696343..924a7b04017 100644
--- a/creator-node/test/prometheus.test.js
+++ b/creator-node/test/prometheus.test.js
@@ -121,9 +121,9 @@ describe('test Prometheus metrics', async function () {
 
   it('Checks the duration of a bull queue job', async function () {
     const genericBullQueue = new GenericBullQueue()
     const job = await genericBullQueue.addTask({ timeout: 500 })
 
-    await job.finished()
+    await job.waitUntilFinished(genericBullQueue.queueEvents)
 
     const resp = await request(app).get('/prometheus_metrics').expect(200)
     assert.ok(