From 9507b15f9a53325596e7517b96bab66c80889a1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 9 Apr 2025 18:21:01 +0300 Subject: [PATCH 01/25] microservice container cap add , drop , annotations added, new rebuild endpoint for microservices --- Dockerfile | 2 +- docs/swagger.yaml | 120 +++++++++++- package-lock.json | 183 +++++------------- package.json | 6 +- src/cli/microservice.js | 43 ++++ src/controllers/microservices-controller.js | 12 ++ .../managers/microservice-cap-add-manager.js | 35 ++++ .../managers/microservice-cap-drop-manager.js | 35 ++++ src/data/managers/microservice-manager.js | 56 +++++- src/data/migrations/db_migration_v1.0.2.sql | 20 ++ src/data/models/microservice.js | 24 ++- src/data/models/microserviceCapAdd.js | 31 +++ src/data/models/microserviceCapDrop.js | 31 +++ src/routes/microservices.js | 70 +++++++ src/schemas/microservice.js | 14 ++ src/services/agent-service.js | 5 + src/services/microservices-service.js | 168 +++++++++++++++- src/services/router-service.js | 12 ++ 18 files changed, 718 insertions(+), 149 deletions(-) create mode 100644 src/data/managers/microservice-cap-add-manager.js create mode 100644 src/data/managers/microservice-cap-drop-manager.js create mode 100644 src/data/models/microserviceCapAdd.js create mode 100644 src/data/models/microserviceCapDrop.js diff --git a/Dockerfile b/Dockerfile index 4ac88738..7e3af988 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,7 +38,7 @@ RUN microdnf install -y python3 && \ pip3 install --no-cache --upgrade pip setuptools && \ microdnf install shadow-utils && \ microdnf clean all -RUN microdnf reinstall -y tzdata && microdnf clean all +RUN microdnf install -y tzdata && microdnf clean all RUN microdnf -y remove microdnf RUN useradd --uid 10000 --create-home runner RUN mkdir -p /var/log/iofog-controller && \ diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 5c8e9b11..c0f0cfc5 100755 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -1,6 +1,6 @@ 
openapi : "3.0.0" info: - version: 3.4.10 + version: 3.4.11 title: Datasance PoT Controller paths: /status: @@ -2185,6 +2185,64 @@ paths: description: Not Found "500": description: Internal Server Error + "/microservices/system/{uuid}/rebuild": + patch: + tags: + - Microservices + summary: Rebuilds a system microservice + operationId: rebuildSystemMicroservice + parameters: + - in: path + name: uuid + description: Microservice Uuid + required: true + schema: + type: string + security: + - userToken: [] + responses: + "204": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error + "/microservices/{uuid}/rebuild": + patch: + tags: + - Microservices + summary: Rebuilds a microservice + operationId: rebuildMicroservice + parameters: + - in: path + name: uuid + description: Microservice Uuid + required: true + schema: + type: string + security: + - userToken: [] + responses: + "204": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error "/microservices/{uuid}/image-snapshot": post: tags: @@ -3305,6 +3363,8 @@ components: type: string config: type: string + annotations: + type: string catalogItemId: type: integer registryId: @@ -3348,6 +3408,14 @@ components: type: array items: type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string cmd: type: array items: @@ -3444,6 +3512,8 @@ components: type: string config: type: string + annotations: + type: string catalogItemId: type: integer registryId: @@ -3489,6 +3559,14 @@ components: type: array items: type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string 
cmd: type: array items: @@ -4133,6 +4211,8 @@ components: type: string config: type: string + annotations: + type: string rebuild: type: boolean rootHostAccess: @@ -4169,6 +4249,14 @@ components: type: array items: type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string cmd: type: array items: @@ -4585,6 +4673,8 @@ components: type: string config: type: string + annotations: + type: string rootHostAccess: type: boolean logLimit: @@ -4618,6 +4708,14 @@ components: type: array items: type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string cmd: type: array items: @@ -4652,6 +4750,8 @@ components: type: string config: type: string + annotations: + type: string catalogItemId: type: integer images: @@ -4690,6 +4790,14 @@ components: type: array items: type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string cmd: type: array items: @@ -4711,6 +4819,8 @@ components: type: string config: type: string + annotations: + type: string rebuild: type: boolean iofogUuid: @@ -4743,6 +4853,14 @@ components: type: array items: type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string cmd: type: array items: diff --git a/package-lock.json b/package-lock.json index ed86c5e1..8dcc7705 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,17 +1,17 @@ { "name": "@datasance/iofogcontroller", - "version": "3.4.10", + "version": "3.4.11", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.4.10", + "version": "3.4.11", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "0.4.2", - "axios": "1.7.4", + "@datasance/ecn-viewer": "0.4.3", + "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", "command-line-args": "5.2.1", @@ -92,12 +92,13 @@ } }, 
"node_modules/@babel/code-frame": { - "version": "7.24.2", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.2.tgz", - "integrity": "sha512-y5+tLQyV8pg3fsiln67BVLD1P13Eg4lh5RW9mF0zUuvLrv9uIQ4MCL+CRT+FTsBlBjcIan6PGsLcBN0m3ClUyQ==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", "dev": true, "dependencies": { - "@babel/highlight": "^7.24.2", + "@babel/helper-validator-identifier": "^7.25.9", + "js-tokens": "^4.0.0", "picocolors": "^1.0.0" }, "engines": { @@ -326,18 +327,18 @@ } }, "node_modules/@babel/helper-string-parser": { - "version": "7.24.1", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz", - "integrity": "sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.5.tgz", - "integrity": "sha512-3q93SSKX2TWCG30M2G2kwaKeTYgEUp5Snjuj8qm729SObL6nbtUldAi37qbxkD5gg3xnBio+f9nqpSepGZMvxA==", + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", "dev": true, "engines": { "node": ">=6.9.0" @@ -353,110 +354,26 @@ } }, "node_modules/@babel/helpers": { - "version": "7.24.5", - "resolved": 
"https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.5.tgz", - "integrity": "sha512-CiQmBMMpMQHwM5m01YnrM6imUG1ebgYJ+fAIW4FZe6m4qHTPaRHti+R8cggAwkdz4oXhtO4/K9JWlh+8hIfR2Q==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz", + "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==", "dev": true, "dependencies": { - "@babel/template": "^7.24.0", - "@babel/traverse": "^7.24.5", - "@babel/types": "^7.24.5" + "@babel/template": "^7.27.0", + "@babel/types": "^7.27.0" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/highlight": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.5.tgz", - "integrity": "sha512-8lLmua6AVh/8SLJRRVD6V8p73Hir9w5mJrhE+IPpILG31KKlI9iz5zmBYKcWPS59qSfgP9RaSBQSHHE81WKuEw==", - "dev": true, - "dependencies": { - "@babel/helper-validator-identifier": "^7.24.5", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - 
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", - "dev": true - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "node_modules/@babel/parser": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz", + "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==", "dev": true, "dependencies": { - "has-flag": "^3.0.0" + "@babel/types": "^7.27.0" }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/parser": { - "version": "7.24.5", - "resolved": 
"https://registry.npmjs.org/@babel/parser/-/parser-7.24.5.tgz", - "integrity": "sha512-EOv5IK8arwh3LI47dz1b0tKUb/1uhHAnHJOrjgtQMIpu1uXd9mlFrJg9IUgGUgZ41Ch0K8REPTYpO7B76b4vJg==", - "dev": true, "bin": { "parser": "bin/babel-parser.js" }, @@ -465,14 +382,14 @@ } }, "node_modules/@babel/template": { - "version": "7.24.0", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.0.tgz", - "integrity": "sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz", + "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.23.5", - "@babel/parser": "^7.24.0", - "@babel/types": "^7.24.0" + "@babel/code-frame": "^7.26.2", + "@babel/parser": "^7.27.0", + "@babel/types": "^7.27.0" }, "engines": { "node": ">=6.9.0" @@ -532,14 +449,13 @@ "dev": true }, "node_modules/@babel/types": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.5.tgz", - "integrity": "sha512-6mQNsaLeXTw0nxYUYu+NSa4Hx4BlF1x1x8/PMFbiR+GBSr+2DkECc69b8hgy2frEodNcvPffeH8YfWd3LI6jhQ==", + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz", + "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==", "dev": true, "dependencies": { - "@babel/helper-string-parser": "^7.24.1", - "@babel/helper-validator-identifier": "^7.24.5", - "to-fast-properties": "^2.0.0" + "@babel/helper-string-parser": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9" }, "engines": { "node": ">=6.9.0" @@ -556,9 +472,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.4.2.tgz", - "integrity": 
"sha512-+xyDmmw4q/pfzzCdz+u0c2sGaGGPj62M8u8JKE0luRNiILqmADRtVSsMCzal4sLmILgD3nkeaUeVTw+EzQcwyw==" + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.4.3.tgz", + "integrity": "sha512-7GHEwbd1JtbivEMus4QW7NxDwaWQP9E1WjXspTh3JH4048BhrsVrdfnqFt+f5n8x2QE77lT7UePyu5bbnpQXMQ==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.4.0", @@ -1910,9 +1826,9 @@ "dev": true }, "node_modules/axios": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.4.tgz", - "integrity": "sha512-DukmaFRnY6AzAALSH4J2M3k6PkaC+MfaAGdEERRWcC9q3/TWQwLpHR8ZRLKTdQ3aBDL64EdluRDjJqKw+BPZEw==", + "version": "1.8.4", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.8.4.tgz", + "integrity": "sha512-eBSYY4Y68NNlHbHBMdeDmKNtDgXWhQsJcGqzO3iLUM0GraQFSS9cVgPX5I9b3lbdFKyYoAEGAZF1DwhTaljNAw==", "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.0", @@ -12343,9 +12259,9 @@ } }, "node_modules/tar-fs": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", - "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.2.tgz", + "integrity": "sha512-EsaAXwxmx8UB7FRKqeozqEPop69DXcmYwTQwXvyAPF352HJsPdkVhvTaDPYqfNgruveJIJy3TA2l+2zj8LJIJA==", "dependencies": { "chownr": "^1.1.1", "mkdirp-classic": "^0.5.2", @@ -12564,15 +12480,6 @@ "node": ">=0.6.0" } }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "dev": true, - "engines": { - "node": ">=4" - } - }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", diff --git 
a/package.json b/package.json index b8e80899..7b39fd6e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.4.10", + "version": "3.4.11", "description": "ioFog Controller project for Eclipse IoFog @ iofog.org \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,8 +55,8 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "0.4.2", - "axios": "1.7.4", + "@datasance/ecn-viewer": "0.4.3", + "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", "command-line-args": "5.2.1", diff --git a/src/cli/microservice.js b/src/cli/microservice.js index 7ccdf617..e105537e 100644 --- a/src/cli/microservice.js +++ b/src/cli/microservice.js @@ -24,6 +24,7 @@ const JSON_SCHEMA_ADD = AppHelper.stringifyCliJsonSchema( { name: 'string', config: 'string', + annotations: 'string', catalogItemId: 0, images: [ { @@ -66,6 +67,12 @@ const JSON_SCHEMA_ADD = AppHelper.stringifyCliJsonSchema( cdiDevices: [ 'string' ], + capAdd: [ + 'string' + ], + capDrop: [ + 'string' + ], runAsUser: 'string', platform: 'string', runtime: 'string' @@ -76,6 +83,7 @@ const JSON_SCHEMA_UPDATE = AppHelper.stringifyCliJsonSchema( { name: 'string', config: 'string', + annotations: 'string', rebuild: true, iofogUuid: 'string', rootHostAccess: true, @@ -108,6 +116,12 @@ const JSON_SCHEMA_UPDATE = AppHelper.stringifyCliJsonSchema( cdiDevices: [ 'string' ], + capAdd: [ + 'string' + ], + capDrop: [ + 'string' + ], runAsUser: 'string', platform: 'string', runtime: 'string' @@ -197,6 +211,13 @@ class Microservice extends BaseCLIHandler { description: 'Microservice config', group: [constants.CMD_UPDATE, constants.CMD_ADD] }, + { + name: 'annotations', + alias: 'A', + type: String, + description: 'Microservice annotations', + group: [constants.CMD_UPDATE, constants.CMD_ADD] + }, { name: 'volumes', alias: 'v', @@ -308,6 +329,22 @@ class Microservice extends 
BaseCLIHandler { multiple: true, group: [constants.CMD_UPDATE, constants.CMD_ADD] }, + { + name: 'capAdd', + alias: 'cA', + type: String, + description: 'A list of kernel capabilities to add to the container.', + multiple: true, + group: [constants.CMD_UPDATE, constants.CMD_ADD] + }, + { + name: 'capDrop', + alias: 'cD', + type: String, + description: 'A list of kernel capabilities to drop to the container.', + multiple: true, + group: [constants.CMD_UPDATE, constants.CMD_ADD] + }, { name: 'user', alias: 'U', @@ -624,12 +661,15 @@ const _updateMicroserviceObject = function (obj) { const microserviceObj = { name: obj.name, config: obj.config, + annotations: obj.annotations, iofogUuid: obj.iofogUuid, rootHostAccess: AppHelper.validateBooleanCliOptions(obj.rootEnable, obj.rootDisable), logSize: (obj.logSize || constants.MICROSERVICE_DEFAULT_LOG_SIZE) * 1, rebuild: obj.rebuild, cmd: obj.cmd, cdiDevices: obj.cdiDevices, + capAdd: obj.capAdd, + capDrop: obj.capDrop, runAsUser: obj.runAsUser, platform: obj.platform, runtime: obj.runtime, @@ -686,6 +726,7 @@ const _createMicroserviceObject = function (obj) { const microserviceObj = { name: obj.name, config: obj.config, + annotations: obj.annotations, catalogItemId: parseInt(obj.catalogId) || undefined, application: obj.applicationName, registryId: parseInt(obj.registryId) || undefined, @@ -695,6 +736,8 @@ const _createMicroserviceObject = function (obj) { routes: obj.routes, cmd: obj.cmd, cdiDevices: obj.cdiDevices, + capAdd: obj.capAdd, + capDrop: obj.capDrop, runAsUser: obj.runAsUser, platform: obj.platform, runtime: obj.runtime, diff --git a/src/controllers/microservices-controller.js b/src/controllers/microservices-controller.js index 6b35b3e1..55249c4a 100644 --- a/src/controllers/microservices-controller.js +++ b/src/controllers/microservices-controller.js @@ -54,6 +54,16 @@ const updateSystemMicroserviceEndPoint = async function (req) { return MicroservicesService.updateSystemMicroserviceEndPoint(microserviceUuid, 
microservice, false) } +const rebuildMicroserviceEndPoint = async function (req) { + const microserviceUuid = req.params.uuid + return MicroservicesService.rebuildMicroserviceEndPoint(microserviceUuid, false) +} + +const rebuildSystemMicroserviceEndPoint = async function (req) { + const microserviceUuid = req.params.uuid + return MicroservicesService.rebuildSystemMicroserviceEndPoint(microserviceUuid, false) +} + const updateMicroserviceYAMLEndPoint = async function (req) { const microserviceUuid = req.params.uuid const fileContent = req.file.buffer.toString() @@ -169,6 +179,8 @@ module.exports = { listMicroserviceBySubTagEndPoint: (listMicroserviceBySubTagEndPoint), updateMicroserviceEndPoint: (updateMicroserviceEndPoint), updateSystemMicroserviceEndPoint: (updateSystemMicroserviceEndPoint), + rebuildMicroserviceEndPoint: (rebuildMicroserviceEndPoint), + rebuildSystemMicroserviceEndPoint: (rebuildSystemMicroserviceEndPoint), deleteMicroserviceEndPoint: (deleteMicroserviceEndPoint), getMicroservicesByApplicationEndPoint: (getMicroservicesByApplicationEndPoint), createMicroserviceRouteEndPoint: (createMicroserviceRouteEndPoint), diff --git a/src/data/managers/microservice-cap-add-manager.js b/src/data/managers/microservice-cap-add-manager.js new file mode 100644 index 00000000..b92430fa --- /dev/null +++ b/src/data/managers/microservice-cap-add-manager.js @@ -0,0 +1,35 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const BaseManager = require('./base-manager') +const models = require('../models') +const MicroserviceCapAdd = models.MicroserviceCapAdd + +const MicroserviceCapAddExcludedFields = [ + 'id', + 'microservice_uuid', + 'microserviceUuid' +] + +class MicroserviceCapAddManager extends BaseManager { + getEntity () { + return MicroserviceCapAdd + } + + findAllExcludeFields (where, transaction) { + return this.findAllWithAttributes(where, { exclude: MicroserviceCapAddExcludedFields }, transaction) + } +} + +const instance = new MicroserviceCapAddManager() +module.exports = instance diff --git a/src/data/managers/microservice-cap-drop-manager.js b/src/data/managers/microservice-cap-drop-manager.js new file mode 100644 index 00000000..2dce889f --- /dev/null +++ b/src/data/managers/microservice-cap-drop-manager.js @@ -0,0 +1,35 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const BaseManager = require('./base-manager') +const models = require('../models') +const MicroserviceCapDrop = models.MicroserviceCapDrop + +const MicroserviceCapDropExcludedFields = [ + 'id', + 'microservice_uuid', + 'microserviceUuid' +] + +class MicroserviceCapDropManager extends BaseManager { + getEntity () { + return MicroserviceCapDrop + } + + findAllExcludeFields (where, transaction) { + return this.findAllWithAttributes(where, { exclude: MicroserviceCapDropExcludedFields }, transaction) + } +} + +const instance = new MicroserviceCapDropManager() +module.exports = instance diff --git a/src/data/managers/microservice-manager.js b/src/data/managers/microservice-manager.js index 00320b39..68ca789e 100644 --- a/src/data/managers/microservice-manager.js +++ b/src/data/managers/microservice-manager.js @@ -19,6 +19,8 @@ const MicroserviceEnv = models.MicroserviceEnv const MicroserviceExtraHost = models.MicroserviceExtraHost const MicroserviceArg = models.MicroserviceArg const MicroserviceCdiDev = models.MicroserviceCdiDev +const MicroserviceCapAdd = models.MicroserviceCapAdd +const MicroserviceCapDrop = models.MicroserviceCapDrop const VolumeMapping = models.VolumeMapping const StraceDiagnostics = models.StraceDiagnostics const CatalogItem = models.CatalogItem @@ -74,6 +76,18 @@ class MicroserviceManager extends BaseManager { required: false, attributes: ['cdiDevices'] }, + { + model: MicroserviceCapAdd, + as: 'capAdd', + required: false, + attributes: ['capAdd'] + }, + { + model: MicroserviceCapDrop, + as: 'capDrop', + required: false, + attributes: ['capDrop'] + }, { model: MicroservicePort, as: 'ports', @@ -165,6 +179,18 @@ class MicroserviceManager extends BaseManager { required: false, attributes: ['cdiDevices'] }, + { + model: MicroserviceCapAdd, + as: 
'capAdd', + required: false, + attributes: ['capAdd'] + }, + { + model: MicroserviceCapDrop, + as: 'capDrop', + required: false, + attributes: ['capDrop'] + }, { model: MicroservicePort, as: 'ports', @@ -271,6 +297,18 @@ class MicroserviceManager extends BaseManager { required: false, attributes: ['cdiDevices'] }, + { + model: MicroserviceCapAdd, + as: 'capAdd', + required: false, + attributes: ['capAdd'] + }, + { + model: MicroserviceCapDrop, + as: 'capDrop', + required: false, + attributes: ['capDrop'] + }, { model: MicroservicePort, as: 'ports', @@ -384,7 +422,23 @@ class MicroserviceManager extends BaseManager { attributes: ['uuid'] }, { transaction: transaction }) } - + findSystemMicroserviceOnGet (where, transaction) { + return Microservice.findOne({ + include: [ + { + model: Application, + as: 'application', + required: true, + where: { + isSystem: true + }, + attributes: ['id'] + } + ], + where: where, + attributes: ['uuid'] + }, { transaction: transaction }) + } async findOneExcludeFields (where, transaction) { return Microservice.findOne({ include: [ diff --git a/src/data/migrations/db_migration_v1.0.2.sql b/src/data/migrations/db_migration_v1.0.2.sql index 769defbe..2eed15b6 100644 --- a/src/data/migrations/db_migration_v1.0.2.sql +++ b/src/data/migrations/db_migration_v1.0.2.sql @@ -593,3 +593,23 @@ CREATE INDEX idx_microservicepubtags_microservice_uuid ON MicroservicePubTags (m CREATE INDEX idx_microservicesubtags_microservice_uuid ON MicroservicesubTags (microservice_uuid); CREATE INDEX idx_microservicepubtags_tag_id ON MicroservicePubTags (tag_id); CREATE INDEX idx_microservicesubtags_tag_id ON MicroservicesubTags (tag_id); + +CREATE TABLE IF NOT EXISTS MicroserviceCapAdd ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + cap_add TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_capAdd_microserviceUuid ON MicroserviceCapAdd 
(microservice_uuid); + +CREATE TABLE IF NOT EXISTS MicroserviceCapDrop ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + cap_drop TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_capDrop_microserviceUuid ON MicroserviceCapDrop (microservice_uuid); + +ALTER TABLE Microservices ADD COLUMN annotations TEXT; \ No newline at end of file diff --git a/src/data/models/microservice.js b/src/data/models/microservice.js index 1856805d..bc6141cc 100644 --- a/src/data/models/microservice.js +++ b/src/data/models/microservice.js @@ -15,6 +15,11 @@ module.exports = (sequelize, DataTypes) => { field: 'config', defaultValue: '{}' }, + annotations: { + type: DataTypes.TEXT, + field: 'annotations', + defaultValue: '{}' + }, name: { type: DataTypes.TEXT, field: 'name', @@ -39,15 +44,18 @@ module.exports = (sequelize, DataTypes) => { }, runAsUser: { type: DataTypes.TEXT, - field: 'run_as_user' + field: 'run_as_user', + defaultValue: '' }, platform: { type: DataTypes.TEXT, - field: 'platform' + field: 'platform', + defaultValue: '' }, runtime: { type: DataTypes.TEXT, - field: 'runtime' + field: 'runtime', + defaultValue: '' }, logSize: { type: DataTypes.BIGINT, @@ -160,6 +168,16 @@ module.exports = (sequelize, DataTypes) => { as: 'cdiDevices' }) + Microservice.hasMany(models.MicroserviceCapAdd, { + foreignKey: 'microservice_uuid', + as: 'capAdd' + }) + + Microservice.hasMany(models.MicroserviceCapDrop, { + foreignKey: 'microservice_uuid', + as: 'capDrop' + }) + Microservice.hasMany(models.MicroserviceExtraHost, { foreignKey: 'microservice_uuid', as: 'extraHosts' diff --git a/src/data/models/microserviceCapAdd.js b/src/data/models/microserviceCapAdd.js new file mode 100644 index 00000000..6a5ee5fe --- /dev/null +++ b/src/data/models/microserviceCapAdd.js @@ -0,0 +1,31 @@ +'use strict' +module.exports = (sequelize, DataTypes) => { + const MicroserviceCapAdd = 
sequelize.define('MicroserviceCapAdd', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + capAdd: { + type: DataTypes.TEXT, + field: 'cap_add' + } + }, { + tableName: 'MicroserviceCapAdd', + timestamps: false, + underscored: true + }) + MicroserviceCapAdd.associate = function (models) { + MicroserviceCapAdd.belongsTo(models.Microservice, { + foreignKey: { + name: 'microserviceUuid', + field: 'microservice_uuid' + }, + as: 'microservice', + onDelete: 'cascade' + }) + } + return MicroserviceCapAdd +} diff --git a/src/data/models/microserviceCapDrop.js b/src/data/models/microserviceCapDrop.js new file mode 100644 index 00000000..288faa65 --- /dev/null +++ b/src/data/models/microserviceCapDrop.js @@ -0,0 +1,31 @@ +'use strict' +module.exports = (sequelize, DataTypes) => { + const MicroserviceCapDrop = sequelize.define('MicroserviceCapDrop', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + capDrop: { + type: DataTypes.TEXT, + field: 'cap_drop' + } + }, { + tableName: 'MicroserviceCapDrop', + timestamps: false, + underscored: true + }) + MicroserviceCapDrop.associate = function (models) { + MicroserviceCapDrop.belongsTo(models.Microservice, { + foreignKey: { + name: 'microserviceUuid', + field: 'microservice_uuid' + }, + as: 'microservice', + onDelete: 'cascade' + }) + } + return MicroserviceCapDrop +} diff --git a/src/routes/microservices.js b/src/routes/microservices.js index d59b59a6..a43a8cf4 100644 --- a/src/routes/microservices.js +++ b/src/routes/microservices.js @@ -305,6 +305,76 @@ module.exports = [ }) } }, + { + method: 'patch', + path: '/api/v3/microservices/:uuid/rebuild', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: 
constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const rebuildMicroserviceEndPoint = ResponseDecorator.handleErrors(MicroservicesController.rebuildMicroserviceEndPoint, + successCode, errorCodes) + const responseObject = await rebuildMicroserviceEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: responseObject }) + }) + } + }, + { + method: 'patch', + path: '/api/v3/microservices/system/:uuid/rebuild', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE'])(req, res, async () => { + const rebuildSystemMicroserviceEndPoint = ResponseDecorator.handleErrors(MicroservicesController.rebuildSystemMicroserviceEndPoint, + successCode, errorCodes) + const responseObject = await rebuildSystemMicroserviceEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: responseObject }) + }) + } + }, { method: 'patch', path: '/api/v3/microservices/yaml/:uuid', diff --git a/src/schemas/microservice.js b/src/schemas/microservice.js index 92fde6ea..386ad3f4 100644 --- a/src/schemas/microservice.js +++ b/src/schemas/microservice.js @@ -9,6 +9,7 @@ const microserviceCreate = { 'pattern': nameRegex }, 'config': { 'type': 'string' }, + 'annotations': { 
'type': 'string' }, 'catalogItemId': { 'type': 'integer', 'minimum': 4 @@ -53,6 +54,12 @@ const microserviceCreate = { 'cdiDevices': { 'type': 'array', 'items': { 'type': 'string' } }, + 'capAdd': { + 'type': 'array', + 'items': { 'type': 'string' } }, + 'capDrop': { + 'type': 'array', + 'items': { 'type': 'string' } }, 'runAsUser': { 'type': 'string' }, 'platform': { 'type': 'string' }, 'runtime': { 'type': 'string' }, @@ -78,6 +85,7 @@ const microserviceUpdate = { 'pattern': nameRegex }, 'config': { 'type': 'string' }, + 'annotations': { 'type': 'string' }, 'rebuild': { 'type': 'boolean' }, 'iofogUuid': { 'type': 'string' }, 'agentName': { 'type': 'string' }, @@ -108,6 +116,12 @@ const microserviceUpdate = { 'cdiDevices': { 'type': 'array', 'items': { 'type': 'string' } }, + 'capAdd': { + 'type': 'array', + 'items': { 'type': 'string' } }, + 'capDrop': { + 'type': 'array', + 'items': { 'type': 'string' } }, 'runAsUser': { 'type': 'string' }, 'platform': { 'type': 'string' }, 'runtime': { 'type': 'string' }, diff --git a/src/services/agent-service.js b/src/services/agent-service.js index fee8ebc9..88c2be76 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -289,6 +289,8 @@ const getAgentMicroservices = async function (fog, transaction) { }) const cmd = microservice.cmd && microservice.cmd.sort((a, b) => a.id - b.id).map((it) => it.cmd) const cdiDevices = microservice.cdiDevices && microservice.cdiDevices.sort((a, b) => a.id - b.id).map((it) => it.cdiDevices) + const capAdd = microservice.capAdd && microservice.capAdd.sort((a, b) => a.id - b.id).map((it) => it.capAdd) + const capDrop = microservice.capDrop && microservice.capDrop.sort((a, b) => a.id - b.id).map((it) => it.capDrop) const registryId = microservice.catalogItem && microservice.catalogItem.registry ? microservice.catalogItem.registry.id : microservice.registry.id const extraHosts = microservice.extraHosts ? 
microservice.extraHosts.map(_mapExtraHost) : [] @@ -297,6 +299,7 @@ const getAgentMicroservices = async function (fog, transaction) { uuid: microservice.uuid, imageId: imageId, config: microservice.config, + annotations: microservice.annotations, rebuild: microservice.rebuild, rootHostAccess: microservice.rootHostAccess, runAsUser: microservice.runAsUser, @@ -313,6 +316,8 @@ const getAgentMicroservices = async function (fog, transaction) { extraHosts, cmd, cdiDevices, + capAdd, + capDrop, routes, isConsumer } diff --git a/src/services/microservices-service.js b/src/services/microservices-service.js index a0bc47fa..e9bec9fb 100644 --- a/src/services/microservices-service.js +++ b/src/services/microservices-service.js @@ -16,6 +16,8 @@ const MicroserviceManager = require('../data/managers/microservice-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') const MicroserviceArgManager = require('../data/managers/microservice-arg-manager') const MicroserviceCdiDevManager = require('../data/managers/microservice-cdi-device-manager') +const MicroserviceCapAddManager = require('../data/managers/microservice-cap-add-manager') +const MicroserviceCapDropManager = require('../data/managers/microservice-cap-drop-manager') const MicroserviceEnvManager = require('../data/managers/microservice-env-manager') const MicroservicePortService = require('../services/microservice-ports/default') const CatalogItemImageManager = require('../data/managers/catalog-item-image-manager') @@ -321,6 +323,16 @@ async function createMicroserviceEndPoint (microserviceData, isCLI, transaction) await _createCdiDevices(microservice, cdiDevices, transaction) } } + if (microserviceData.capAdd) { + for (const capAdd of microserviceData.capAdd) { + await _createCapAdd(microservice, capAdd, transaction) + } + } + if (microserviceData.capDrop) { + for (const capDrop of microserviceData.capDrop) { + await _createCapDrop(microservice, capDrop, transaction) + } + } 
if (microserviceData.volumeMappings) { await _createVolumeMappings(microservice, microserviceData.volumeMappings, transaction) } @@ -428,16 +440,21 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD const config = _validateMicroserviceConfig(microserviceData.config) + const annotations = _validateMicroserviceAnnotations(microserviceData.annotations) + const newFog = await _findFog(microserviceData, isCLI, transaction) || {} const microserviceToUpdate = { name: microserviceData.name, config: config, + annotations: annotations, images: microserviceData.images, catalogItemId: microserviceData.catalogItemId, rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, cdiDevices: microserviceData.cdiDevices, + capAdd: microserviceData.capAdd, + capDrop: microserviceData.capDrop, runAsUser: microserviceData.runAsUser, platform: microserviceData.platform, runtime: microserviceData.runtime, @@ -553,6 +570,9 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD microserviceDataUpdate.env || microserviceDataUpdate.cmd || microserviceDataUpdate.cdiDevices || + microserviceDataUpdate.annotations || + microserviceDataUpdate.capAdd || + microserviceDataUpdate.capDrop || microserviceDataUpdate.runAsUser || microserviceDataUpdate.platform || microserviceDataUpdate.runtime || @@ -585,6 +605,14 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD await _updateCdiDevices(microserviceDataUpdate.cdiDevices, microserviceUuid, transaction) } + if (microserviceDataUpdate.capAdd) { + await _updateCapAdd(microserviceDataUpdate.capAdd, microserviceUuid, transaction) + } + + if (microserviceDataUpdate.capDrop) { + await _updateCapDrop(microserviceDataUpdate.capDrop, microserviceUuid, transaction) + } + if (microserviceDataUpdate.iofogUuid && microserviceDataUpdate.iofogUuid !== microservice.iofogUuid) { await 
MicroservicePortService.movePublicPortsToNewFog(updatedMicroservice, transaction) } @@ -634,16 +662,21 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i const config = _validateMicroserviceConfig(microserviceData.config) + const annotations = _validateMicroserviceAnnotations(microserviceData.annotations) + const newFog = await _findFog(microserviceData, isCLI, transaction) || {} const microserviceToUpdate = { name: microserviceData.name, config: config, + annotations: annotations, images: microserviceData.images, catalogItemId: microserviceData.catalogItemId, rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, cdiDevices: microserviceData.cdiDevices, + capAdd: microserviceData.capAdd, + capDrop: microserviceData.capDrop, runAsUser: microserviceData.runAsUser, platform: microserviceData.platform, runtime: microserviceData.runtime, @@ -763,6 +796,9 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i microserviceDataUpdate.env || microserviceDataUpdate.cmd || microserviceDataUpdate.cdiDevices || + microserviceDataUpdate.capAdd || + microserviceDataUpdate.capDrop || + microserviceDataUpdate.annotations || microserviceDataUpdate.runAsUser || microserviceDataUpdate.platform || microserviceDataUpdate.runtime || @@ -795,6 +831,14 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i await _updateCdiDevices(microserviceDataUpdate.cdiDevices, microserviceUuid, transaction) } + if (microserviceDataUpdate.capAdd) { + await _updateCapAdd(microserviceDataUpdate.capAdd, microserviceUuid, transaction) + } + + if (microserviceDataUpdate.capDrop) { + await _updateCapDrop(microserviceDataUpdate.capDrop, microserviceUuid, transaction) + } + if (microserviceDataUpdate.iofogUuid && microserviceDataUpdate.iofogUuid !== microservice.iofogUuid) { await MicroservicePortService.movePublicPortsToNewFog(updatedMicroservice, transaction) } @@ 
-860,6 +904,50 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i } } +async function rebuildMicroserviceEndPoint (microserviceUuid, isCLI, transaction) { + const query = isCLI + ? { + uuid: microserviceUuid + } + : { + uuid: microserviceUuid + } + + const microservice = await MicroserviceManager.updateAndFind(query, { rebuild: true }, transaction) + + if (!microservice) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, microserviceUuid)) + } + const iofogUuid = microservice.iofogUuid + await ChangeTrackingService.update(iofogUuid, ChangeTrackingService.events.microserviceCommon, transaction) + return { + uuid: microserviceUuid, + rebuild: true + } +} + +async function rebuildSystemMicroserviceEndPoint (microserviceUuid, isCLI, transaction) { + const query = isCLI + ? { + uuid: microserviceUuid + } + : { + uuid: microserviceUuid + } + + const microservice = await MicroserviceManager.updateAndFind(query, { rebuild: true }, transaction) + + if (!microservice) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, microserviceUuid)) + } + const iofogUuid = microservice.iofogUuid + await ChangeTrackingService.update(iofogUuid, ChangeTrackingService.events.microserviceCommon, transaction) + return { + uuid: microserviceUuid, + rebuild: true + } +} + /** * checks if microservice image is updated * @param {*} microserviceDataUpdateImages @@ -1028,12 +1116,40 @@ async function _createCdiDevices (microservice, cdiDevices, transaction) { throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.REQUIRED_FOG_NODE)) } - const mscdiDevicesData = { + const msCdiDevicesData = { cdiDevices: cdiDevices, microserviceUuid: microservice.uuid } - await MicroserviceCdiDevManager.create(mscdiDevicesData, transaction) + await MicroserviceCdiDevManager.create(msCdiDevicesData, transaction) + await 
MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) +} + +async function _createCapAdd (microservice, capAdd, transaction) { + if (!microservice.iofogUuid) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.REQUIRED_FOG_NODE)) + } + + const msCapAddData = { + capAdd: capAdd, + microserviceUuid: microservice.uuid + } + + await MicroserviceCapAddManager.create(msCapAddData, transaction) + await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) +} + +async function _createCapDrop (microservice, capDrop, transaction) { + if (!microservice.iofogUuid) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.REQUIRED_FOG_NODE)) + } + + const msCapDropData = { + capDrop: capDrop, + microserviceUuid: microservice.uuid + } + + await MicroserviceCapDropManager.create(msCapDropData, transaction) await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) } @@ -1253,17 +1369,29 @@ function _validateMicroserviceConfig (config) { return result } +function _validateMicroserviceAnnotations (annotations) { + let result + if (annotations) { + result = annotations.split('\\"').join('"').split('"').join('\"') // eslint-disable-line no-useless-escape + } + return result +} + async function _createMicroservice (microserviceData, isCLI, transaction) { const config = _validateMicroserviceConfig(microserviceData.config) + const annotations = _validateMicroserviceAnnotations(microserviceData.annotations) let newMicroservice = { uuid: AppHelper.generateRandomString(32), name: microserviceData.name, config: config, + annotations: annotations, catalogItemId: microserviceData.catalogItemId, iofogUuid: microserviceData.iofogUuid, rootHostAccess: microserviceData.rootHostAccess, cdiDevices: microserviceData.cdiDevices, + capAdd: microserviceData.capAdd, + capDrop: microserviceData.capDrop, 
runAsUser: microserviceData.runAsUser, platform: microserviceData.platform, runtime: microserviceData.runtime, @@ -1436,6 +1564,34 @@ async function _updateCdiDevices (cdiDevices, microserviceUuid, transaction) { } } +async function _updateCapAdd (capAdd, microserviceUuid, transaction) { + await MicroserviceCapAddManager.delete({ + microserviceUuid: microserviceUuid + }, transaction) + for (const capAddData of capAdd) { + const envObj = { + microserviceUuid: microserviceUuid, + capAdd: capAddData + } + + await MicroserviceCapAddManager.create(envObj, transaction) + } +} + +async function _updateCapDrop (capDrop, microserviceUuid, transaction) { + await MicroserviceCapDropManager.delete({ + microserviceUuid: microserviceUuid + }, transaction) + for (const capDropData of capDrop) { + const envObj = { + microserviceUuid: microserviceUuid, + capDrop: capDropData + } + + await MicroserviceCapDropManager.create(envObj, transaction) + } +} + async function _updatePorts (newPortMappings, microservice, transaction) { await MicroservicePortService.deletePortMappings(microservice, transaction) for (const portMapping of newPortMappings) { @@ -1540,6 +1696,10 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { const arg = cmd.map((it) => it.cmd) const cdiDevices = await MicroserviceCdiDevManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) const cdiDevs = cdiDevices.map((it) => it.cdiDevices) + const capAdd = await MicroserviceCapAddManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) + const capAdds = capAdd.map((it) => it.capAdd) + const capDrop = await MicroserviceCapDropManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) + const capDrops = capDrop.map((it) => it.capDrop) const pubTags = microservice.pubTags ? microservice.pubTags.map(t => t.value) : [] const subTags = microservice.subTags ? 
microservice.subTags.map(t => t.value) : [] const status = await MicroserviceStatusManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) @@ -1556,6 +1716,8 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { res.env = env res.cmd = arg res.cdiDevices = cdiDevs + res.capAdd = capAdds + res.capDrop = capDrops res.extraHosts = extraHosts.map(eH => ({ name: eH.name, address: eH.template, value: eH.value })) res.images = images.map(i => ({ containerImage: i.containerImage, fogTypeId: i.fogTypeId })) if (status && status.length) { @@ -1636,6 +1798,8 @@ module.exports = { listVolumeMappingsEndPoint: TransactionDecorator.generateTransaction(listVolumeMappingsEndPoint), updateMicroserviceEndPoint: TransactionDecorator.generateTransaction(updateMicroserviceEndPoint), updateSystemMicroserviceEndPoint: TransactionDecorator.generateTransaction(updateSystemMicroserviceEndPoint), + rebuildMicroserviceEndPoint: TransactionDecorator.generateTransaction(rebuildMicroserviceEndPoint), + rebuildSystemMicroserviceEndPoint: TransactionDecorator.generateTransaction(rebuildSystemMicroserviceEndPoint), buildGetMicroserviceResponse: _buildGetMicroserviceResponse, updateChangeTracking: _updateChangeTracking, listMicroserviceByPubTagEndPoint: TransactionDecorator.generateTransaction(listMicroserviceByPubTagEndPoint), diff --git a/src/services/router-service.js b/src/services/router-service.js index 3ccf0bcb..124089e9 100644 --- a/src/services/router-service.js +++ b/src/services/router-service.js @@ -18,6 +18,7 @@ const Constants = require('../helpers/constants') const Errors = require('../helpers/errors') const ErrorMessages = require('../helpers/error-messages') const MicroserviceManager = require('../data/managers/microservice-manager') +const MicroserviceCapAddManager = require('../data/managers/microservice-cap-add-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') const 
ApplicationManager = require('../data/managers/application-manager') const MicroservicePortManager = require('../data/managers/microservice-port-manager') @@ -245,11 +246,22 @@ async function _createRouterMicroservice (isEdge, uuid, microserviceConfig, tran logSize: constants.MICROSERVICE_DEFAULT_LOG_SIZE, configLastUpdated: Date.now() } + + const capAddValues = [ + { capAdd: 'NET_RAW' } + ] + await ApplicationManager.create(routerApplicationData, transaction) const application = await ApplicationManager.findOne({ name: routerApplicationData.name }, transaction) routerMicroserviceData.applicationId = application.id const routerMicroservice = await MicroserviceManager.create(routerMicroserviceData, transaction) await MicroserviceStatusManager.create({ microserviceUuid: routerMicroserviceData.uuid }, transaction) + for (const capAdd of capAddValues) { + await MicroserviceCapAddManager.create({ + microserviceUuid: routerMicroserviceData.uuid, + capAdd: capAdd.capAdd + }, transaction) + } return routerMicroservice } From 3e3b73ac16fb5450c9f2560ea8be657c25e09880 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 9 Apr 2025 20:20:49 +0300 Subject: [PATCH 02/25] ecn-viewer version updated --- package-lock.json | 8 ++++---- package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/package-lock.json b/package-lock.json index 8dcc7705..52b82735 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,7 @@ "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "0.4.3", + "@datasance/ecn-viewer": "0.4.4", "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", @@ -472,9 +472,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.4.3.tgz", - "integrity": "sha512-7GHEwbd1JtbivEMus4QW7NxDwaWQP9E1WjXspTh3JH4048BhrsVrdfnqFt+f5n8x2QE77lT7UePyu5bbnpQXMQ==" + "version": "0.4.4", + 
"resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.4.4.tgz", + "integrity": "sha512-n6ZAmPlOMTch1RBAF44QBO6hos//Q2IqNxtLY8D8Q8kuvPTMhVc1X/zWNLjHiHAsjIfmDFBJFYW6d7VhyEc1wg==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.4.0", diff --git a/package.json b/package.json index 7b39fd6e..271cb871 100644 --- a/package.json +++ b/package.json @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "0.4.3", + "@datasance/ecn-viewer": "0.4.4", "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", From 6cba95cff09cf9241a14cd702c45e3e3105ecbc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Thu, 10 Apr 2025 13:52:37 +0300 Subject: [PATCH 03/25] logrotate and mscv rebuild endpoint fixed --- logrotate.conf | 7 +++++-- src/services/microservices-service.js | 5 +++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/logrotate.conf b/logrotate.conf index 7656fa26..9690627c 100644 --- a/logrotate.conf +++ b/logrotate.conf @@ -1,9 +1,12 @@ /var/log/iofog-controller/iofog-controller.log { - rotate 10 - size 100m + daily + rotate 50 + size 100M compress notifempty missingok + create 0644 10000 10000 + nodateext postrotate if [ -f /home/runner/iofog-controller.pid ]; then kill -HUP `cat /home/runner/iofog-controller.pid`; diff --git a/src/services/microservices-service.js b/src/services/microservices-service.js index e9bec9fb..cd564905 100644 --- a/src/services/microservices-service.js +++ b/src/services/microservices-service.js @@ -913,6 +913,11 @@ async function rebuildMicroserviceEndPoint (microserviceUuid, isCLI, transaction uuid: microserviceUuid } + const check = await MicroserviceManager.findOneWithCategory(query, transaction) + if (check.catalogItem && check.catalogItem.category === 'SYSTEM') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SYSTEM_MICROSERVICE_UPDATE, microserviceUuid)) + } + const microservice = 
await MicroserviceManager.updateAndFind(query, { rebuild: true }, transaction) if (!microservice) { From d3c2016860bccbe36ab15bd294f658e6dfa0992f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Tue, 22 Apr 2025 18:06:16 +0300 Subject: [PATCH 04/25] agent static accestoken replaced with ed25519 keypair and jwt. new config import mechanism added. noon-user-auth option for dev deployments. logging improved. logrotation handled internally --- .dockerignore | 4 +- .gitignore | 4 +- Dockerfile | 4 +- docs/swagger.yaml | 6 +- package-lock.json | 1775 ++++++++++++++++- package.json | 18 +- scripts/postinstall.js | 2 +- scripts/start-dev.js | 38 +- src/cli/config.js | 46 +- src/cli/start.js | 17 +- src/config/constants.js | 31 - src/config/controller.yaml | 111 ++ src/config/default.json | 39 - src/config/development.json | 63 - src/config/env-mapping.js | 79 + src/config/index.js | 153 +- src/config/keycloak.js | 89 +- src/config/production.json | 56 - src/config/telemetry.js | 78 + src/config/test.json | 19 - src/data/managers/fog-used-token-manager.js | 101 + src/data/managers/iofog-manager.js | 14 - src/data/managers/iofog-public-key-manager.js | 89 + src/data/migrations/db_migration_v1.0.2.sql | 27 +- src/data/models/fog.js | 10 + src/data/models/fogUsedToken.js | 42 + src/data/models/fogpublickey.js | 42 + src/data/models/index.js | 9 +- src/data/providers/database-factory.js | 2 +- src/data/providers/database-provider.js | 34 +- src/data/providers/mysql.js | 2 +- src/data/providers/postgres.js | 2 +- src/data/providers/sqlite.js | 2 +- src/decorators/authorization-decorator.js | 67 +- src/helpers/app-helper.js | 2 +- src/init.js | 46 + src/jobs/fog-status-job.js | 4 +- src/jobs/stopped-app-status-job.js | 2 +- src/logger/index.js | 203 +- src/main.js | 13 +- src/schemas/config.js | 2 +- src/server.js | 425 ++-- src/services/agent-service.js | 15 +- src/services/cleanup-service.js | 34 + src/services/diagnostic-service.js | 4 +- 
src/services/iofog-key-service.js | 130 ++ src/services/iofog-service.js | 4 +- src/services/microservice-ports/default.js | 2 +- src/services/microservices-service.js | 13 +- src/services/tunnel-service.js | 10 +- src/services/user-service.js | 268 ++- src/utils/ssl-utils.js | 76 + test/OTEL/README.md | 50 + test/OTEL/docker-compose.yml | 65 + test/OTEL/otel-collector-config.yaml | 39 + test/OTEL/prometheus.yml | 8 + 56 files changed, 3694 insertions(+), 796 deletions(-) delete mode 100644 src/config/constants.js create mode 100644 src/config/controller.yaml delete mode 100644 src/config/default.json delete mode 100644 src/config/development.json create mode 100644 src/config/env-mapping.js delete mode 100644 src/config/production.json create mode 100644 src/config/telemetry.js delete mode 100644 src/config/test.json create mode 100644 src/data/managers/fog-used-token-manager.js create mode 100644 src/data/managers/iofog-public-key-manager.js create mode 100644 src/data/models/fogUsedToken.js create mode 100644 src/data/models/fogpublickey.js create mode 100644 src/init.js create mode 100644 src/services/cleanup-service.js create mode 100644 src/services/iofog-key-service.js create mode 100644 src/utils/ssl-utils.js create mode 100644 test/OTEL/README.md create mode 100644 test/OTEL/docker-compose.yml create mode 100644 test/OTEL/otel-collector-config.yaml create mode 100644 test/OTEL/prometheus.yml diff --git a/.dockerignore b/.dockerignore index 10f38463..72c4d854 100644 --- a/.dockerignore +++ b/.dockerignore @@ -15,4 +15,6 @@ node_modules /src/config/*-config.json .DS_Store iofogcontroller-*.tgz -iofog-iofogcontroller-*.tgz \ No newline at end of file +iofog-iofogcontroller-*.tgz +.env +src/iofog-controller.pid \ No newline at end of file diff --git a/.gitignore b/.gitignore index a80448af..76ea49d7 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,6 @@ node_modules iofogcontroller-*.tgz diagnostic/ iofog-iofogcontroller-*.tgz -.npmrc \ No newline at end 
of file +.npmrc +.env +src/iofog-controller.pid \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 7e3af988..ef8c1a33 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,9 +27,7 @@ FROM registry.access.redhat.com/ubi9/nodejs-20-minimal:latest USER root # Install dependencies for logging and development -RUN microdnf install -y logrotate g++ make && microdnf clean all - -COPY logrotate.conf /etc/logrotate.d/iofog-controller +RUN microdnf install -y g++ make && microdnf clean all # Install Python and pip RUN microdnf install -y python3 && \ diff --git a/docs/swagger.yaml b/docs/swagger.yaml index c0f0cfc5..93ce06d6 100755 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -3191,9 +3191,9 @@ components: scheme: bearer bearerFormat: JWT agentToken: - type: apiKey - in: header - name: Authorization + type: http + scheme: bearer + bearerFormat: JWT requestBodies: UpdateIOFogNodeRequestBody: content: diff --git a/package-lock.json b/package-lock.json index 52b82735..cbfa99de 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,22 @@ { "name": "@datasance/iofogcontroller", - "version": "3.4.11", + "version": "3.5.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.4.11", + "version": "3.5.0", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "0.4.4", + "@datasance/ecn-viewer": "0.5.3", + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.200.0", + "@opentelemetry/instrumentation-express": "^0.48.1", + "@opentelemetry/instrumentation-http": "^0.200.0", + "@opentelemetry/resources": "^1.8.0", + "@opentelemetry/sdk-node": "^0.200.0", "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", @@ -20,6 +26,7 @@ "cookie-parser": "1.4.7", "cors": "2.8.5", "daemonize2": "0.4.2", + "dotenv": "^16.5.0", "ejs": "3.1.10", "express": "4.21.2", "express-session": "1.18.1", @@ -29,6 +36,7 @@ "helmet": "7.1.0", 
"https": "1.0.0", "is-elevated": "3.0.0", + "jose": "^4.15.9", "js-yaml": "4.1.0", "jsonschema": "1.4.1", "keycloak-connect": "^26.1.1", @@ -40,6 +48,7 @@ "mysql2": "3.10.1", "nconf": "0.12.1", "node-fetch-npm": "^2.0.4", + "node-schedule": "^2.1.1", "os": "0.1.2", "path": "0.12.7", "pg": "8.12.0", @@ -472,9 +481,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.4.4.tgz", - "integrity": "sha512-n6ZAmPlOMTch1RBAF44QBO6hos//Q2IqNxtLY8D8Q8kuvPTMhVc1X/zWNLjHiHAsjIfmDFBJFYW6d7VhyEc1wg==" + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.5.3.tgz", + "integrity": "sha512-jjfnn9zPK5OmRVbAOdfaB/jIrKA4w/RsSNK2fbeQeGhq01deJQJXmbHqutdskdyQpJKdwVWpM3mWAIe2nimOCg==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.4.0", @@ -690,6 +699,89 @@ "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", "optional": true }, + "node_modules/@grpc/grpc-js": { + "version": "1.13.3", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.13.3.tgz", + "integrity": "sha512-FTXHdOoPbZrBjlVLHuKbDZnsTxXv2BlHF57xw6LuThXacXvtkahEPED0CKMk6obZDf65Hv4k3z62eyPNpvinIg==", + "dependencies": { + "@grpc/proto-loader": "^0.7.13", + "@js-sdsl/ordered-map": "^4.4.2" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "node_modules/@grpc/proto-loader": { + "version": "0.7.14", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.14.tgz", + "integrity": "sha512-oS0FyK8eGNBJC6aB/qsS4LOxCYQlBniNzp6W8IdjlRVRGs0FOK9dS84OV+kXGaZf8Ozeos8fbUMJUGGzSpOCzQ==", + "dependencies": { + "lodash.camelcase": "^4.3.0", + "long": "^5.0.0", + "protobufjs": "^7.2.5", + "yargs": "^17.7.2" + }, + "bin": { + "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@grpc/proto-loader/node_modules/cliui": { + 
"version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@grpc/proto-loader/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@grpc/proto-loader/node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@grpc/proto-loader/node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, "node_modules/@humanfs/core": { "version": "0.19.1", "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", @@ -840,151 +932,1414 @@ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, "dependencies": { - "sprintf-js": 
"~1.0.2" + "sprintf-js": "~1.0.2" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": 
true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": 
"https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@js-sdsl/ordered-map": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", + "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "node_modules/@one-ini/wasm": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@one-ini/wasm/-/wasm-0.1.1.tgz", + "integrity": "sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==", + "dev": true + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/context-async-hooks": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-2.0.0.tgz", + "integrity": "sha512-IEkJGzK1A9v3/EHjXh3s2IiFc6L4jfK+lNgKVgUjeUJQRRhnVFMIO3TAvKwonm9O1HebCuoOt98v8bZW7oVQHA==", + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.30.1.tgz", + "integrity": "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-grpc": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-grpc/-/exporter-logs-otlp-grpc-0.200.0.tgz", + "integrity": "sha512-+3MDfa5YQPGM3WXxW9kqGD85Q7s9wlEMVNhXXG7tYFLnIeaseUt9YtCeFhEDFzfEktacdFpOtXmJuNW8cHbU5A==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-grpc-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/sdk-logs": "0.200.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-grpc/node_modules/@opentelemetry/core": { + "version": "2.0.0", + 
"resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-http": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-http/-/exporter-logs-otlp-http-0.200.0.tgz", + "integrity": "sha512-KfWw49htbGGp9s8N4KI8EQ9XuqKJ0VG+yVYVYFiCYSjEV32qpQ5qZ9UZBzOZ6xRb+E16SXOSCT3RkqBVSABZ+g==", + "dependencies": { + "@opentelemetry/api-logs": "0.200.0", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/sdk-logs": "0.200.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-http/node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-http/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 
<1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-proto": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-proto/-/exporter-logs-otlp-proto-0.200.0.tgz", + "integrity": "sha512-GmahpUU/55hxfH4TP77ChOfftADsCq/nuri73I/AVLe2s4NIglvTsaACkFVZAVmnXXyPS00Fk3x27WS3yO07zA==", + "dependencies": { + "@opentelemetry/api-logs": "0.200.0", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-logs": "0.200.0", + "@opentelemetry/sdk-trace-base": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-proto/node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-proto/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-logs-otlp-proto/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": 
"sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-grpc": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-grpc/-/exporter-metrics-otlp-grpc-0.200.0.tgz", + "integrity": "sha512-uHawPRvKIrhqH09GloTuYeq2BjyieYHIpiklOvxm9zhrCL2eRsnI/6g9v2BZTVtGp8tEgIa7rCQ6Ltxw6NBgew==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/exporter-metrics-otlp-http": "0.200.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-grpc-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-metrics": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/exporter-metrics-otlp-http": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.200.0.tgz", + "integrity": 
"sha512-5BiR6i8yHc9+qW7F6LqkuUnIzVNA7lt0qRxIKcKT+gq3eGUPHZ3DY29sfxI3tkvnwMgtnHDMNze5DdxW39HsAw==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-metrics": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/sdk-metrics": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", + "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.9.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-proto": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-proto/-/exporter-metrics-otlp-proto-0.200.0.tgz", + "integrity": "sha512-E+uPj0yyvz81U9pvLZp3oHtFrEzNSqKGVkIViTQY1rH3TOobeJPSpLnTVXACnCwkPR5XeTvPnK3pZ2Kni8AFMg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + 
"@opentelemetry/exporter-metrics-otlp-http": "0.200.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-metrics": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/exporter-metrics-otlp-http": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.200.0.tgz", + "integrity": "sha512-5BiR6i8yHc9+qW7F6LqkuUnIzVNA7lt0qRxIKcKT+gq3eGUPHZ3DY29sfxI3tkvnwMgtnHDMNze5DdxW39HsAw==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-metrics": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + 
"@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/sdk-metrics": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", + "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.9.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-prometheus": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-prometheus/-/exporter-prometheus-0.200.0.tgz", + "integrity": "sha512-ZYdlU9r0USuuYppiDyU2VFRA0kFl855ylnb3N/2aOlXrbA4PMCznen7gmPbetGQu7pz8Jbaf4fwvrDnVdQQXSw==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-metrics": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-prometheus/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-prometheus/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-prometheus/node_modules/@opentelemetry/sdk-metrics": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", + "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.9.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-grpc": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-grpc/-/exporter-trace-otlp-grpc-0.200.0.tgz", + "integrity": "sha512-hmeZrUkFl1YMsgukSuHCFPYeF9df0hHoKeHUthRKFCxiURs+GwF1VuabuHmBMZnjTbsuvNjOB+JSs37Csem/5Q==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-grpc-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-trace-base": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-grpc/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": 
"sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-grpc/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-http": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-http/-/exporter-trace-otlp-http-0.200.0.tgz", + "integrity": "sha512-Goi//m/7ZHeUedxTGVmEzH19NgqJY+Bzr6zXo1Rni1+hwqaksEyJ44gdlEMREu6dzX1DlAaH/qSykSVzdrdafA==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-trace-base": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-http/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + 
"peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-http/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-proto": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-proto/-/exporter-trace-otlp-proto-0.200.0.tgz", + "integrity": "sha512-V9TDSD3PjK1OREw2iT9TUTzNYEVWJk4Nhodzhp9eiz4onDMYmPy3LaGbPv81yIR6dUb/hNp/SIhpiCHwFUq2Vg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-trace-base": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-proto/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-proto/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-zipkin": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-zipkin/-/exporter-zipkin-2.0.0.tgz", + "integrity": "sha512-icxaKZ+jZL/NHXX8Aru4HGsrdhK0MLcuRXkX5G5IRmCgoRLw+Br6I/nMVozX2xjGGwV7hw2g+4Slj8K7s4HbVg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-trace-base": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/exporter-zipkin/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-zipkin/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || 
>=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express": { + "version": "0.48.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-express/-/instrumentation-express-0.48.1.tgz", + "integrity": "sha512-j8NYOf9DRWtchbWor/zA0poI42TpZG9tViIKA0e1lC+6MshTqSJYtgNv8Fn1sx1Wn/TRyp+5OgSXiE4LDfvpEg==", + "dependencies": { + "@opentelemetry/core": "^2.0.0", + "@opentelemetry/instrumentation": "^0.200.0", + "@opentelemetry/semantic-conventions": "^1.27.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express/node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express/node_modules/@opentelemetry/instrumentation": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.200.0.tgz", + "integrity": "sha512-pmPlzfJd+vvgaZd/reMsC8RWgTXn2WY1OWT5RT42m3aOn5532TozwXNDhg1vzqJ+jnvmkREcdLr27ebJEQt0Jg==", + "dependencies": { + 
"@opentelemetry/api-logs": "0.200.0", + "@types/shimmer": "^1.2.0", + "import-in-the-middle": "^1.8.1", + "require-in-the-middle": "^7.1.1", + "shimmer": "^1.2.1" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express/node_modules/acorn": { + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/@opentelemetry/instrumentation-express/node_modules/import-in-the-middle": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.13.1.tgz", + "integrity": "sha512-k2V9wNm9B+ysuelDTHjI9d5KPc4l8zAZTGqj+pcynvWkypZd857ryzN8jNC7Pg2YZXNMJcHRPpaDyCBbNyVRpA==", + "dependencies": { + "acorn": "^8.14.0", + "acorn-import-attributes": "^1.9.5", + "cjs-module-lexer": "^1.2.2", + "module-details-from-path": "^1.0.3" + } + }, + "node_modules/@opentelemetry/instrumentation-http": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.200.0.tgz", + "integrity": "sha512-9tqGbCJikhYU68y3k9mi6yWsMyMeCcwoQuHvIXan5VvvPPQ5WIZaV6Mxu/MCVe4swRNoFs8Th+qyj0TZV5ELvw==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/instrumentation": "0.200.0", + "@opentelemetry/semantic-conventions": "^1.29.0", + "forwarded-parse": "2.1.2" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": 
"sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/instrumentation": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.200.0.tgz", + "integrity": "sha512-pmPlzfJd+vvgaZd/reMsC8RWgTXn2WY1OWT5RT42m3aOn5532TozwXNDhg1vzqJ+jnvmkREcdLr27ebJEQt0Jg==", + "dependencies": { + "@opentelemetry/api-logs": "0.200.0", + "@types/shimmer": "^1.2.0", + "import-in-the-middle": "^1.8.1", + "require-in-the-middle": "^7.1.1", + "shimmer": "^1.2.1" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http/node_modules/acorn": { + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/@opentelemetry/instrumentation-http/node_modules/import-in-the-middle": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.13.1.tgz", + "integrity": 
"sha512-k2V9wNm9B+ysuelDTHjI9d5KPc4l8zAZTGqj+pcynvWkypZd857ryzN8jNC7Pg2YZXNMJcHRPpaDyCBbNyVRpA==", + "dependencies": { + "acorn": "^8.14.0", + "acorn-import-attributes": "^1.9.5", + "cjs-module-lexer": "^1.2.2", + "module-details-from-path": "^1.0.3" + } + }, + "node_modules/@opentelemetry/otlp-exporter-base": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.200.0.tgz", + "integrity": "sha512-IxJgA3FD7q4V6gGq4bnmQM5nTIyMDkoGFGrBrrDjB6onEiq1pafma55V+bHvGYLWvcqbBbRfezr1GED88lacEQ==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-transformer": "0.200.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/otlp-exporter-base/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/otlp-grpc-exporter-base": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-grpc-exporter-base/-/otlp-grpc-exporter-base-0.200.0.tgz", + "integrity": "sha512-CK2S+bFgOZ66Bsu5hlDeOX6cvW5FVtVjFFbWuaJP0ELxJKBB6HlbLZQ2phqz/uLj1cWap5xJr/PsR3iGoB7Vqw==", + "dependencies": { + "@grpc/grpc-js": "^1.7.1", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + 
"node_modules/@opentelemetry/otlp-grpc-exporter-base/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.200.0.tgz", + "integrity": "sha512-+9YDZbYybOnv7sWzebWOeK6gKyt2XE7iarSyBFkwwnP559pEevKOUD8NyDHhRjCSp13ybh9iVXlMfcj/DwF/yw==", + "dependencies": { + "@opentelemetry/api-logs": "0.200.0", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-logs": "0.200.0", + "@opentelemetry/sdk-metrics": "2.0.0", + "@opentelemetry/sdk-trace-base": "2.0.0", + "protobufjs": "^7.3.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": 
"^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/sdk-metrics": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", + "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.9.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/propagator-b3": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-2.0.0.tgz", + "integrity": "sha512-blx9S2EI49Ycuw6VZq+bkpaIoiJFhsDuvFGhBIoH3vJ5oYjJ2U0s3fAM5jYft99xVIAv6HqoPtlP9gpVA2IZtA==", + "dependencies": { + "@opentelemetry/core": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/propagator-b3/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": 
"sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/propagator-jaeger": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-2.0.0.tgz", + "integrity": "sha512-Mbm/LSFyAtQKP0AQah4AfGgsD+vsZcyreZoQ5okFBk33hU7AquU4TltgyL9dvaO8/Zkoud8/0gEvwfOZ5d7EPA==", + "dependencies": { + "@opentelemetry/core": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/propagator-jaeger/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/resources": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.30.1.tgz", + "integrity": "sha512-5UxZqiAgLYGFjS4s9qm5mBVo433u+dSPUFWVWXmLAD4wB65oMCoXaJP1KJa9DIYYMeHu3z4BZcStG3LC593cWA==", + "dependencies": { + "@opentelemetry/core": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/resources/node_modules/@opentelemetry/semantic-conventions": { + "version": "1.28.0", + "resolved": 
"https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", + "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/sdk-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.200.0.tgz", + "integrity": "sha512-VZG870063NLfObmQQNtCVcdXXLzI3vOjjrRENmU37HYiPFa0ZXpXVDsTD02Nh3AT3xYJzQaWKl2X2lQ2l7TWJA==", + "dependencies": { + "@opentelemetry/api-logs": "0.200.0", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.4.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "dependencies": { + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": 
"sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-node": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-node/-/sdk-node-0.200.0.tgz", + "integrity": "sha512-S/YSy9GIswnhYoDor1RusNkmRughipvTCOQrlF1dzI70yQaf68qgf5WMnzUxdlCl3/et/pvaO75xfPfuEmCK5A==", + "dependencies": { + "@opentelemetry/api-logs": "0.200.0", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/exporter-logs-otlp-grpc": "0.200.0", + "@opentelemetry/exporter-logs-otlp-http": "0.200.0", + "@opentelemetry/exporter-logs-otlp-proto": "0.200.0", + "@opentelemetry/exporter-metrics-otlp-grpc": "0.200.0", + "@opentelemetry/exporter-metrics-otlp-http": "0.200.0", + "@opentelemetry/exporter-metrics-otlp-proto": "0.200.0", + "@opentelemetry/exporter-prometheus": "0.200.0", + "@opentelemetry/exporter-trace-otlp-grpc": "0.200.0", + "@opentelemetry/exporter-trace-otlp-http": "0.200.0", + "@opentelemetry/exporter-trace-otlp-proto": "0.200.0", + "@opentelemetry/exporter-zipkin": "2.0.0", + "@opentelemetry/instrumentation": "0.200.0", + "@opentelemetry/propagator-b3": "2.0.0", + "@opentelemetry/propagator-jaeger": "2.0.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-logs": "0.200.0", + "@opentelemetry/sdk-metrics": "2.0.0", + "@opentelemetry/sdk-trace-base": "2.0.0", + "@opentelemetry/sdk-trace-node": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": 
"https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" } }, - "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" + "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { - "node": ">=8" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "dev": true, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/exporter-metrics-otlp-http": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.200.0.tgz", + "integrity": "sha512-5BiR6i8yHc9+qW7F6LqkuUnIzVNA7lt0qRxIKcKT+gq3eGUPHZ3DY29sfxI3tkvnwMgtnHDMNze5DdxW39HsAw==", "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" + "@opentelemetry/core": "2.0.0", + "@opentelemetry/otlp-exporter-base": "0.200.0", + "@opentelemetry/otlp-transformer": "0.200.0", + 
"@opentelemetry/resources": "2.0.0", + "@opentelemetry/sdk-metrics": "2.0.0" }, - "bin": { - "js-yaml": "bin/js-yaml.js" + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/instrumentation": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.200.0.tgz", + "integrity": "sha512-pmPlzfJd+vvgaZd/reMsC8RWgTXn2WY1OWT5RT42m3aOn5532TozwXNDhg1vzqJ+jnvmkREcdLr27ebJEQt0Jg==", "dependencies": { - "p-locate": "^4.1.0" + "@opentelemetry/api-logs": "0.200.0", + "@types/shimmer": "^1.2.0", + "import-in-the-middle": "^1.8.1", + "require-in-the-middle": "^7.1.1", + "shimmer": "^1.2.1" }, "engines": { - "node": ">=8" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", "dependencies": { - "p-try": "^2.0.0" + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { - "node": ">=6" + "node": "^18.19.0 || >=20.6.0" 
}, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/sdk-metrics": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", + "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", "dependencies": { - "p-limit": "^2.2.0" + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0" }, "engines": { - "node": ">=8" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.9.0 <1.10.0" } }, - "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", - "dev": true, + "node_modules/@opentelemetry/sdk-node/node_modules/acorn": { + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "bin": { + "acorn": "bin/acorn" + }, "engines": { - "node": ">=8" + "node": ">=0.4.0" } }, - "node_modules/@istanbuljs/load-nyc-config/node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "dev": true + 
"node_modules/@opentelemetry/sdk-node/node_modules/import-in-the-middle": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.13.1.tgz", + "integrity": "sha512-k2V9wNm9B+ysuelDTHjI9d5KPc4l8zAZTGqj+pcynvWkypZd857ryzN8jNC7Pg2YZXNMJcHRPpaDyCBbNyVRpA==", + "dependencies": { + "acorn": "^8.14.0", + "acorn-import-attributes": "^1.9.5", + "cjs-module-lexer": "^1.2.2", + "module-details-from-path": "^1.0.3" + } }, - "node_modules/@istanbuljs/schema": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", - "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", - "dev": true, + "node_modules/@opentelemetry/sdk-trace-base": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.0.0.tgz", + "integrity": "sha512-qQnYdX+ZCkonM7tA5iU4fSRsVxbFGml8jbxOgipRGMFHKaXKHQ30js03rTobYjKjIfnOsZSbHKWF0/0v0OQGfw==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, "engines": { - "node": ">=8" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", - "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", - "dev": true, + "node_modules/@opentelemetry/sdk-trace-base/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": 
"^1.4.10", - "@jridgewell/trace-mapping": "^0.3.24" + "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { - "node": ">=6.0.0" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "dev": true, + "node_modules/@opentelemetry/sdk-trace-base/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "dependencies": { + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" + }, "engines": { - "node": ">=6.0.0" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "dev": true, + "node_modules/@opentelemetry/sdk-trace-node": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-2.0.0.tgz", + "integrity": "sha512-omdilCZozUjQwY3uZRBwbaRMJ3p09l4t187Lsdf0dGMye9WKD4NGcpgZRvqhI1dwcH6og+YXQEtoO9Wx3ykilg==", + "dependencies": { + "@opentelemetry/context-async-hooks": "2.0.0", + "@opentelemetry/core": "2.0.0", + "@opentelemetry/sdk-trace-base": "2.0.0" + }, "engines": { - "node": ">=6.0.0" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - 
"node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", - "dev": true - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", - "dev": true, + "node_modules/@opentelemetry/sdk-trace-node/node_modules/@opentelemetry/core": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", "dependencies": { - "@jridgewell/resolve-uri": "^3.1.0", - "@jridgewell/sourcemap-codec": "^1.4.14" + "@opentelemetry/semantic-conventions": "^1.29.0" + }, + "engines": { + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@one-ini/wasm": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@one-ini/wasm/-/wasm-0.1.1.tgz", - "integrity": "sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==", - "dev": true + "node_modules/@opentelemetry/semantic-conventions": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.32.0.tgz", + "integrity": "sha512-s0OpmpQFSfMrmedAn9Lhg4KWJELHCU6uU9dtIJ28N8UGhf9Y55im5X8fEzwhwDwiSqN+ZPSNrDJF7ivf/AuRPQ==", + "engines": { + "node": ">=14" + } }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", @@ -1037,6 +2392,60 @@ "node": "*" } }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + }, "node_modules/@rushstack/node-core-library": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/@rushstack/node-core-library/-/node-core-library-4.3.0.tgz", @@ -1352,6 +2761,11 @@ "undici-types": "~5.26.4" } }, + "node_modules/@types/shimmer": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@types/shimmer/-/shimmer-1.2.0.tgz", + "integrity": "sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==" + }, "node_modules/@types/superagent": { "version": "4.1.13", "resolved": "https://registry.npmjs.org/@types/superagent/-/superagent-4.1.13.tgz", @@ -1409,7 +2823,6 @@ "version": "8.11.3", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", - "dev": true, "bin": { "acorn": "bin/acorn" }, @@ -1417,6 +2830,14 @@ "node": ">=0.4.0" } }, + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", + "peerDependencies": { + "acorn": "^8" + } + }, "node_modules/acorn-jsx": { "version": 
"5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -2544,6 +3965,11 @@ "deprecated": "CircularJSON is in maintenance only, flatted is its successor.", "dev": true }, + "node_modules/cjs-module-lexer": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", + "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==" + }, "node_modules/clean-stack": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", @@ -2928,6 +4354,17 @@ "node": ">= 0.10" } }, + "node_modules/cron-parser": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/cron-parser/-/cron-parser-4.9.0.tgz", + "integrity": "sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==", + "dependencies": { + "luxon": "^3.2.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -3338,6 +4775,17 @@ "node": ">=0.10.0" } }, + "node_modules/dotenv": { + "version": "16.5.0", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.5.0.tgz", + "integrity": "sha512-m/C+AwOAr9/W1UOIZUo232ejMNnJAJtYQjUbHoNTBNTJSvqzzDh7vnrei3o3r3m9blf6ZoDkvcw0VmozNRFJxg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, "node_modules/dottie": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/dottie/-/dottie-2.0.6.tgz", @@ -4830,6 +6278,11 @@ "node": ">= 0.6" } }, + "node_modules/forwarded-parse": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/forwarded-parse/-/forwarded-parse-2.1.2.tgz", + "integrity": "sha512-alTFZZQDKMporBH77856pXgzhEzaUVmLCDk+egLgIgHst3Tpndzz8MnKe+GzRJRfvVdn69HhpW7cmXzvtLvJAw==" + }, "node_modules/fresh": { "version": "0.5.2", "resolved": 
"https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", @@ -6603,10 +8056,9 @@ "integrity": "sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==" }, "node_modules/jose": { - "version": "5.6.3", - "resolved": "https://registry.npmjs.org/jose/-/jose-5.6.3.tgz", - "integrity": "sha512-1Jh//hEEwMhNYPDDLwXHa2ePWgWiFNNUadVmguAAw2IJ6sj9mNxV5tGXJNqlMkJAybF6Lgw1mISDxTePP/187g==", - "dev": true, + "version": "4.15.9", + "resolved": "https://registry.npmjs.org/jose/-/jose-4.15.9.tgz", + "integrity": "sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==", "funding": { "url": "https://github.com/sponsors/panva" } @@ -7066,6 +8518,11 @@ "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" }, + "node_modules/long-timeout": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/long-timeout/-/long-timeout-0.1.1.tgz", + "integrity": "sha512-BFRuQUqc7x2NWxfJBCyUrN8iYUYznzL9JROmRz1gZ6KlOIgmoD+njPVbb+VNn2nGMKggMsK79iUNErillsrx7w==" + }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", @@ -7104,6 +8561,14 @@ "es5-ext": "~0.10.2" } }, + "node_modules/luxon": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.6.1.tgz", + "integrity": "sha512-tJLxrKJhO2ukZ5z0gyjY1zPh3Rh88Ej9P7jNrZiHMUXHae1yvI2imgOZtL1TO8TW6biMMKfTtAOoEJANgtWBMQ==", + "engines": { + "node": ">=12" + } + }, "node_modules/make-dir": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", @@ -7559,6 +9024,11 @@ "url": "https://github.com/chalk/supports-color?sponsor=1" } }, + "node_modules/module-details-from-path": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.3.tgz", + "integrity": 
"sha512-ySViT69/76t8VhE1xXHK6Ch4NcDd26gx0MzKXLO+F7NOtnqH68d9zF94nT8ZWSxXh8ELOERsnJO/sWt1xZYw5A==" + }, "node_modules/moment": { "version": "2.30.1", "resolved": "https://registry.npmjs.org/moment/-/moment-2.30.1.tgz", @@ -7904,6 +9374,19 @@ "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", "dev": true }, + "node_modules/node-schedule": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/node-schedule/-/node-schedule-2.1.1.tgz", + "integrity": "sha512-OXdegQq03OmXEjt2hZP33W2YPs/E5BcFQks46+G2gAxs4gHOIVD1u7EqlYLYSKsaIpyKCK9Gbk0ta1/gjRSMRQ==", + "dependencies": { + "cron-parser": "^4.2.0", + "long-timeout": "0.1.1", + "sorted-array-functions": "^1.3.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", @@ -9242,6 +10725,15 @@ "node": ">=16" } }, + "node_modules/postman-runtime/node_modules/jose": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/jose/-/jose-5.6.3.tgz", + "integrity": "sha512-1Jh//hEEwMhNYPDDLwXHa2ePWgWiFNNUadVmguAAw2IJ6sj9mNxV5tGXJNqlMkJAybF6Lgw1mISDxTePP/187g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, "node_modules/postman-sandbox": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/postman-sandbox/-/postman-sandbox-5.1.1.tgz", @@ -9407,6 +10899,29 @@ "integrity": "sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", "dev": true }, + "node_modules/protobufjs": { + "version": "7.5.0", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.0.tgz", + "integrity": "sha512-Z2E/kOY1QjoMlCytmexzYfDm/w5fKAiRwpSzGtdnXW1zC88Z2yXazHHrOtwCzn+7wSxyE8PYM4rvVcMphF9sOA==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + 
"@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -9924,6 +11439,40 @@ "node": ">=0.10.0" } }, + "node_modules/require-in-the-middle": { + "version": "7.5.2", + "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-7.5.2.tgz", + "integrity": "sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==", + "dependencies": { + "debug": "^4.3.5", + "module-details-from-path": "^1.0.3", + "resolve": "^1.22.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/require-in-the-middle/node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/require-in-the-middle/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, "node_modules/require-main-filename": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", @@ -10500,6 +12049,11 @@ "node": ">=8" } }, + "node_modules/shimmer": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz", + "integrity": 
"sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==" + }, "node_modules/side-channel": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", @@ -10663,6 +12217,11 @@ "atomic-sleep": "^1.0.0" } }, + "node_modules/sorted-array-functions": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/sorted-array-functions/-/sorted-array-functions-1.3.0.tgz", + "integrity": "sha512-2sqgzeFlid6N4Z2fUQ1cvFmTOLRi/sEDzSQ0OKYchqgoPmQBVyM3959qYx3fpS6Esef80KjmpgPeEr028dP3OA==" + }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", diff --git a/package.json b/package.json index 271cb871..4b6212bb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.4.11", + "version": "3.5.0", "description": "ioFog Controller project for Eclipse IoFog @ iofog.org \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "0.4.4", + "@datasance/ecn-viewer": "0.5.3", "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", @@ -65,6 +65,7 @@ "cookie-parser": "1.4.7", "cors": "2.8.5", "daemonize2": "0.4.2", + "dotenv": "^16.5.0", "ejs": "3.1.10", "express": "4.21.2", "express-session": "1.18.1", @@ -74,6 +75,7 @@ "helmet": "7.1.0", "https": "1.0.0", "is-elevated": "3.0.0", + "jose": "^4.15.9", "js-yaml": "4.1.0", "jsonschema": "1.4.1", "keycloak-connect": "^26.1.1", @@ -85,6 +87,7 @@ "mysql2": "3.10.1", "nconf": "0.12.1", "node-fetch-npm": "^2.0.4", + "node-schedule": "^2.1.1", "os": "0.1.2", "path": "0.12.7", "pg": "8.12.0", @@ -98,7 +101,13 @@ "string-format": "2.0.0", "umzug": "^3.7.0", "underscore": "1.13.6", - "xss-clean": "0.1.1" + "xss-clean": "0.1.1", + "@opentelemetry/api": "^1.9.0", + 
"@opentelemetry/resources": "^1.8.0", + "@opentelemetry/sdk-node": "^0.200.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.200.0", + "@opentelemetry/instrumentation-http": "^0.200.0", + "@opentelemetry/instrumentation-express": "^0.48.1" }, "devDependencies": { "acorn": "8.11.3", @@ -127,5 +136,6 @@ ".eslintrc.js", ".jshintrc", ".snyk" - ] + ], + "type": "commonjs" } diff --git a/scripts/postinstall.js b/scripts/postinstall.js index 9884ab3b..851c0062 100644 --- a/scripts/postinstall.js +++ b/scripts/postinstall.js @@ -168,7 +168,7 @@ function updateEncryptionMethod () { function updateLogName () { console.log(' updating log name in ') - const dirname = config.get('Service:LogsDirectory') + const dirname = config.get('log.directory') if (fs.existsSync(dirname)) { fs.readdirSync(dirname).forEach((file) => { diff --git a/scripts/start-dev.js b/scripts/start-dev.js index 3f852f46..996af7f9 100644 --- a/scripts/start-dev.js +++ b/scripts/start-dev.js @@ -12,21 +12,43 @@ */ const execSync = require('child_process').execSync - +const path = require('path') +const fs = require('fs') const { setDbEnvVars } = require('./util') function startDev () { + // Load .env file if it exists + const envPath = path.resolve(process.cwd(), '.env') + let envVars = {} + + if (fs.existsSync(envPath)) { + const envContent = fs.readFileSync(envPath, 'utf8') + envContent.split('\n').forEach(line => { + line = line.trim() + if (line && !line.startsWith('#')) { + const [key, value] = line.split('=').map(str => str.trim()) + if (key && value) { + envVars[key] = value + } + } + }) + } else { + } + + // Create a new environment object with all variables + const newEnv = { + ...process.env, // Include existing environment variables + ...envVars, // Override with .env variables + 'NODE_ENV': 'development', + 'PATH': process.env.PATH + } + + // Apply database environment variables const options = { - env: { - 'NODE_ENV': 'development', - 'VIEWER_PORT': '8008', - 'PATH': process.env.PATH - }, + 
env: setDbEnvVars(newEnv), stdio: [process.stdin, process.stdout, process.stderr] } - options.env = setDbEnvVars(options.env) - execSync('node ./src/main.js start', options) } diff --git a/src/cli/config.js b/src/cli/config.js index 3c5af3cd..6492c4b0 100644 --- a/src/cli/config.js +++ b/src/cli/config.js @@ -142,59 +142,59 @@ const _executeCase = async function (catalogCommand, commandName, f) { const _addConfigOption = async function (options) { await Validator.validate(options, Validator.schemas.configUpdate) - await updateConfig(options.port, 'port', 'Server:Port', async (onSuccess) => { + await updateConfig(options.port, 'port', 'server.port', async (onSuccess) => { const port = options.port const status = await AppHelper.checkPortAvailability(port) if (status === 'closed') { - config.set('Server:Port', port) + config.set('server.port', port) onSuccess() } else { logger.error(AppHelper.formatMessage(ErrorMessages.PORT_NOT_AVAILABLE, port)) } }) - await updateConfig(options.sslCert, 'ssl-cert', 'Server:SslCert', (onSuccess) => { + await updateConfig(options.sslCert, 'ssl-cert', 'server.ssl.path.cert', (onSuccess) => { const sslCert = options.sslCert if (!AppHelper.isFileExists(sslCert)) { logger.error(ErrorMessages.INVALID_FILE_PATH) return } - config.set('Server:SslCert', sslCert) + config.set('server.ssl.path.cert', sslCert) onSuccess() }) - await updateConfig(options.sslKey, 'ssl-key', 'Server:SslKey', (onSuccess) => { + await updateConfig(options.sslKey, 'ssl-key', 'server.ssl.path.key', (onSuccess) => { const sslKey = options.sslKey if (!AppHelper.isFileExists(sslKey)) { logger.error(ErrorMessages.INVALID_FILE_PATH) return } - config.set('Server:SslKey', sslKey) + config.set('server.ssl.path.key', sslKey) onSuccess() }) - await updateConfig(options.intermediateCert, 'intermediate-cert', 'Server:IntermediateCert', (onSuccess) => { + await updateConfig(options.intermediateCert, 'intermediate-cert', 'server.ssl.path.intermediateCert', (onSuccess) => { const 
intermediateCert = options.intermediateCert if (!AppHelper.isFileExists(intermediateCert)) { logger.error(ErrorMessages.INVALID_FILE_PATH) return } - config.set('Server:IntermediateCert', intermediateCert) + config.set('server.ssl.path.intermediateCert', intermediateCert) onSuccess() }) - await updateConfig(options.logDir, 'log-dir', 'Service:LogsDirectory', (onSuccess) => { - config.set('Service:LogsDirectory', options.logDir) + await updateConfig(options.logDir, 'log-dir', 'log.directory', (onSuccess) => { + config.set('log.directory', options.logDir) onSuccess() }) - await updateConfig(options.logSize, 'log-size', 'Service:LogsFileSize', (onSuccess) => { - config.set('Service:LogsFileSize', options.logSize * 1024) + await updateConfig(options.logSize, 'log-size', 'log.fileSize', (onSuccess) => { + config.set('log.fileSize', options.logSize * 1024) onSuccess() }) - await updateConfig(options.logSize, 'log-file-counr', 'Service:LogsFileCount', (onSuccess) => { - config.set('Service:LogsFileCount', options.logFileCount) + await updateConfig(options.logFileCount, 'log-file-count', 'log.fileCount', (onSuccess) => { + config.set('log.fileCount', options.logFileCount) onSuccess() }) } @@ -215,14 +215,14 @@ const updateConfig = async function (newConfigValue, cliConfigName, configName, const _listConfigOptions = function () { const configuration = { - 'Port': config.get('Server:Port'), - 'SSL key directory': config.get('Server:SslKey'), - 'SSL certificate directory': config.get('Server:SslCert'), - 'Intermediate key directory': config.get('Server:IntermediateCert'), - 'Log files directory': config.get('Service:LogsDirectory'), - 'Log files size': config.get('Service:LogsFileSize'), - 'Log files count': config.get('Service:LogsFileCount'), - 'Dev mode': config.get('Server:DevMode') + 'Port': config.get('server.port'), + 'SSL key directory': config.get('server.ssl.path.key'), + 'SSL certificate directory': config.get('server.ssl.path.cert'), + 'Intermediate key 
directory': config.get('server.ssl.path.intermediateCert'), + 'Log files directory': config.get('log.directory'), + 'Log files size': config.get('log.fileSize'), + 'Log files count': config.get('log.fileCount'), + 'Dev mode': config.get('server.devMode') } const result = Object.keys(configuration) @@ -234,7 +234,7 @@ const _listConfigOptions = function () { const _changeDevModeState = async function (options) { const enableDevMode = AppHelper.validateBooleanCliOptions(options.on, options.off) - config.set('Server:DevMode', enableDevMode) + config.set('server.devMode', enableDevMode) logger.cliRes('Dev mode state updated successfully.') } diff --git a/src/cli/start.js b/src/cli/start.js index a54e2bee..0b169502 100644 --- a/src/cli/start.js +++ b/src/cli/start.js @@ -43,11 +43,11 @@ class Start extends BaseCLIHandler { daemon._options.silent = false } const configuration = { - devMode: config.get('Server:DevMode'), - port: config.get('Server:Port'), - sslKey: config.get('Server:SslKey'), - sslCert: config.get('Server:SslCert'), - intermedKey: config.get('Server:IntermediateCert') + devMode: config.get('server.devMode'), + port: config.get('server.port'), + sslKey: config.get('server.ssl.path.key'), + sslCert: config.get('server.ssl.path.cert'), + intermedKey: config.get('server.ssl.path.intermediateCert') } const pid = daemon.status() @@ -76,8 +76,7 @@ function checkDaemon (daemon, configuration) { iterationsCount++ const pid = daemon.status() if (pid === 0) { - logger.error('Error: port is probably allocated, or ssl_key or ssl_cert or intermediate_cert ' + - 'is either missing or invalid.') + logger.error('Error: port is probably allocated, or ssl_key or ssl_cert is either missing or invalid.') return reject(new Error('Error starting ioFog-Controller')) } @@ -95,8 +94,8 @@ function checkDaemon (daemon, configuration) { } function checkServerProtocol (configuration) { - const { devMode, port, sslKey, sslCert, intermedKey } = configuration - if (!devMode && sslKey && 
sslCert && intermedKey) { + const { devMode, port, sslKey, sslCert } = configuration + if (!devMode && sslKey && sslCert) { logger.cliRes(`==> 🌎 HTTPS server listening on port ${port}. Open up https://localhost:${port}/ in your browser.`) } else { logger.cliRes(`==> 🌎 Listening on port ${port}. Open up http://localhost:${port}/ in your browser.`) diff --git a/src/config/constants.js b/src/config/constants.js deleted file mode 100644 index 1167cf99..00000000 --- a/src/config/constants.js +++ /dev/null @@ -1,31 +0,0 @@ -/* - * ******************************************************************************* - * * Copyright (c) 2023 Datasance Teknoloji A.S. - * * - * * This program and the accompanying materials are made available under the - * * terms of the Eclipse Public License v. 2.0 which is available at - * * http://www.eclipse.org/legal/epl-2.0 - * * - * * SPDX-License-Identifier: EPL-2.0 - * ******************************************************************************* - * - */ - -module.exports = { - 'App:Name': 'iofog-controller', - 'Viewer:Port': 8008, - - 'Server:Port': 51121, - 'Server:DevMode': false, - - 'Service:LogsDirectory': '/var/log/iofog-controller', - 'Service:LogsFileSize': 104857600, - 'Service:LogsFileCount': 10, - - 'Settings:DefaultJobIntervalSeconds': 120, - 'Settings:FogTokenExpirationIntervalSeconds': 3600, - 'Settings:FogStatusUpdateIntervalSeconds': 30, - 'Settings:FogStatusUpdateTolerance': 3, - - 'Diagnostics:DiagnosticDir': 'diagnostic' -} diff --git a/src/config/controller.yaml b/src/config/controller.yaml new file mode 100644 index 00000000..1ff9e67f --- /dev/null +++ b/src/config/controller.yaml @@ -0,0 +1,111 @@ +# Application Configuration +app: + name: pot-controller # Application name + controlPlane: Remote # Control plane type: Remote or Kubernetes or Local + +# Server Configuration +server: + port: 51121 # Server port number + devMode: true # Development mode flag + # ssl: + # path: + # key: "" # SSL key file path + # cert: 
"" # SSL certificate file path + # intermediateCert: "" # Intermediate certificate file path + # # base64: + # # key: # SSL key in base64 format + # # cert: # SSL certificate in base64 format + # # intermediateCert: # Intermediate certificate in base64 format + +# Viewer Configuration +viewer: + port: 8008 # Viewer port number + url: "" # Viewer URL + +# Logging Configuration +log: + level: info + directory: /var/log/iofog-controller # Log directory + fileSize: 1073741824 # Maximum log file size in bytes (1GB) + fileCount: 10 # Maximum number of log files + +# Settings Configuration +settings: + # defaultJobInterval: 120 # Default job interval in seconds + fogStatusUpdateInterval: 30 # Fog status update interval in seconds + fogStatusUpdateTolerance: 3 # Fog status update tolerance + +# Database Configuration +database: + provider: sqlite # Database provider (sqlite/mysql/postgres) + # mysql: + # host: "" # MySQL host + # port: 3306 # MySQL port + # username: "" # MySQL username + # password: "" # MySQL password + # database: "" # MySQL database name + # postgres: + # host: "" # PostgreSQL host + # port: 5432 # PostgreSQL port + # username: "" # PostgreSQL username + # password: "" # PostgreSQL password + # database: "" # PostgreSQL database name + sqlite: + databaseName: dev_database.sqlite # SQLite database file name + logging: false # Enable SQLite query logging + transactionType: IMMEDIATE # SQLite transaction type + pool: + maxActive: 1 # Maximum active connections + max: 1 # Maximum total connections + min: 0 # Minimum connections + idle: 20000 # Idle timeout in milliseconds + +# Auth Configuration +# auth: +# realm: # Keycloak realm +# realmKey: # Realm public key +# url: # Keycloak authentication server URL +# sslRequired: # SSL requirement level +# client: +# id: # ControllerClient ID +# secret: # ControllerClient Client secret +# viewerClient: # Viewer client ID + +# Public Ports Configuration +publicPorts: + range: "6001-7999" # Public ports range + +# 
System Images Configuration +systemImages: + router: + "1": "ghcr.io/datasance/router:latest" + "2": "ghcr.io/datasance/router:latest" + proxy: + "1": "ghcr.io/datasance/proxy:latest" + "2": "ghcr.io/datasance/proxy:latest" + +# Diagnostics Configuration +diagnostics: + directory: "diagnostic" # Diagnostics directory + + +# OpenTelemetry Configuration +# otel: +# enabled: false # true/disable OpenTelemetry +# serviceName: "pot-controller" # Service name for traces +# endpoint: "http://localhost:4318/v1/traces" # OTel endpoint +# protocol: http/protobuf # Exporter OTLP Protocol (grpc or http/protobuf) +# headers: "" # A list of headers to apply to all outgoing data (traces, metrics, and logs). +# resourceAttributes: "service.version=3.5.0,deployment.environment=production,team=devops" # Resource attributes +# metrics: +# exporter: otlp # Otel metrics exporter +# interval: 1000 # Metrics collection interval in ms +# logs: +# level: info # Log level +# propagators: "tracecontext,baggage" # Context propagation +# traces: +# sampler: "parentbased_traceidratio" # Sampler to be used for traces +# samplerArg: 0.1 +# batch: # Batch size and timeout for telemetry data +# size: 512 # Maximum batch size +# delay: 1000 # Delay interval (in milliseconds) between two consecutive exports diff --git a/src/config/default.json b/src/config/default.json deleted file mode 100644 index ff9ea967..00000000 --- a/src/config/default.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "App": { - "Name": "iofog-controller" - }, - "Viewer": { - "Port": 8008 - }, - "Server": { - "Port": 51121, - "DevMode": false - }, - "Service": { - "LogsDirectory": "/var/log/iofog-controller", - "LogsFileSize": 104857600, - "LogsFileCount": 10 - }, - "Settings": { - "DefaultJobIntervalSeconds": 120, - "FogTokenExpirationIntervalSeconds": 3600, - "FogStatusUpdateIntervalSeconds": 30, - "FogStatusUpdateTolerance": 3 - }, - "Diagnostics": { - "DiagnosticDir": "diagnostic" - }, - "PublicPorts": { - "Range": "6001-7999" - }, 
- "SystemImages": { - "Router": { - "1": "ghcr.io/datasance/router:latest", - "2": "ghcr.io/datasance/router:latest" - }, - "Proxy": { - "1": "ghcr.io/datasance/proxy:latest", - "2": "ghcr.io/datasance/proxy:latest" - } - } -} diff --git a/src/config/development.json b/src/config/development.json deleted file mode 100644 index 0fed8880..00000000 --- a/src/config/development.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "App": { - "Name": "iofog-controller-dev" - }, - "Viewer": { - "Port": 8008 - }, - "Server": { - "Port": 51121, - "DevMode": true - }, - "Service": { - "LogsDirectory": "/var/log/iofog-controller", - "LogsFileSize": 104857600, - "LogsFileCount": 10 - }, - "Settings": { - "FogTokenExpirationIntervalSeconds": 3600, - "FogStatusUpdateIntervalSeconds": 30, - "FogStatusUpdateTolerance": 3 - }, - "Tunnel": { - "Username": "username", - "Password": "password", - "Host": "23.253.111.231", - "RsaKey": "rsa", - "Lport": 22, - "PortRange": "2000-10000" - }, - "Diagnostics": { - "DiagnosticDir": "diagnostic" - }, - "Database": { - "Provider": "sqlite", - "Config": { - "sqlite": { - "databaseName": "dev_database.sqlite", - "logging": false, - "transactionType": "IMMEDIATE", - "pool": { - "maxactive": 1, - "max": 1, - "min": 0, - "idle": 20000 - } - }, - "mysql": { - "host": "", - "port": "", - "username": "", - "password": "", - "databaseName": "" - }, - "postgres": { - "host": "", - "port": "", - "username": "", - "password": "", - "databaseName": "" - } - } - } -} \ No newline at end of file diff --git a/src/config/env-mapping.js b/src/config/env-mapping.js new file mode 100644 index 00000000..5398bdf0 --- /dev/null +++ b/src/config/env-mapping.js @@ -0,0 +1,79 @@ +module.exports = { + // Application Configuration + 'APP_NAME': 'app.name', + 'CONTROL_PLANE': 'app.controlPlane', + + // Server Configuration + 'SERVER_PORT': 'server.port', + 'SERVER_DEV_MODE': 'server.devMode', + + // Viewer Configuration + 'VIEWER_PORT': 'viewer.port', + 'VIEWER_URL': 'viewer.url', + 
+ // Logging Configuration + 'LOG_LEVEL': 'log.level', + 'LOG_DIRECTORY': 'log.directory', + 'LOG_FILE_SIZE': 'log.fileSize', + 'LOG_FILE_COUNT': 'log.fileCount', + + // Settings Configuration + 'FOG_STATUS_UPDATE_INTERVAL': 'settings.fogStatusUpdateInterval', + 'FOG_STATUS_UPDATE_TOLERANCE': 'settings.fogStatusUpdateTolerance', + + // Database Configuration + 'DB_PROVIDER': 'database.provider', + // These will map to the appropriate provider based on DB_PROVIDER + 'DB_HOST': { + path: (provider) => `database.${provider}.host` + }, + 'DB_PORT': { + path: (provider) => `database.${provider}.port` + }, + 'DB_USERNAME': { + path: (provider) => `database.${provider}.username` + }, + 'DB_PASSWORD': { + path: (provider) => `database.${provider}.password` + }, + 'DB_NAME': { + path: (provider) => `database.${provider}.databaseName` + }, + + // Auth Configuration + 'KC_REALM': 'auth.realm', + 'KC_REALM_KEY': 'auth.realmKey', + 'KC_URL': 'auth.url', + 'KC_SSL_REQ': 'auth.sslRequired', + 'KC_CLIENT': 'auth.client.id', + 'KC_CLIENT_SECRET': 'auth.client.secret', + 'KC_VIEWER_CLIENT': 'auth.viewerClient', + + // Public Ports Configuration + 'PUBLIC_PORTS_RANGE': 'publicPorts.range', + + // System Images Configuration + 'ROUTER_IMAGE_1': 'systemImages.router.1', + 'ROUTER_IMAGE_2': 'systemImages.router.2', + 'PROXY_IMAGE_1': 'systemImages.proxy.1', + 'PROXY_IMAGE_2': 'systemImages.proxy.2', + + // Diagnostics Configuration + 'DIAGNOSTICS_DIRECTORY': 'diagnostics.directory', + + // OpenTelemetry Configuration + 'ENABLE_TELEMETRY': 'otel.enabled', + 'OTEL_SERVICE_NAME': 'otel.serviceName', + 'OTEL_EXPORTER_OTLP_ENDPOINT': 'otel.endpoint', + 'OTEL_EXPORTER_OTLP_PROTOCOL': 'otel.protocol', + 'OTEL_EXPORTER_OTLP_HEADERS': 'otel.headers', + 'OTEL_RESOURCE_ATTRIBUTES': 'otel.resourceAttributes', + 'OTEL_METRICS_EXPORTER': 'otel.metrics.exporter', + 'OTEL_METRICS_INTERVAL': 'otel.metrics.interval', + 'OTEL_LOG_LEVEL': 'otel.logs.level', + 'OTEL_PROPAGATORS': 'otel.propagators', + 
'OTEL_TRACES_SAMPLER': 'otel.traces.sampler', + 'OTEL_TRACES_SAMPLER_ARG': 'otel.traces.samplerArg', + 'OTEL_BATCH_SIZE': 'otel.batch.size', + 'OTEL_BATCH_DELAY': 'otel.batch.delay' +} diff --git a/src/config/index.js b/src/config/index.js index a2b6dc90..34cfadec 100644 --- a/src/config/index.js +++ b/src/config/index.js @@ -13,39 +13,158 @@ const nconf = require('nconf') const path = require('path') -const constants = require('./constants') +const fs = require('fs') +const yaml = require('js-yaml') class Config { constructor () { - nconf.env({ separator: '_' }) - const environment = nconf.get('NODE:ENV') || 'production' - this.load(environment) + this.envMapping = require('./env-mapping') + this.configPath = process.env.CONFIG_PATH || path.join(__dirname, 'controller.yaml') + this.config = null + this.load() } - get (key, defaultValue) { - let value = nconf.get(key) + load () { + // 1. Load YAML config file + this.loadYamlConfig() + + // 2. Set OTEL environment variables from config + this.setOtelEnvVars() + } + + loadYamlConfig () { + try { + console.log('Loading config from:', this.configPath) + const configContent = fs.readFileSync(this.configPath, 'utf8') + this.config = yaml.load(configContent) + + // Clear any existing configuration + nconf.reset() + + // Set the entire config as defaults + nconf.defaults(this.config) + + // Set environment variables + nconf.env({ + separator: '_', + parseValues: true, + transform: (obj) => { + // Skip OTEL environment variables as they are handled separately + if (obj.key.startsWith('OTEL_') || obj.key === 'ENABLE_TELEMETRY') { + return null + } + + const mapping = this.envMapping[obj.key] + if (!mapping) { + return null + } + + // Handle database configuration + if (typeof mapping === 'object' && mapping.path) { + const provider = this.get('database.provider', 'sqlite') + return { + key: mapping.path(provider), + value: this.parseEnvValue(obj.value) + } + } + + return { + key: mapping, + value: 
this.parseEnvValue(obj.value) + } + } + }) + + // Get all environment overrides + const envOverrides = nconf.get() + + // Create a deep copy of the base config + const finalConfig = JSON.parse(JSON.stringify(this.config)) + + // Merge environment overrides into the final config + Object.entries(envOverrides).forEach(([key, value]) => { + if (key.includes('.')) { + const keys = key.split('.') + let current = finalConfig + for (let i = 0; i < keys.length - 1; i++) { + if (!current[keys[i]]) { + current[keys[i]] = {} + } + current = current[keys[i]] + } + current[keys[keys.length - 1]] = value + } else if (!key.includes(':') && key !== 'type') { + finalConfig[key] = value + } + }) - if (value === undefined || value === null) { - value = constants[key] + // Remove any nconf internal keys and the type field + Object.keys(finalConfig).forEach(key => { + if (key.includes(':') || key === 'type') { + delete finalConfig[key] + } + }) + + // Reset nconf and set the final merged config + nconf.reset() + nconf.defaults(finalConfig) + // Log the final merged config + } catch (error) { + console.error(`Error loading config file: ${error.message}`) + throw error } + } - if (value === undefined || value === null) { - value = defaultValue + setOtelEnvVars () { + console.log('Setting OTEL environment variables from config...') + // Only set OTEL env vars if they're not already set + for (const [envVar, configPath] of Object.entries(this.envMapping)) { + if (envVar.startsWith('OTEL_') || envVar === 'ENABLE_TELEMETRY') { + const value = this.get(configPath) + if (value !== undefined && !process.env[envVar]) { + const formattedValue = this.formatValue(value) + process.env[envVar] = formattedValue + } + } } + } + parseEnvValue (value) { + // Handle different types + if (value === 'true') return true + if (value === 'false') return false + if (!isNaN(value) && value !== '') return Number(value) return value } - set (key, value) { - const environment = nconf.get('NODE:ENV') || 'production' 
+ formatValue (value) { + if (typeof value === 'boolean') { + return value.toString() + } + if (Array.isArray(value)) { + return value.join(',') + } + if (typeof value === 'object') { + return Object.entries(value) + .map(([key, val]) => `${key}=${val}`) + .join(',') + } + return value.toString() + } + + get (key, defaultValue) { + // Replace dots with colons for nconf compatibility + const nconfKey = key.replace(/\./g, ':') + let value = nconf.get(nconfKey) + return value !== undefined ? value : defaultValue + } - nconf.stores[environment].set(key, value) - nconf.stores[environment].saveSync() + set (key, value) { + nconf.set(key, value) } - load (environment) { - nconf.file(environment, path.join(__dirname, environment.toLowerCase() + '.json')) - nconf.file('default', path.join(__dirname, 'default.json')) + getAll () { + return nconf.get() } } diff --git a/src/config/keycloak.js b/src/config/keycloak.js index d1dc8ca2..0f510064 100644 --- a/src/config/keycloak.js +++ b/src/config/keycloak.js @@ -1,16 +1,50 @@ const session = require('express-session') const Keycloak = require('keycloak-connect') +const config = require('./index') +const logger = require('../logger') + +// Mock Keycloak implementation for development mode +class MockKeycloak { + constructor () { + this.protect = (roles) => { + return async (req, res, next) => { + // In dev mode, we just add mock user info to the request + req.kauth = { + grant: { + access_token: { + content: { + preferred_username: 'dev-user', + realm_access: { + roles: ['SRE', 'Developer', 'Viewer'] + } + } + } + } + } + return next() + } + } + + // Add middleware method to match real Keycloak interface + this.middleware = () => { + return (req, res, next) => { + // In dev mode, we just pass through the middleware + return next() + } + } + } +} const keycloakConfig = { - realm: process.env.KC_REALM, - 'realm-public-key': process.env.KC_REALM_KEY, - 'auth-server-url': `${process.env.KC_URL}`, - 'ssl-required': 
process.env.KC_SSL_REQ, - resource: process.env.KC_CLIENT, + realm: process.env.KC_REALM || config.get('auth.realm'), + 'realm-public-key': process.env.KC_REALM_KEY || config.get('auth.realmKey'), + 'auth-server-url': process.env.KC_URL || config.get('auth.url'), + 'ssl-required': process.env.KC_SSL_REQ || config.get('auth.sslRequired'), + resource: process.env.KC_CLIENT || config.get('auth.client.id'), 'bearer-only': true, 'verify-token-audience': true, credentials: { - secret: process.env.KC_CLIENT_SECRET + secret: process.env.KC_CLIENT_SECRET || config.get('auth.client.secret') }, 'use-resource-role-mappings': true, 'confidential-port': 0 @@ -19,14 +53,51 @@ const keycloakConfig = { let keycloak let memoryStore +function isAuthConfigured () { + const requiredConfigs = [ + 'auth.realm', + 'auth.realmKey', + 'auth.url', + 'auth.client.id', + 'auth.client.secret' + ] + return requiredConfigs.every(configKey => { + const value = config.get(configKey) + return value !== undefined && value !== null && value !== '' + }) +} + function initKeycloak () { if (keycloak) { return keycloak + } + + const isDevMode = config.get('server.devMode', true) + const hasAuthConfig = isAuthConfigured() + + if (!hasAuthConfig && isDevMode) { + // Initialize mock Keycloak for development + keycloak = new MockKeycloak() + logger.warn('Keycloak initialized in development mode (no auth configuration)') + logger.warn('WARNING: All routes are unprotected in this mode') + } else if (!hasAuthConfig) { + // Throw error in production if auth not configured + const error = new Error('Auth configuration required in production mode') + logger.error('Failed to initialize Keycloak:', error) + throw error } else { - memoryStore = new session.MemoryStore() - keycloak = new Keycloak({ store: memoryStore }, keycloakConfig) - return keycloak + // Initialize real Keycloak + try { + memoryStore = new session.MemoryStore() + keycloak = new Keycloak({ store: memoryStore }, keycloakConfig) + 
logger.info('Keycloak initialized successfully with auth configuration') + } catch (error) { + logger.error('Error initializing Keycloak:', error) + throw error + } } + + return keycloak } function getKeycloak () { diff --git a/src/config/production.json b/src/config/production.json deleted file mode 100644 index 82bd7708..00000000 --- a/src/config/production.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "App": { - "Name": "iofog-controller" - }, - "Viewer": { - "Port": 8008 - }, - "Server": { - "Port": 51121, - "DevMode": true - }, - "Service": { - "LogsDirectory": "/var/log/iofog-controller", - "LogsFileSize": 104857600, - "LogsFileCount": 10 - }, - "Settings": { - "FogTokenExpirationIntervalSeconds": 3600, - "FogStatusUpdateIntervalSeconds": 30, - "FogStatusUpdateTolerance": 3 - }, - "PublicPorts": { - "Provider": "default" - }, - "Database": { - "Provider": "sqlite", - "Config": { - "sqlite": { - "databaseName": "prod_database.sqlite", - "logging": false, - "transactionType": "IMMEDIATE", - "pool": { - "maxactive": 1, - "max": 1, - "min": 0, - "idle": 20000 - } - }, - "mysql": { - "host": "", - "port": "", - "username": "", - "password": "", - "databaseName": "" - }, - "postgres": { - "host": "", - "port": "", - "username": "", - "password": "", - "databaseName": "" - } - } - } -} - diff --git a/src/config/telemetry.js b/src/config/telemetry.js new file mode 100644 index 00000000..dd969184 --- /dev/null +++ b/src/config/telemetry.js @@ -0,0 +1,78 @@ +const { NodeSDK } = require('@opentelemetry/sdk-node') +const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-http') +const { HttpInstrumentation } = require('@opentelemetry/instrumentation-http') +const { ExpressInstrumentation } = require('@opentelemetry/instrumentation-express') +const { + Resource, + envDetectorSync, + hostDetectorSync, + processDetectorSync +} = require('@opentelemetry/resources') +const logger = require('../logger') + +// Workaround for async attributes +function 
awaitAttributes (detector) { + return { + async detect (config) { + const resource = detector.detect(config) + if (resource.waitForAsyncAttributes) { + await resource.waitForAsyncAttributes() + } + return resource + } + } +} + +// Initialize OpenTelemetry +const sdk = new NodeSDK({ + serviceName: process.env.OTEL_SERVICE_NAME || 'pot-controller', + resource: new Resource({}), + resourceDetectors: [ + awaitAttributes(envDetectorSync), + awaitAttributes(processDetectorSync), + awaitAttributes(hostDetectorSync) + ], + traceExporter: new OTLPTraceExporter({ + url: process.env.OTEL_EXPORTER_OTLP_ENDPOINT || 'http://localhost:4318/v1/traces', + headers: {} + }), + instrumentations: [ + new HttpInstrumentation(), + new ExpressInstrumentation() + ] +}) + +// Start the SDK +async function startTelemetry () { + const isTelemetryEnabled = process.env.ENABLE_TELEMETRY === 'true' + if (!isTelemetryEnabled) { + logger.info('Telemetry is disabled via ENABLE_TELEMETRY environment variable') + return + } + + try { + await sdk.start() + logger.info('OpenTelemetry initialized successfully') + } catch (error) { + logger.error('Error initializing OpenTelemetry:', error) + process.exit(1) + } +} + +// Handle process termination +process.on('SIGTERM', () => { + if (process.env.ENABLE_TELEMETRY !== 'true') return + + try { + sdk.shutdown() + } catch (error) { + logger.error('Error terminating OpenTelemetry:', error) + } finally { + process.exit(0) + } +}) + +module.exports = { + sdk, + startTelemetry +} diff --git a/src/config/test.json b/src/config/test.json deleted file mode 100644 index 04863751..00000000 --- a/src/config/test.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Server": { - "DevMode": true - }, - "Database": { - "Provider": "sqlite", - "Config": { - "databaseName": "test_database.sqlite", - "logging": false, - "transactionType": "IMMEDIATE", - "pool": { - "maxactive": 1, - "max": 1, - "min": 0, - "idle": 20000 - } - } - } -} \ No newline at end of file diff --git 
a/src/data/managers/fog-used-token-manager.js b/src/data/managers/fog-used-token-manager.js new file mode 100644 index 00000000..a9274ded --- /dev/null +++ b/src/data/managers/fog-used-token-manager.js @@ -0,0 +1,101 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const models = require('../models') +const logger = require('../../logger') +const { Op } = require('sequelize') + +class FogUsedTokenManager { + /** + * Store a JTI (JWT ID) to mark it as used + * @param {string} jti - The JWT ID + * @param {string} fogUuid - The UUID of the fog node + * @param {number} exp - The expiration timestamp + * @param {Object} transaction - Sequelize transaction + * @returns {Promise} + */ + static async storeJti (jti, fogUuid, exp, transaction) { + try { + if (!transaction || transaction.fakeTransaction) { + // If no transaction or fake transaction, create a new one + await models.FogUsedToken.create({ + jti, + iofogUuid: fogUuid, + expiryTime: exp + }) + } else { + // Use the provided transaction + await models.FogUsedToken.create({ + jti, + iofogUuid: fogUuid, + expiryTime: exp + }, { transaction }) + } + } catch (error) { + logger.error(`Failed to store JTI: ${error.message}`) + throw error + } + } + + /** + * Check if a JTI has already been used + * @param {string} jti - The JWT ID to check + * @param {Object} transaction - Sequelize transaction + * @returns {Promise} True if the JTI has been used, false otherwise + */ + static async isJtiUsed (jti, transaction) { + try { + let token + if (!transaction || transaction.fakeTransaction) { + // If no 
transaction or fake transaction, query without transaction + token = await models.FogUsedToken.findOne({ + where: { jti } + }) + } else { + // Use the provided transaction + token = await models.FogUsedToken.findOne({ + where: { jti }, + transaction + }) + } + return !!token + } catch (error) { + logger.error(`Failed to check JTI: ${error.message}`) + throw error + } + } + + /** + * Clean up expired JTIs + * @returns {Promise} Number of deleted tokens + */ + static async cleanupExpiredJtis () { + try { + const now = new Date().getTime() / 1000 // Convert to Unix timestamp + const result = await models.FogUsedToken.destroy({ + where: { + expiryTime: { + [Op.lt]: now + } + } + }) + logger.debug(`Cleaned up ${result} expired JTIs`) + return result + } catch (error) { + logger.error(`Failed to cleanup expired JTIs: ${error.message}`) + throw error + } + } +} + +module.exports = FogUsedTokenManager diff --git a/src/data/managers/iofog-manager.js b/src/data/managers/iofog-manager.js index 35a7d894..a0e98c76 100644 --- a/src/data/managers/iofog-manager.js +++ b/src/data/managers/iofog-manager.js @@ -16,7 +16,6 @@ const models = require('../models') const Fog = models.Fog const Tags = models.Tags -const FogAccessToken = models.FogAccessToken const Microservice = models.Microservice const Strace = models.StraceDiagnostics @@ -65,19 +64,6 @@ class FogManager extends BaseManager { }) } - // no transaction required here, used by auth decorator - checkToken (token) { - return Fog.findOne({ - include: [{ - model: FogAccessToken, - as: 'accessToken', - where: { - token: token - } - }] - }) - } - // no transaction required here, used by agent-last-active decorator updateLastActive (uuid, timestamp) { return Fog.update({ diff --git a/src/data/managers/iofog-public-key-manager.js b/src/data/managers/iofog-public-key-manager.js new file mode 100644 index 00000000..93004f09 --- /dev/null +++ b/src/data/managers/iofog-public-key-manager.js @@ -0,0 +1,89 @@ +/* + * 
******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const BaseManager = require('./base-manager') +const models = require('../models') +const FogPublicKey = models.FogPublicKey + +class FogPublicKeyManager extends BaseManager { + getEntity () { + return FogPublicKey + } + + // Find public key by fog UUID + findByFogUuid (fogUuid, transaction) { + const options = transaction.fakeTransaction + ? { + where: { + iofogUuid: fogUuid + } + } + : { + where: { + iofogUuid: fogUuid + }, + transaction: transaction + } + + return FogPublicKey.findOne(options) + } + + // Update or create public key for a fog + updateOrCreate (fogUuid, publicKey, transaction) { + const options = transaction.fakeTransaction + ? { + where: { + iofogUuid: fogUuid + } + } + : { + where: { + iofogUuid: fogUuid + }, + transaction: transaction + } + + return FogPublicKey.findOne(options).then((existingKey) => { + if (existingKey) { + const updateOptions = transaction.fakeTransaction + ? { + where: { + iofogUuid: fogUuid + } + } + : { + where: { + iofogUuid: fogUuid + }, + transaction: transaction + } + + return FogPublicKey.update({ + publicKey: publicKey + }, updateOptions) + } else { + const createOptions = transaction.fakeTransaction + ? 
{} + : { transaction: transaction } + + return FogPublicKey.create({ + iofogUuid: fogUuid, + publicKey: publicKey + }, createOptions) + } + }) + } +} + +const instance = new FogPublicKeyManager() +module.exports = instance diff --git a/src/data/migrations/db_migration_v1.0.2.sql b/src/data/migrations/db_migration_v1.0.2.sql index 2eed15b6..8f802a65 100644 --- a/src/data/migrations/db_migration_v1.0.2.sql +++ b/src/data/migrations/db_migration_v1.0.2.sql @@ -612,4 +612,29 @@ CREATE TABLE IF NOT EXISTS MicroserviceCapDrop ( CREATE INDEX idx_microservice_capDrop_microserviceUuid ON MicroserviceCapDrop (microservice_uuid); -ALTER TABLE Microservices ADD COLUMN annotations TEXT; \ No newline at end of file +ALTER TABLE Microservices ADD COLUMN annotations TEXT; + +CREATE TABLE IF NOT EXISTS FogPublicKeys ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + public_key TEXT, + iofog_uuid VARCHAR(32), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_public_keys_iofogUuid ON FogPublicKeys (iofog_uuid); + +CREATE TABLE IF NOT EXISTS FogUsedTokens ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + jti VARCHAR(255) NOT NULL, + iofog_uuid VARCHAR(32), + expiry_time DATETIME NOT NULL, + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_used_tokens_iofogUuid ON FogUsedTokens (iofog_uuid); + +DROP TABLE IF EXISTS FogAccessTokens; \ No newline at end of file diff --git a/src/data/models/fog.js b/src/data/models/fog.js index df198da4..3b8a02c8 100644 --- a/src/data/models/fog.js +++ b/src/data/models/fog.js @@ -329,11 +329,21 @@ module.exports = (sequelize, DataTypes) => { as: 'accessToken' }) + Fog.hasOne(models.FogPublicKey, { + foreignKey: 'iofog_uuid', + as: 'publicKey' + }) + Fog.hasMany(models.Microservice, { foreignKey: 'iofog_uuid', as: 'microservice' }) + Fog.hasMany(models.FogUsedToken, { + 
foreignKey: 'iofog_uuid', + as: 'jti' + }) + Fog.hasOne(models.Router, { foreignKey: 'iofog_uuid', as: 'router' diff --git a/src/data/models/fogUsedToken.js b/src/data/models/fogUsedToken.js new file mode 100644 index 00000000..a46323ff --- /dev/null +++ b/src/data/models/fogUsedToken.js @@ -0,0 +1,42 @@ +'use strict' + +const { convertToInt } = require('../../helpers/app-helper') + +module.exports = (sequelize, DataTypes) => { + const FogUsedToken = sequelize.define('FogUsedToken', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + jti: { + type: DataTypes.STRING(255), + primaryKey: true, + allowNull: false + }, + expiryTime: { + type: DataTypes.BIGINT, + get () { + return convertToInt(this.getDataValue('expiryTime'), 0) + }, + field: 'expiry_time' + } + }, { + tableName: 'FogUsedTokens', + timestamps: true, + underscored: true + }) + FogUsedToken.associate = function (models) { + FogUsedToken.belongsTo(models.Fog, { + foreignKey: { + name: 'iofogUuid', + field: 'iofog_uuid' + }, + as: 'iofog', + onDelete: 'cascade' + }) + } + return FogUsedToken +} diff --git a/src/data/models/fogpublickey.js b/src/data/models/fogpublickey.js new file mode 100644 index 00000000..37413b00 --- /dev/null +++ b/src/data/models/fogpublickey.js @@ -0,0 +1,42 @@ +'use strict' + +module.exports = (sequelize, DataTypes) => { + const FogPublicKey = sequelize.define('FogPublicKey', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + publicKey: { + type: DataTypes.TEXT, + field: 'public_key' + }, + createdAt: { + type: DataTypes.DATE, + field: 'created_at' + }, + updatedAt: { + type: DataTypes.DATE, + field: 'updated_at' + } + }, { + tableName: 'FogPublicKeys', + timestamps: true, + underscored: true + }) + + FogPublicKey.associate = function (models) { + FogPublicKey.belongsTo(models.Fog, { + foreignKey: { + name: 'iofogUuid', + field: 'iofog_uuid' 
+ }, + as: 'iofog', + onDelete: 'cascade' + }) + } + + return FogPublicKey +} diff --git a/src/data/models/index.js b/src/data/models/index.js index 5a2886c8..71666d16 100644 --- a/src/data/models/index.js +++ b/src/data/models/index.js @@ -8,6 +8,7 @@ const constants = require('../constants') const basename = path.basename(__filename) const db = {} const config = require('../../config') +const logger = require('../../logger') const databaseProvider = require('../providers/database-factory') const sequelize = databaseProvider.sequelize @@ -52,10 +53,10 @@ db.initDB = async (isStart) => { // Check if the database file exists if (fs.existsSync(sqliteDbPath)) { - console.log('Database file exists. Running migrations only...') + logger.info('Database file exists. Running migrations only...') await databaseProvider.runMigration(sqliteDbPath) // Ensure migration finishes before moving on } else { - console.log('Database file does not exist. Running migrations and seeders...') + logger.info('Database file does not exist. 
Running migrations and seeders...') await databaseProvider.runMigration(sqliteDbPath) // Wait for migration to finish await databaseProvider.runSeeder(sqliteDbPath) // Wait for seeding to finish } @@ -63,8 +64,8 @@ db.initDB = async (isStart) => { // Configure system images const fogTypes = await db.FogType.findAll({}) - await configureImage(db, constants.ROUTER_CATALOG_NAME, fogTypes, config.get('SystemImages:Router', {})) - await configureImage(db, constants.PROXY_CATALOG_NAME, fogTypes, config.get('SystemImages:Proxy', {})) + await configureImage(db, constants.ROUTER_CATALOG_NAME, fogTypes, config.get('systemImages.router', {})) + await configureImage(db, constants.PROXY_CATALOG_NAME, fogTypes, config.get('systemImages.proxy', {})) } } diff --git a/src/data/providers/database-factory.js b/src/data/providers/database-factory.js index 12c17b72..3538908b 100644 --- a/src/data/providers/database-factory.js +++ b/src/data/providers/database-factory.js @@ -1,7 +1,7 @@ const config = require('../../config') function createDatabaseProvider () { - let provider = process.env.DB_PROVIDER || config.get('Database:Provider', 'sqlite') + let provider = process.env.DB_PROVIDER || config.get('database.provider', 'sqlite') if (!provider) { provider = 'sqlite' diff --git a/src/data/providers/database-provider.js b/src/data/providers/database-provider.js index 67fd47df..9ada5d73 100644 --- a/src/data/providers/database-provider.js +++ b/src/data/providers/database-provider.js @@ -1,6 +1,7 @@ const path = require('path') const fs = require('fs') const sqlite3 = require('sqlite3').verbose() +const logger = require('../../logger') class DatabaseProvider { constructor () { @@ -12,7 +13,7 @@ class DatabaseProvider { const migrationSqlPath = path.resolve(__dirname, '../migrations/db_migration_v1.0.2.sql') if (!fs.existsSync(migrationSqlPath)) { - console.error(`Migration file not found: ${migrationSqlPath}`) + logger.error(`Migration file not found: ${migrationSqlPath}`) throw new 
Error('Migration file not found') } @@ -21,10 +22,10 @@ class DatabaseProvider { let db = new sqlite3.Database(dbName, (err) => { if (err) { - console.error(err.message) + logger.error(err.message) throw err } - console.log('Connected to the SQLite database for migration.') + logger.info('Connected to the SQLite database for migration.') }) try { @@ -45,7 +46,7 @@ class DatabaseProvider { err.message.includes('already exists') || err.message.includes('duplicate') ) { - console.warn(`Ignored error: ${err.message}`) + logger.warn(`Ignored error: ${err.message}`) resolve() // Ignore specific errors } else { db.run('ROLLBACK;') // Rollback transaction on error @@ -61,16 +62,16 @@ class DatabaseProvider { // Commit the transaction if all queries succeed db.run('COMMIT;') - console.log('Migration completed successfully.') + logger.info('Migration completed successfully.') } catch (err) { - console.error('Migration failed:', err) + logger.error('Migration failed:', err) throw err } finally { db.close((err) => { if (err) { - console.error('Error closing database connection:', err.message) + logger.error('Error closing database connection:', err.message) } else { - console.log('Database connection closed after migration.') + logger.info('Database connection closed after migration.') } }) } @@ -81,7 +82,7 @@ class DatabaseProvider { const seederSqlPath = path.resolve(__dirname, '../seeders/db_seeder_v1.0.2.sql') if (!fs.existsSync(seederSqlPath)) { - console.error(`Seeder file not found: ${seederSqlPath}`) + logger.error(`Seeder file not found: ${seederSqlPath}`) throw new Error('Seeder file not found') } @@ -90,14 +91,15 @@ class DatabaseProvider { let db = new sqlite3.Database(dbName, (err) => { if (err) { - console.error(err.message) + logger.error(err.message) throw err } - console.log('Connected to the SQLite database for seeding.') + logger.info('Connected to the SQLite database for seeding.') }) try { db.serialize(() => { + db.run('PRAGMA foreign_keys=OFF;') // 
Disable foreign key checks during seeding db.run('BEGIN TRANSACTION;') // Start transaction }) @@ -113,7 +115,7 @@ class DatabaseProvider { err.message.includes('already exists') || err.message.includes('duplicate') ) { - console.warn(`Ignored error: ${err.message}`) + logger.warn(`Ignored error: ${err.message}`) resolve() // Ignore specific errors } else { db.run('ROLLBACK;') // Rollback transaction on error @@ -129,16 +131,16 @@ class DatabaseProvider { // Commit the transaction if all queries succeed db.run('COMMIT;') - console.log('Seeding completed successfully.') + logger.info('Seeding completed successfully.') } catch (err) { - console.error('Seeding failed:', err) + logger.error('Seeding failed:', err) throw err } finally { db.close((err) => { if (err) { - console.error('Error closing database connection:', err.message) + logger.error('Error closing database connection:', err.message) } else { - console.log('Database connection closed after seeding.') + logger.info('Database connection closed after seeding.') } }) } diff --git a/src/data/providers/mysql.js b/src/data/providers/mysql.js index 935e2512..3c654f8a 100644 --- a/src/data/providers/mysql.js +++ b/src/data/providers/mysql.js @@ -7,7 +7,7 @@ class MySqlDatabaseProvider extends DatabaseProvider { constructor () { super() - const mysqlConfig = config.get('Database:Config:mysql', {}) + const mysqlConfig = config.get('database.mysql', {}) mysqlConfig.dialect = 'mysql' mysqlConfig.host = process.env.DB_HOST || mysqlConfig.host mysqlConfig.port = process.env.DB_PORT || mysqlConfig.port diff --git a/src/data/providers/postgres.js b/src/data/providers/postgres.js index b5e959f0..1c8a07e1 100644 --- a/src/data/providers/postgres.js +++ b/src/data/providers/postgres.js @@ -7,7 +7,7 @@ class PostgresDatabaseProvider extends DatabaseProvider { constructor () { super() - const postgresConfig = config.get('Database:Config:postgre', {}) + const postgresConfig = config.get('database.postgres', {}) 
postgresConfig.dialect = 'postgres' postgresConfig.host = process.env.DB_HOST || postgresConfig.host postgresConfig.port = process.env.DB_PORT || postgresConfig.port diff --git a/src/data/providers/sqlite.js b/src/data/providers/sqlite.js index 1d0e5cb0..15312e32 100644 --- a/src/data/providers/sqlite.js +++ b/src/data/providers/sqlite.js @@ -9,7 +9,7 @@ class SqliteDatabaseProvider extends DatabaseProvider { constructor () { super() - const sqliteConfig = config.get('Database:Config:sqlite', {}) + const sqliteConfig = config.get('database.sqlite', {}) sqliteConfig.dialect = 'sqlite' sqliteConfig.databaseName = process.env.DB_NAME || sqliteConfig.databaseName if (!sqliteConfig.databaseName.endsWith('.sqlite')) { diff --git a/src/decorators/authorization-decorator.js b/src/decorators/authorization-decorator.js index 9883fedd..656a4d17 100644 --- a/src/decorators/authorization-decorator.js +++ b/src/decorators/authorization-decorator.js @@ -11,9 +11,8 @@ * */ const logger = require('../logger') -const config = require('../config') const FogManager = require('../data/managers/iofog-manager') -const FogAccessTokenManager = require('../data/managers/iofog-access-token-manager') +const FogKeyService = require('../services/iofog-key-service') const Errors = require('../helpers/errors') const { isTest } = require('../helpers/app-helper') @@ -24,28 +23,64 @@ function checkFogToken (f) { } const req = fArgs[0] - const token = req.headers.authorization + const authHeader = req.headers.authorization - const fog = await FogManager.checkToken(token) - - if (!fog) { - logger.error('token ' + token + ' incorrect') + if (!authHeader) { + logger.error('No authorization token provided') throw new Errors.AuthenticationError('authorization failed') } - if (Date.now() > fog.accessToken.expirationTime) { - logger.error('token ' + token + ' expired') - throw new Errors.AuthenticationError('token expired') + + // Extract token from Bearer scheme + const [scheme, token] = authHeader.split(' 
') + if (scheme.toLowerCase() !== 'bearer' || !token) { + logger.error('Invalid authorization scheme') + throw new Errors.AuthenticationError('authorization failed') } - fArgs.push(fog) + try { + // Debug log for JWT + logger.debug({ token }, 'Received JWT') + + // First, decode the JWT without verification to get the fog UUID + const tokenParts = token.split('.') + if (tokenParts.length !== 3) { + logger.error('Invalid JWT format') + throw new Errors.AuthenticationError('authorization failed') + } + + const payload = JSON.parse(Buffer.from(tokenParts[1], 'base64').toString()) + const fogUuid = payload.sub + logger.debug({ payload }, 'JWT payload') + + if (!fogUuid) { + logger.error('JWT missing subject claim') + throw new Errors.AuthenticationError('authorization failed') + } - FogAccessTokenManager.updateExpirationTime(fog.accessToken.id, fog.accessToken.expirationTime + - config.get('Settings:FogTokenExpirationIntervalSeconds') * 1000) + // Get the fog with transaction + const fog = await FogManager.findOne({ + uuid: fogUuid + }, { fakeTransaction: true }) - const timestamp = Date.now() - await FogManager.updateLastActive(fog.uuid, timestamp) + if (!fog) { + logger.error(`Fog with UUID ${fogUuid} not found`) + throw new Errors.AuthenticationError('authorization failed') + } - return f.apply(this, fArgs) + // Verify the JWT with transaction + await FogKeyService.verifyJWT(token, fogUuid, { fakeTransaction: true }) + + // Update last active timestamp with transaction + const timestamp = Date.now() + await FogManager.updateLastActive(fog.uuid, timestamp, { fakeTransaction: true }) + + fArgs.push(fog) + + return f.apply(this, fArgs) + } catch (error) { + logger.error(`JWT verification failed: ${error.message}`) + throw new Errors.AuthenticationError('authorization failed') + } } } diff --git a/src/helpers/app-helper.js b/src/helpers/app-helper.js index 3c0ffa97..8a9a26bf 100644 --- a/src/helpers/app-helper.js +++ b/src/helpers/app-helper.js @@ -70,7 +70,7 @@ async 
function checkPortAvailability (port) { } const findAvailablePort = async function (hostname) { - let portRange = Config.get('Tunnel:PortRange') + let portRange = Config.get('tunnel.portRange') if (!portRange) { logger.warn('Port range was\'n specified in config. Default range (2000-10000) will be used') portRange = '2000-10000' diff --git a/src/init.js b/src/init.js new file mode 100644 index 00000000..2da8d026 --- /dev/null +++ b/src/init.js @@ -0,0 +1,46 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +// Load configuration first +require('./config') + +// Initialize logger with configuration +const logger = require('./logger') +const { startTelemetry } = require('./config/telemetry') +const db = require('./data/models') + +async function initialize () { + try { + // Log initial steps using console since logger might not be ready + console.log('Configuration loaded') + console.log('Logger initialized with configuration') + + // Now we can use logger for the rest of initialization + logger.info('Initializing OpenTelemetry...') + startTelemetry() + + logger.info('Initializing database...') + await db.initDB(true) + + logger.info('Initialization completed successfully') + return true + } catch (error) { + // Use console.error here since logger might not be initialized + console.error('Initialization failed:', error) + process.exit(1) + } +} + +module.exports = { + initialize +} diff --git a/src/jobs/fog-status-job.js b/src/jobs/fog-status-job.js index d007bd09..e0737080 100644 --- a/src/jobs/fog-status-job.js +++ b/src/jobs/fog-status-job.js 
@@ -21,7 +21,7 @@ const MicroserviceStates = require('../enums/microservice-state') const FogStates = require('../enums/fog-state') const Config = require('../config') -const scheduleTime = Config.get('Settings:FogStatusUpdateIntervalSeconds') * 1000 +const scheduleTime = Config.get('settings.fogStatusUpdateInterval') * 1000 async function run () { try { @@ -41,7 +41,7 @@ async function updateFogsConnectionStatus (transaction) { } async function _updateFogStatus (transaction) { - const statusUpdateTolerance = Config.get('Settings:FogStatusUpdateTolerance') + const statusUpdateTolerance = Config.get('settings.fogStatusUpdateTolerance') const fogs = await FogManager.findAll({ daemonStatus: FogStates.RUNNING }, transaction) const unknownFogUuids = fogs .filter((fog) => { diff --git a/src/jobs/stopped-app-status-job.js b/src/jobs/stopped-app-status-job.js index 87fb608f..e6cf6f25 100644 --- a/src/jobs/stopped-app-status-job.js +++ b/src/jobs/stopped-app-status-job.js @@ -19,7 +19,7 @@ const MicroserviceStates = require('../enums/microservice-state') const Config = require('../config') const ApplicationManager = require('../data/managers/application-manager') -const scheduleTime = Config.get('Settings:FogStatusUpdateIntervalSeconds') * 1000 +const scheduleTime = Config.get('settings.fogStatusUpdateInterval') * 1000 async function run () { try { diff --git a/src/logger/index.js b/src/logger/index.js index 03bc2d2d..d855e847 100644 --- a/src/logger/index.js +++ b/src/logger/index.js @@ -11,13 +11,47 @@ * */ -const fs = require('fs') -const path = require('path') const pino = require('pino') -const serializer = require('pino-std-serializers') +const path = require('path') +const fs = require('fs') const config = require('../config') +const serializer = require('pino-std-serializers') +const zlib = require('zlib') + +// Get log directory and settings from environment or config +const dirName = process.env.LOG_DIRECTORY || config.get('log.directory') + +const maxFileSize = 
process.env.LOG_FILE_SIZE ? parseInt(process.env.LOG_FILE_SIZE) * 1024 * 1024 * 1024 : config.get('log.fileSize') + +const maxFiles = process.env.LOG_FILE_COUNT ? parseInt(process.env.LOG_FILE_COUNT) : config.get('log.fileCount') + +// Validate required values +if (!dirName) { + throw new Error('Log directory is not configured. Please set LOG_DIRECTORY environment variable or log.directory in config.') +} +if (!maxFileSize) { + throw new Error('Log file size is not configured. Please set LOG_FILE_SIZE environment variable or log.fileSize in config.') +} +if (!maxFiles) { + throw new Error('Log file count is not configured. Please set LOG_FILE_COUNT environment variable or log.fileCount in config.') +} -const dirName = config.get('Service:LogsDirectory') +const baseFileName = 'iofog-controller' +const logFileName = `${baseFileName}.log` + +console.log('Log directory:', dirName) +console.log('Max file size:', maxFileSize) +console.log('Max files:', maxFiles) + +// Default log level from environment variable, fallback to config, then 'info' if not set +let defaultLogLevel = process.env.LOG_LEVEL || config.get('log.level') || 'info' + +// Validate log level +const validLogLevels = ['fatal', 'error', 'warn', 'info', 'debug', 'trace', 'silly'] +if (!validLogLevels.includes(defaultLogLevel)) { + console.error(`Invalid LOG_LEVEL: ${defaultLogLevel}. 
Using default level: info`) + defaultLogLevel = 'info' +} const levels = { error: 100, @@ -26,6 +60,8 @@ const levels = { cliRes: 70, apiReq: 60, apiRes: 50, + service: 45, + db: 40, info: 40, verbose: 30, debug: 20, @@ -33,12 +69,18 @@ const levels = { } const defaultFormat = { - level: 'info', + level: defaultLogLevel, + timestamp: () => `,"time":"${new Date().toISOString()}"`, customLevels: levels, useOnlyCustomLevels: true, - redact: ['headers.authorization'], + redact: { + paths: ['headers.authorization', 'token', 'password', 'apiKey', 'secret', 'privateKey'], + censor: '[REDACTED]' + }, formatters: { - level: (level) => ({ level }), + level: (label) => { + return { level: label } + }, log: (log) => { if (!log.req && !log.res) { return log @@ -68,26 +110,139 @@ const defaultFormat = { } } -const consoleLogger = pino(defaultFormat) - let fileLogger = null -try { - // Create the log directory if it does not exist - if (!fs.existsSync(dirName)) { - fs.mkdirSync(dirName) +let consoleLogger = null + +async function compressFile (sourcePath, targetPath) { + return new Promise((resolve, reject) => { + const gzip = zlib.createGzip() + const input = fs.createReadStream(sourcePath) + const output = fs.createWriteStream(targetPath) + + input.pipe(gzip).pipe(output) + + output.on('finish', () => { + fs.unlink(sourcePath, (err) => { + if (err) reject(err) + else resolve() + }) + }) + + output.on('error', reject) + }) +} + +async function rotateLogFile (isStartup = false) { + try { + const logFile = path.join(dirName, logFileName) + + // On startup, rotate if file exists and has content + // During runtime, rotate if size limit is reached + const shouldRotate = isStartup + ? (fs.existsSync(logFile) && fs.statSync(logFile).size > 0) + : (fs.existsSync(logFile) && fs.statSync(logFile).size >= maxFileSize) + + if (shouldRotate) { + console.log(isStartup ? 'Rotating log file on startup...' 
: 'Log file size exceeded, rotating...') + + // Find the next available compressed file number + let nextFileNumber = 1 + while (fs.existsSync(path.join(dirName, `${baseFileName}${nextFileNumber}.log.gz`))) { + nextFileNumber++ + } + + // If we've reached max files, remove the oldest compressed file + if (nextFileNumber > maxFiles) { + const oldestFile = path.join(dirName, `${baseFileName}1.log.gz`) + if (fs.existsSync(oldestFile)) { + console.log('Removing oldest compressed log file:', oldestFile) + await fs.promises.unlink(oldestFile) + } + nextFileNumber = maxFiles + } + + // Compress the current log file to the numbered target + const compressedFile = path.join(dirName, `${baseFileName}${nextFileNumber}.log.gz`) + console.log(`Compressing current log file to: ${compressedFile}`) + await compressFile(logFile, compressedFile) + + // Create new empty log file + await fs.promises.writeFile(logFile, '') + console.log('Log rotation completed') + } + } catch (err) { + console.error('Error during log rotation:', err) + } +} + +function getLogger () { + if (!fileLogger) { + try { + // Create the log directory if it does not exist + if (!fs.existsSync(dirName)) { + console.log('Creating log directory:', dirName) + fs.mkdirSync(dirName, { recursive: true }) + } + + const logFile = path.join(dirName, logFileName) + console.log('Log file path:', logFile) + + // Perform initial rotation if needed + rotateLogFile(true).catch(err => { + console.error('Error during initial rotation:', err) + }) + + const logDestination = pino.destination({ + dest: logFile, + sync: true, + mkdir: true + }) + + // Check rotation before each write + const originalWrite = logDestination.write + logDestination.write = function (chunk) { + rotateLogFile(false).catch(err => { + console.error('Error during rotation check:', err) + }) + return originalWrite.call(this, chunk) + } + + fileLogger = pino( + { + ...defaultFormat, + level: defaultLogLevel + }, + logDestination + ) + + // Test write to ensure 
file is writable + fileLogger.info('Logger initialized successfully') + console.log('File logger initialized and tested') + } catch (err) { + console.error('Error initializing file logger:', err) + return getConsoleLogger() + } } + return fileLogger +} - const logDestination = pino.destination(path.resolve(dirName, 'iofog-controller.log')) - fileLogger = pino( - { +function getConsoleLogger () { + if (!consoleLogger) { + consoleLogger = pino({ ...defaultFormat, - level: 'apiRes' - }, - logDestination) - process.on('SIGHUP', () => logDestination.reopen()) -} catch (e) {} + level: defaultLogLevel + }) + } + return consoleLogger +} + +// Initialize file logger immediately +getLogger() -module.exports = {} +module.exports = { + getLogger, + getConsoleLogger +} for (const level of Object.keys(levels)) { module.exports[level] = (...log) => { @@ -102,9 +257,9 @@ for (const level of Object.keys(levels)) { if (log[0] instanceof Error) { log = serializer.err(...log) } - consoleLogger[level](...log) + getConsoleLogger()[level](...log) if (fileLogger !== null) { - fileLogger[level](...log) + getLogger()[level](...log) } } } diff --git a/src/main.js b/src/main.js index 4e4eb4ba..bb7ec0cf 100644 --- a/src/main.js +++ b/src/main.js @@ -20,11 +20,10 @@ const isElevated = require('is-elevated') const fetch = require('node-fetch-npm') const isHTTPS = () => { - const sslKey = config.get('Server:SslKey', '') - const devMode = config.get('Server:DevMode', false) - const sslCert = config.get('Server:SslCert', '') - const intermedKey = config.get('Server:IntermediateCert', '') - return !devMode && sslKey && sslCert && intermedKey + const sslKey = config.get('server.ssl.path.key', '') + const devMode = config.get('server.devMode', false) + const sslCert = config.get('server.ssl.path.cert', '') + return !devMode && sslKey && sslCert } const getJSONFromURL = async (uri) => { @@ -35,8 +34,8 @@ const getJSONFromURL = async (uri) => { return response.json() } -const apiPort = 
+(config.get('Server:Port', 51121)) -const viewerPort = +(process.env.VIEWER_PORT || config.get('Viewer:Port', 80)) +const apiPort = +(config.get('server.port', 51121)) +const viewerPort = +(process.env.VIEWER_PORT || config.get('viewer.port', 8008)) const isDaemonElevated = async () => { // If it is running and you can see it, you have enough permission to move forward diff --git a/src/schemas/config.js b/src/schemas/config.js index c03b779a..5c4e390b 100644 --- a/src/schemas/config.js +++ b/src/schemas/config.js @@ -18,7 +18,7 @@ const configUpdate = { port: { type: 'integer', minimum: 0, maximum: 65535 }, sslCert: { type: 'string' }, sslKey: { type: 'string' }, - intermediateCert: { type: 'string' }, + intermediateCert: { type: 'string', optional: true }, logDir: { type: 'string' }, logSize: { type: 'integer' } } diff --git a/src/server.js b/src/server.js index 5c91faac..5a0b8039 100755 --- a/src/server.js +++ b/src/server.js @@ -11,225 +11,270 @@ * */ -const config = require('./config') -const logger = require('./logger') -const db = require('./data/models') - -const bodyParser = require('body-parser') -const cookieParser = require('cookie-parser') -const express = require('express') -const ecnViewer = process.env.ECN_VIEWER_PATH ? 
require(`${process.env.ECN_VIEWER_PATH}/package/index.js`) : require('@datasance/ecn-viewer') -const fs = require('fs') -const helmet = require('helmet') -const cors = require('cors') -const https = require('https') -const path = require('path') -const { renderFile } = require('ejs') -const xss = require('xss-clean') -const { substitutionMiddleware } = require('./helpers/template-helper') -const multer = require('multer') -const multerMemStorage = multer.memoryStorage() -const uploadFile = (fileName) => multer({ - storage: multerMemStorage -}).single(fileName) -const keycloak = require('./config/keycloak.js').initKeycloak() -const session = require('express-session') -const memoryStore = require('./config/keycloak.js').getMemoryStore() - -const viewerApp = express() - -const app = express() - -app.use(cors()) - -app.use(helmet()) -app.use(xss()) - -// express logs -// app.use(morgan('combined')); -app.use(session({ - secret: 'pot-controller', - resave: false, - saveUninitialized: true, - store: memoryStore -})) -app.use(keycloak.middleware()) -app.use(bodyParser.urlencoded({ - extended: true -})) -app.use(bodyParser.json()) - -app.engine('ejs', renderFile) -app.set('view engine', 'ejs') -app.use(cookieParser()) - -app.set('views', path.join(__dirname, 'views')) - -app.on('uncaughtException', (req, res, route, err) => { - // TODO -}) +// Initialize everything in the correct order +const { initialize } = require('./init') +initialize().then(() => { + const config = require('./config') + const logger = require('./logger') + const db = require('./data/models') + const CleanupService = require('./services/cleanup-service') -app.use((req, res, next) => { - if (req.headers && req.headers['request-id']) { - req.id = req.headers['request-id'] - delete req.headers['request-id'] - } + const bodyParser = require('body-parser') + const cookieParser = require('cookie-parser') + const express = require('express') + const ecnViewer = process.env.ECN_VIEWER_PATH ? 
require(`${process.env.ECN_VIEWER_PATH}/package/index.js`) : require('@datasance/ecn-viewer') + const fs = require('fs') + const helmet = require('helmet') + const cors = require('cors') + const https = require('https') + const path = require('path') + const { renderFile } = require('ejs') + const xss = require('xss-clean') + const { substitutionMiddleware } = require('./helpers/template-helper') + const multer = require('multer') + const multerMemStorage = multer.memoryStorage() + const uploadFile = (fileName) => multer({ + storage: multerMemStorage + }).single(fileName) - res.append('X-Timestamp', Date.now()) - next() -}) + // Initialize session and Keycloak after config is loaded + const session = require('express-session') + const { initKeycloak, getMemoryStore } = require('./config/keycloak.js') + const memoryStore = getMemoryStore() + const keycloak = initKeycloak() -global.appRoot = path.resolve(__dirname) + const viewerApp = express() + const app = express() -const registerRoute = (route) => { - const middlewares = [route.middleware] - if (route.supportSubstitution) { - middlewares.unshift(substitutionMiddleware) - } - if (route.fileInput) { - middlewares.unshift(uploadFile(route.fileInput)) - } - app[route.method.toLowerCase()](route.path, ...middlewares) -} + app.use(cors()) -const setupMiddleware = function (routeName) { - const routes = [].concat(require(path.join(__dirname, 'routes', routeName)) || []) - routes.forEach(registerRoute) -} + app.use(helmet()) + app.use(xss()) -fs.readdirSync(path.join(__dirname, 'routes')) - .forEach(setupMiddleware) + // express logs + // app.use(morgan('combined')); + app.use(session({ + secret: 'pot-controller', + resave: false, + saveUninitialized: true, + store: memoryStore + })) + app.use(keycloak.middleware()) + app.use(bodyParser.urlencoded({ + extended: true + })) + app.use(bodyParser.json()) -const jobs = [] + app.engine('ejs', renderFile) + app.set('view engine', 'ejs') + app.use(cookieParser()) -const 
setupJobs = function (file) { - jobs.push((require(path.join(__dirname, 'jobs', file)) || [])) -} + app.set('views', path.join(__dirname, 'views')) -fs.readdirSync(path.join(__dirname, 'jobs')) - .filter((file) => { - return (file.indexOf('.') !== 0) && (file.slice(-3) === '.js') - }) - .forEach(setupJobs) - -function registerServers (api, viewer) { - process.once('SIGTERM', async function (code) { - console.log('SIGTERM received. Shutting down.') - await new Promise((resolve) => { api.close(resolve) }) - console.log('API Server closed.') - await new Promise((resolve) => { viewer.close(resolve) }) - console.log('Viewer Server closed.') - process.exit(0) + app.on('uncaughtException', (req, res, route, err) => { + // TODO }) -} - -function startHttpServer (apps, ports, jobs) { - logger.info('SSL not configured, starting HTTP server.') - const viewerServer = apps.viewer.listen(ports.viewer, function onStart (err) { - if (err) { - logger.error(err) + app.use((req, res, next) => { + if (req.headers && req.headers['request-id']) { + req.id = req.headers['request-id'] + delete req.headers['request-id'] } - logger.info(`==> 🌎 Viewer listening on port ${ports.viewer}. Open up http://localhost:${ports.viewer}/ in your browser.`) + + res.append('X-Timestamp', Date.now()) + next() }) - const apiServer = apps.api.listen(ports.api, function onStart (err) { - if (err) { - logger.error(err) + + global.appRoot = path.resolve(__dirname) + + const registerRoute = (route) => { + const middlewares = [route.middleware] + if (route.supportSubstitution) { + middlewares.unshift(substitutionMiddleware) } - logger.info(`==> 🌎 API Listening on port ${ports.api}. 
Open up http://localhost:${ports.api}/ in your browser.`) - jobs.forEach((job) => job.run()) - }) - registerServers(apiServer, viewerServer) -} - -function startHttpsServer (apps, ports, sslKey, sslCert, intermedKey, jobs) { - try { - const sslOptions = { - key: fs.readFileSync(sslKey), - cert: fs.readFileSync(sslCert), - ca: fs.readFileSync(intermedKey), - requestCert: true, - rejectUnauthorized: false // currently for some reason iofog agent doesn't work without this option + if (route.fileInput) { + middlewares.unshift(uploadFile(route.fileInput)) } + app[route.method.toLowerCase()](route.path, ...middlewares) + } + + const setupMiddleware = function (routeName) { + const routes = [].concat(require(path.join(__dirname, 'routes', routeName)) || []) + routes.forEach(registerRoute) + } + + fs.readdirSync(path.join(__dirname, 'routes')) + .forEach(setupMiddleware) + + const jobs = [] + + const setupJobs = function (file) { + jobs.push((require(path.join(__dirname, 'jobs', file)) || [])) + } + + fs.readdirSync(path.join(__dirname, 'jobs')) + .filter((file) => { + return (file.indexOf('.') !== 0) && (file.slice(-3) === '.js') + }) + .forEach(setupJobs) - const viewerServer = https.createServer(sslOptions, apps.viewer).listen(ports.viewer, function onStart (err) { + function registerServers (api, viewer) { + process.once('SIGTERM', async function (code) { + console.log('SIGTERM received. Shutting down.') + await new Promise((resolve) => { api.close(resolve) }) + console.log('API Server closed.') + await new Promise((resolve) => { viewer.close(resolve) }) + console.log('Viewer Server closed.') + process.exit(0) + }) + } + + function startHttpServer (apps, ports, jobs) { + logger.info('SSL not configured, starting HTTP server.') + + const viewerServer = apps.viewer.listen(ports.viewer, function onStart (err) { if (err) { logger.error(err) } - logger.info(`==> 🌎 HTTPS Viewer server listening on port ${ports.viewer}. 
Open up https://localhost:${ports.viewer}/ in your browser.`) - jobs.forEach((job) => job.run()) + logger.info(`==> 🌎 Viewer listening on port ${ports.viewer}. Open up http://localhost:${ports.viewer}/ in your browser.`) }) - - const apiServer = https.createServer(sslOptions, apps.api).listen(ports.api, function onStart (err) { + const apiServer = apps.api.listen(ports.api, function onStart (err) { if (err) { logger.error(err) } - logger.info(`==> 🌎 HTTPS API server listening on port ${ports.api}. Open up https://localhost:${ports.api}/ in your browser.`) + logger.info(`==> 🌎 API Listening on port ${ports.api}. Open up http://localhost:${ports.api}/ in your browser.`) jobs.forEach((job) => job.run()) }) registerServers(apiServer, viewerServer) - } catch (e) { - logger.error('ssl_key or ssl_cert or intermediate_cert is either missing or invalid. Provide valid SSL configurations.') } -} - -const devMode = config.get('Server:DevMode') -const apiPort = +(config.get('Server:Port')) -const viewerPort = +(process.env.VIEWER_PORT || config.get('Viewer:Port')) -const viewerURL = process.env.VIEWER_URL || config.get('Viewer:Url') -const sslKey = config.get('Server:SslKey') -const sslCert = config.get('Server:SslCert') -const intermedKey = config.get('Server:IntermediateCert') -const kcRealm = process.env.KC_REALM -const kcURL = `${process.env.KC_URL}` -const kcClient = process.env.KC_VIEWER_CLIENT - -viewerApp.use('/', ecnViewer.middleware(express)) - -const isDaemon = process.argv[process.argv.length - 1] === 'daemonize2' - -const initState = async () => { - if (!isDaemon) { - // InitDB + + const { createSSLOptions } = require('./utils/ssl-utils') + + function startHttpsServer (apps, ports, sslKey, sslCert, intermedKey, jobs, isBase64 = false) { try { - await db.initDB(true) - } catch (err) { - logger.error('Unable to initialize the database. 
Error: ' + err) - process.exit(1) + const sslOptions = createSSLOptions({ + key: sslKey, + cert: sslCert, + intermedKey: intermedKey, + isBase64: isBase64 + }) + + const viewerServer = https.createServer(sslOptions, apps.viewer).listen(ports.viewer, function onStart (err) { + if (err) { + logger.error(err) + } + logger.info(`==> 🌎 HTTPS Viewer server listening on port ${ports.viewer}. Open up https://localhost:${ports.viewer}/ in your browser.`) + jobs.forEach((job) => job.run()) + }) + + const apiServer = https.createServer(sslOptions, apps.api).listen(ports.api, function onStart (err) { + if (err) { + logger.error(err) + } + logger.info(`==> 🌎 HTTPS API server listening on port ${ports.api}. Open up https://localhost:${ports.api}/ in your browser.`) + jobs.forEach((job) => job.run()) + }) + registerServers(apiServer, viewerServer) + } catch (e) { + logger.error('Error loading SSL certificates. Please check your configuration.') } + } + + const devMode = process.env.DEV_MODE || config.get('server.devMode') + const apiPort = process.env.API_PORT || config.get('server.port') + const viewerPort = process.env.VIEWER_PORT || config.get('viewer.port') + const viewerURL = process.env.VIEWER_URL || config.get('viewer.url') + + // File-based SSL configuration + const sslKey = process.env.SSL_KEY || config.get('server.ssl.path.key') + const sslCert = process.env.SSL_CERT || config.get('server.ssl.path.cert') + const intermedKey = process.env.INTERMEDIATE_CERT || config.get('server.ssl.path.intermediateCert') - // Store PID to let deamon know we are running. 
- jobs.push({ - run: () => { - const pidFile = path.join((process.env.PID_BASE || __dirname), 'iofog-controller.pid') - logger.info(`==> PID file: ${pidFile}`) - fs.writeFileSync(pidFile, process.pid.toString()) + // Base64 SSL configuration + const sslKeyBase64 = config.get('server.ssl.base64.key') + const sslCertBase64 = config.get('server.ssl.base64.cert') + const intermedKeyBase64 = config.get('server.ssl.base64.intermediateCert') + + const hasFileBasedSSL = !devMode && sslKey && sslCert + const hasBase64SSL = !devMode && sslKeyBase64 && sslCertBase64 + + const kcRealm = process.env.KC_REALM || config.get('auth.realm') + const kcURL = process.env.KC_URL || config.get('auth.url') + const kcClient = process.env.KC_VIEWER_CLIENT || config.get('auth.viewerClient') + + viewerApp.use('/', ecnViewer.middleware(express)) + + const isDaemon = process.argv[process.argv.length - 1] === 'daemonize2' + + const initState = async () => { + if (!isDaemon) { + // InitDB + try { + await db.initDB(true) + } catch (err) { + logger.error('Unable to initialize the database. 
Error: ' + err) + process.exit(1) } - }) - } - // Set up controller-config.js for ECN Viewer - const ecnViewerControllerConfigFilePath = path.join(__dirname, '..', 'node_modules', '@datasance', 'ecn-viewer', 'build', 'controller-config.js') - const ecnViewerControllerConfig = { - port: apiPort, - user: {}, - keycloakURL: kcURL, - keycloakRealm: kcRealm, - keycloakClientid: kcClient - } - if (viewerURL) { - ecnViewerControllerConfig.url = viewerURL - } - const ecnViewerConfigScript = ` - window.controllerConfig = ${JSON.stringify(ecnViewerControllerConfig)} - ` - fs.writeFileSync(ecnViewerControllerConfigFilePath, ecnViewerConfigScript) -} - -initState() - .then(() => { - if (!devMode && sslKey && sslCert && intermedKey) { - startHttpsServer({ api: app, viewer: viewerApp }, { api: apiPort, viewer: viewerPort }, sslKey, sslCert, intermedKey, jobs) - } else { - startHttpServer({ api: app, viewer: viewerApp }, { api: apiPort, viewer: viewerPort }, jobs) + + // Store PID to let deamon know we are running. 
+ jobs.push({ + run: () => { + const pidFile = path.join((process.env.PID_BASE || __dirname), 'iofog-controller.pid') + logger.info(`==> PID file: ${pidFile}`) + fs.writeFileSync(pidFile, process.pid.toString()) + } + }) } - }) + // Set up controller-config.js for ECN Viewer + const ecnViewerControllerConfigFilePath = path.join(__dirname, '..', 'node_modules', '@datasance', 'ecn-viewer', 'build', 'controller-config.js') + const ecnViewerControllerConfig = { + port: apiPort, + user: {}, + controllerDevMode: devMode, + keycloakURL: kcURL, + keycloakRealm: kcRealm, + keycloakClientid: kcClient + } + if (viewerURL) { + ecnViewerControllerConfig.url = viewerURL + } + const ecnViewerConfigScript = ` + window.controllerConfig = ${JSON.stringify(ecnViewerControllerConfig)} + ` + fs.writeFileSync(ecnViewerControllerConfigFilePath, ecnViewerConfigScript) + } + + // Initialize cleanup service + CleanupService.start() + + initState() + .then(() => { + if (hasFileBasedSSL) { + startHttpsServer( + { api: app, viewer: viewerApp }, + { api: apiPort, viewer: viewerPort }, + sslKey, + sslCert, + intermedKey, + jobs, + false + ) + } else if (hasBase64SSL) { + startHttpsServer( + { api: app, viewer: viewerApp }, + { api: apiPort, viewer: viewerPort }, + sslKeyBase64, + sslCertBase64, + intermedKeyBase64, + jobs, + true + ) + } else { + startHttpServer( + { api: app, viewer: viewerApp }, + { api: apiPort, viewer: viewerPort }, + jobs + ) + } + }) +}) diff --git a/src/services/agent-service.js b/src/services/agent-service.js index 88c2be76..fa2a6f1f 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -21,7 +21,7 @@ const Op = Sequelize.Op const TransactionDecorator = require('../decorators/transaction-decorator') const FogProvisionKeyManager = require('../data/managers/iofog-provision-key-manager') const FogManager = require('../data/managers/iofog-manager') -const FogAccessTokenService = require('../services/iofog-access-token-service') +const 
FogKeyService = require('../services/iofog-key-service') const ChangeTrackingService = require('./change-tracking-service') const FogVersionCommandManager = require('../data/managers/iofog-version-command-manager') const StraceManager = require('../data/managers/strace-manager') @@ -54,6 +54,7 @@ const agentProvision = async function (provisionData, transaction) { const provision = await FogProvisionKeyManager.findOne({ provisionKey: provisionData.key }, transaction) + if (!provision) { throw new Errors.NotFoundError(ErrorMessages.INVALID_PROVISIONING_KEY) } @@ -67,11 +68,17 @@ const agentProvision = async function (provisionData, transaction) { uuid: provision.iofogUuid }, transaction) + if (!fog) { + throw new Errors.NotFoundError(ErrorMessages.INVALID_IOFOG_UUID) + } + await _checkMicroservicesFogType(fog, provisionData.type, transaction) - const newAccessToken = await FogAccessTokenService.generateAccessToken(transaction) + // Generate Ed25519 key pair + const keyPair = await FogKeyService.generateKeyPair(transaction) - await FogAccessTokenService.updateAccessToken(fog.uuid, newAccessToken, transaction) + // Store the public key + await FogKeyService.storePublicKey(fog.uuid, keyPair.publicKey, transaction) await FogManager.update({ uuid: fog.uuid @@ -85,7 +92,7 @@ const agentProvision = async function (provisionData, transaction) { return { uuid: fog.uuid, - token: newAccessToken.token + privateKey: keyPair.privateKey } } diff --git a/src/services/cleanup-service.js b/src/services/cleanup-service.js new file mode 100644 index 00000000..9c67201d --- /dev/null +++ b/src/services/cleanup-service.js @@ -0,0 +1,34 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const schedule = require('node-schedule') +const FogUsedTokenManager = require('../data/managers/fog-used-token-manager') +const logger = require('../logger') + +class CleanupService { + start () { + // Run every 5 minutes to ensure we catch all expired tokens + // (since tokens are valid for 10 minutes) + schedule.scheduleJob('*/5 * * * *', async () => { + try { + logger.debug('Starting cleanup of expired JTIs') + const count = await FogUsedTokenManager.cleanupExpiredJtis() + logger.debug(`Cleaned up ${count} expired JTIs`) + } catch (error) { + logger.error('Error during JTI cleanup:', error) + } + }) + } +} + +module.exports = new CleanupService() diff --git a/src/services/diagnostic-service.js b/src/services/diagnostic-service.js index f138683c..41ace819 100644 --- a/src/services/diagnostic-service.js +++ b/src/services/diagnostic-service.js @@ -58,7 +58,7 @@ const getMicroserviceStraceData = async function (uuid, data, isCLI, transaction throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_STRACE, uuid)) } - const dir = Config.get('Diagnostics:DiagnosticDir') || 'diagnostics' + const dir = Config.get('diagnostics.directory') || 'diagnostics' const filePath = dir + '/' + uuid let result = straceData.buffer @@ -91,7 +91,7 @@ const postMicroserviceStraceDatatoFtp = async function (uuid, data, isCLI, trans throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_STRACE, uuid)) } - const dir = Config.get('Diagnostics:DiagnosticDir') + const dir = Config.get('diagnostics.directory') const filePath = dir + '/' + uuid _createDirectoryIfNotExists(dir) diff --git a/src/services/iofog-key-service.js b/src/services/iofog-key-service.js new file mode 100644 index 00000000..242c4597 --- /dev/null 
+++ b/src/services/iofog-key-service.js @@ -0,0 +1,130 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const crypto = require('crypto') +const AppHelper = require('../helpers/app-helper') +const FogPublicKeyManager = require('../data/managers/iofog-public-key-manager') +const FogUsedTokenManager = require('../data/managers/fog-used-token-manager') +const jose = require('jose') + +/** + * Generate Ed25519 key pair and return as JWK strings + * @returns {Object} Object containing publicKey and privateKey as base64 encoded JWK strings + */ +const generateKeyPair = async function (transaction) { + // Generate Ed25519 key pair + const { publicKey, privateKey } = crypto.generateKeyPairSync('ed25519') + + // Convert to JWK format + const publicKeyJwk = publicKey.export({ format: 'jwk' }) + const privateKeyJwk = privateKey.export({ format: 'jwk' }) + + // Convert JWK to base64 encoded single line strings + const publicKeyBase64 = Buffer.from(JSON.stringify(publicKeyJwk)).toString('base64') + const privateKeyBase64 = Buffer.from(JSON.stringify(privateKeyJwk)).toString('base64') + + return { + publicKey: publicKeyBase64, + privateKey: privateKeyBase64 + } +} + +/** + * Store public key for a fog node + * @param {string} fogUuid - UUID of the fog node + * @param {string} publicKey - Public key as base64 encoded JWK string + * @param {Object} transaction - Sequelize transaction + * @returns {Promise} Promise resolving to the stored public key + */ +const storePublicKey = async function (fogUuid, publicKey, transaction) { + // Encrypt the public key 
using the fog UUID as salt + const encryptedPublicKey = AppHelper.encryptText(publicKey, fogUuid) + + // Store the encrypted public key + return FogPublicKeyManager.updateOrCreate(fogUuid, encryptedPublicKey, transaction) +} + +/** + * Get public key for a fog node + * @param {string} fogUuid - UUID of the fog node + * @param {Object} transaction - Sequelize transaction + * @returns {Promise} Promise resolving to the public key as base64 encoded JWK string + */ +const getPublicKey = async function (fogUuid, transaction) { + // Get the encrypted public key + const fogPublicKey = await FogPublicKeyManager.findByFogUuid(fogUuid, transaction) + + if (!fogPublicKey) { + return null + } + + // Decrypt the public key using the fog UUID as salt + return AppHelper.decryptText(fogPublicKey.publicKey, fogUuid) +} + +/** + * Verify a JWT signed by a fog node + * @param {string} token - JWT token + * @param {string} fogUuid - UUID of the fog node + * @param {Object} transaction - Sequelize transaction + * @returns {Promise} Promise resolving to the verified JWT payload + */ +const verifyJWT = async function (token, fogUuid, transaction) { + try { + // Get the public key for the fog node + const publicKeyBase64 = await getPublicKey(fogUuid, transaction) + + if (!publicKeyBase64) { + throw new Error('Public key not found for fog node') + } + + // Convert base64 JWK string to JWK object + const publicKeyJwk = JSON.parse(Buffer.from(publicKeyBase64, 'base64').toString()) + + // Convert JWK to crypto key + const publicKey = crypto.createPublicKey({ + key: publicKeyJwk, + format: 'jwk' + }) + + // Verify the JWT using jose + const { payload } = await jose.jwtVerify(token, publicKey, { + algorithms: ['EdDSA'] + }) + + // Check if JTI is already used + const isUsed = await FogUsedTokenManager.isJtiUsed(payload.jti, transaction) + if (isUsed) { + throw new Error('JWT already used') + } + + // Store the JTI + await FogUsedTokenManager.storeJti(payload.jti, fogUuid, payload.exp, 
transaction) + + return payload + } catch (error) { + throw new Error(`JWT verification failed: ${error.message}`) + } +} + +async function all (transaction) { + return FogPublicKeyManager.findAll(null, transaction) +} + +module.exports = { + generateKeyPair, + storePublicKey, + getPublicKey, + verifyJWT, + all +} diff --git a/src/services/iofog-service.js b/src/services/iofog-service.js index 1255c82d..1cc27db5 100644 --- a/src/services/iofog-service.js +++ b/src/services/iofog-service.js @@ -489,8 +489,8 @@ async function generateProvisioningKeyEndPoint (fogData, isCLI, transaction) { const newProvision = { iofogUuid: fogData.uuid, - provisionKey: AppHelper.generateRandomString(8), - expirationTime: new Date().getTime() + (20 * 60 * 1000) + provisionKey: AppHelper.generateRandomString(16), + expirationTime: new Date().getTime() + (10 * 60 * 1000) } const fog = await FogManager.findOne(queryFogData, transaction) diff --git a/src/services/microservice-ports/default.js b/src/services/microservice-ports/default.js index d5c23378..956186be 100644 --- a/src/services/microservice-ports/default.js +++ b/src/services/microservice-ports/default.js @@ -52,7 +52,7 @@ async function _checkForDuplicatePorts (agent, localPort, transaction) { } function _createDefaultPublicPortRange () { - const defaultPortRangeStr = process.env.PUBLIC_PORTS_RANGE || controllerConfig.get('PublicPorts:Range') + const defaultPortRangeStr = process.env.PUBLIC_PORTS_RANGE || controllerConfig.get('publicPorts.range') const [startStr, endStr] = defaultPortRangeStr.split('-') let start = parseInt(startStr) let end = parseInt(endStr) diff --git a/src/services/microservices-service.js b/src/services/microservices-service.js index cd564905..8a725720 100644 --- a/src/services/microservices-service.js +++ b/src/services/microservices-service.js @@ -40,6 +40,7 @@ const { VOLUME_MAPPING_DEFAULT } = require('../helpers/constants') const constants = require('../helpers/constants') const isEqual = 
require('lodash/isEqual') const TagsManager = require('../data/managers/tags-manager') +const logger = require('../logger') async function _setPubTags (microserviceModel, tagsArray, transaction) { if (tagsArray) { @@ -356,14 +357,14 @@ async function createMicroserviceEndPoint (microserviceData, isCLI, transaction) response.forEach(ms => ms.iofogUuid && fogsNeedUpdate.add(ms.iofogUuid)) } } catch (error) { - console.error(`[ERROR] Checking fog nodes list for pubTag "${tag.value}":`, error.message) + logger.error(`Checking fog nodes list for pubTag "${tag.value}":`, error.message) } } for (const fog of fogsNeedUpdate) { try { await ChangeTrackingService.update(fog, ChangeTrackingService.events.microserviceFull, transaction) } catch (error) { - console.error(`[ERROR] Updating change tracking for fog "${fog.value}":`, error.message) + logger.error(`Updating change tracking for fog "${fog.value}":`, error.message) } } } @@ -863,14 +864,14 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i response.forEach(ms => ms.iofogUuid && fogsNeedUpdate.add(ms.iofogUuid)) } } catch (error) { - console.error(`[ERROR] Checking fog nodes list for pubTag "${tag.value}":`, error.message) + logger.error(`Checking fog nodes list for pubTag "${tag.value}":`, error.message) } } for (const fog of fogsNeedUpdate) { try { await ChangeTrackingService.update(fog, ChangeTrackingService.events.microserviceFull, transaction) } catch (error) { - console.error(`[ERROR] Updating change tracking for fog "${fog.value}":`, error.message) + logger.error(`Updating change tracking for fog "${fog.value}":`, error.message) } } } @@ -1197,7 +1198,7 @@ async function getReceiverMicroservices (microservice, transaction) { ] } } catch (error) { - console.error(`[ERROR] Checking microservices for pubTag "${tag.value}":`, error.message) + logger.error(`Checking microservices for pubTag "${tag.value}":`, error.message) } } } @@ -1226,7 +1227,7 @@ async function 
isMicroserviceConsumer (microservice, transaction) { return true } } catch (error) { - console.error(`[ERROR] Checking microservices for subTag "${tag.value}":`, error.message) + logger.error(`Checking microservices for subTag "${tag.value}":`, error.message) } } } diff --git a/src/services/tunnel-service.js b/src/services/tunnel-service.js index 14142af2..7c013b26 100644 --- a/src/services/tunnel-service.js +++ b/src/services/tunnel-service.js @@ -30,13 +30,13 @@ const openTunnel = async function (tunnelData, isCli, transaction) { if (isCli) { tunnel.rport = await AppHelper.findAvailablePort(tunnelData.host) } else { - const host = Config.get('Tunnel:Host') + const host = Config.get('tunnel.host') tunnel = { - username: Config.get('Tunnel:Username'), - password: Config.get('Tunnel:Password'), + username: Config.get('tunnel.username'), + password: Config.get('tunnel.password'), host: host, - rsakey: Config.get('Tunnel:RsaKey'), - lport: Config.get('Tunnel:Lport'), + rsakey: Config.get('tunnel.rsaKey'), + lport: Config.get('tunnel.lport'), iofogUuid: iofog.uuid, closed: false, rport: await AppHelper.findAvailablePort(host) diff --git a/src/services/user-service.js b/src/services/user-service.js index fcbed54d..639b1fef 100644 --- a/src/services/user-service.js +++ b/src/services/user-service.js @@ -16,38 +16,73 @@ const TransactionDecorator = require('../decorators/transaction-decorator') const axios = require('axios') const qs = require('qs') const https = require('https') +const config = require('../config') + +const kcClient = process.env.KC_CLIENT || config.get('auth.client.id') +const kcClientSecret = process.env.KC_CLIENT_SECRET || config.get('auth.client.secret') +const kcUrl = process.env.KC_URL || config.get('auth.url') +const kcRealm = process.env.KC_REALM || config.get('auth.realm') +const isDevMode = config.get('server.devMode', true) + +const mockUser = { + preferred_username: 'dev-user', + email: 'dev@example.com', + realm_access: { + roles: ['SRE', 
'Developer', 'Viewer'] + } +} + +const mockToken = { + access_token: 'mock-access-token', + refresh_token: 'mock-refresh-token' +} + +const isAuthConfigured = () => { + return kcUrl && kcRealm && kcClient && kcClientSecret +} const login = async function (credentials, isCLI, transaction) { - try { - const data = qs.stringify({ - grant_type: 'password', - username: credentials.email, - password: credentials.password, - totp: credentials.totp, - client_id: process.env.KC_CLIENT, - client_secret: process.env.KC_CLIENT_SECRET - }) - - const agent = new https.Agent({ - rejectUnauthorized: false // Ignore SSL certificate errors - }) - - const config = { - method: 'post', - maxBodyLength: Infinity, - url: `${process.env.KC_URL}realms/${process.env.KC_REALM}/protocol/openid-connect/token`, - headers: { - 'Cache-Control': 'no-cache', - 'Content-Type': 'application/x-www-form-urlencoded' - }, - data, - httpsAgent: agent + // If in dev mode and auth is not configured, always return mock token + if (!isAuthConfigured() && isDevMode) { + return { + accessToken: mockToken.access_token, + refreshToken: mockToken.refresh_token } + } - // Make a POST request to Keycloak token endpoint - const response = await axios.request(config) + // If auth is not configured and not in dev mode, throw error + if (!isAuthConfigured() && !isDevMode) { + throw new Error(`Auth is not configured for this cluster. 
Please contact your administrator.`) + } - // Extract the access token from the response + // Only proceed with axios request if auth is configured + const data = qs.stringify({ + grant_type: 'password', + username: credentials.email, + password: credentials.password, + totp: credentials.totp, + client_id: kcClient, + client_secret: kcClientSecret + }) + + const agent = new https.Agent({ + rejectUnauthorized: false + }) + + const requestConfig = { + method: 'post', + maxBodyLength: Infinity, + url: `${kcUrl}realms/${kcRealm}/protocol/openid-connect/token`, + headers: { + 'Cache-Control': 'no-cache', + 'Content-Type': 'application/x-www-form-urlencoded' + }, + data, + httpsAgent: agent + } + + try { + const response = await axios.request(requestConfig) const accessToken = response.data.access_token const refreshToken = response.data.refresh_token return { @@ -55,40 +90,53 @@ const login = async function (credentials, isCLI, transaction) { refreshToken } } catch (error) { - console.error('Error during login:', error) - throw new Errors.InvalidCredentialsError() + if (error.response && error.response.data) { + throw new Errors.InvalidCredentialsError(error.response.data.error_description || 'Invalid credentials') + } + throw new Errors.InvalidCredentialsError(error.message || 'Invalid credentials') } } const refresh = async function (credentials, isCLI, transaction) { - try { - const data = qs.stringify({ - grant_type: 'refresh_token', - refresh_token: credentials.refreshToken, - client_id: process.env.KC_CLIENT, - client_secret: process.env.KC_CLIENT_SECRET - }) - - const agent = new https.Agent({ - rejectUnauthorized: false // Ignore SSL certificate errors - }) - - const config = { - method: 'post', - maxBodyLength: Infinity, - url: `${process.env.KC_URL}realms/${process.env.KC_REALM}/protocol/openid-connect/token`, - headers: { - 'Cache-Control': 'no-cache', - 'Content-Type': 'application/x-www-form-urlencoded' - }, - data, - httpsAgent: agent + // If in dev mode 
and auth is not configured, always return mock token + if (!isAuthConfigured() && isDevMode) { + return { + accessToken: mockToken.access_token, + refreshToken: mockToken.refresh_token } + } + + // If auth is not configured and not in dev mode, throw error + if (!isAuthConfigured() && !isDevMode) { + throw new Error(`Auth is not configured for this cluster. Please contact your administrator.`) + } - // Make a POST request to Keycloak token endpoint - const response = await axios.request(config) + // Only proceed with axios request if auth is configured + const data = qs.stringify({ + grant_type: 'refresh_token', + refresh_token: credentials.refreshToken, + client_id: kcClient, + client_secret: kcClientSecret + }) + + const agent = new https.Agent({ + rejectUnauthorized: false + }) + + const requestConfig = { + method: 'post', + maxBodyLength: Infinity, + url: `${kcUrl}realms/${kcRealm}/protocol/openid-connect/token`, + headers: { + 'Cache-Control': 'no-cache', + 'Content-Type': 'application/x-www-form-urlencoded' + }, + data, + httpsAgent: agent + } - // Extract the access token from the response + try { + const response = await axios.request(requestConfig) const accessToken = response.data.access_token const refreshToken = response.data.refresh_token return { @@ -96,68 +144,88 @@ const refresh = async function (credentials, isCLI, transaction) { refreshToken } } catch (error) { - console.error('Error during login:', error) - throw new Errors.InvalidCredentialsError() + if (error.response && error.response.data) { + throw new Errors.InvalidCredentialsError(error.response.data.error_description || 'Invalid credentials') + } + throw new Errors.InvalidCredentialsError(error.message || 'Invalid credentials') } } const profile = async function (req, isCLI, transaction) { - try { - const accessToken = req.headers.authorization.replace('Bearer ', '') - const agent = new https.Agent({ - // Ignore SSL certificate errors - rejectUnauthorized: false - }) - - const 
profileconfig = { - method: 'get', - maxBodyLength: Infinity, - url: `${process.env.KC_URL}realms/${process.env.KC_REALM}/protocol/openid-connect/userinfo`, - headers: { - 'Content-Type': 'application/x-www-form-urlencoded', - Authorization: `Bearer ${accessToken}` - }, - httpsAgent: agent - } + // If in dev mode and auth is not configured, always return mock user + if (!isAuthConfigured() && isDevMode) { + return mockUser + } - // Make the request using async/await - const response = await axios.request(profileconfig) + // If auth is not configured and not in dev mode, throw error + if (!isAuthConfigured() && !isDevMode) { + throw new Error(`Auth is not configured for this cluster. Please contact your administrator.`) + } - // Return the userinfo data + // Only proceed with axios request if auth is configured + const accessToken = req.headers.authorization.replace('Bearer ', '') + const agent = new https.Agent({ + rejectUnauthorized: false + }) + + const requestConfig = { + method: 'get', + maxBodyLength: Infinity, + url: `${kcUrl}realms/${kcRealm}/protocol/openid-connect/userinfo`, + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + Authorization: `Bearer ${accessToken}` + }, + httpsAgent: agent + } + + try { + const response = await axios.request(requestConfig) return response.data } catch (error) { - console.error('Error during profile retrieval:', error) - throw new Errors.InvalidCredentialsError() + if (error.response && error.response.data) { + throw new Errors.InvalidCredentialsError(error.response.data.error_description || 'Invalid credentials') + } + throw new Errors.InvalidCredentialsError(error.message || 'Invalid credentials') } } const logout = async function (req, isCLI, transaction) { - try { - const accessToken = req.headers.authorization.replace('Bearer ', '') - const agent = new https.Agent({ - // Ignore SSL certificate errors - rejectUnauthorized: false - }) - - const logoutconfig = { - method: 'post', - maxBodyLength: 
Infinity, - url: `${process.env.KC_URL}realms/${process.env.KC_REALM}/protocol/openid-connect/logout`, - headers: { - 'Content-Type': 'application/x-www-form-urlencoded', - Authorization: `Bearer ${accessToken}` - }, - httpsAgent: agent - } + // If in dev mode and auth is not configured, always return success + if (!isAuthConfigured() && isDevMode) { + return { status: 'success' } + } - // Make the request using async/await - const response = await axios.request(logoutconfig) + // If auth is not configured and not in dev mode, throw error + if (!isAuthConfigured() && !isDevMode) { + throw new Error(`Auth is not configured for this cluster. Please contact your administrator.`) + } - // Return the userinfo data + // Only proceed with axios request if auth is configured + const accessToken = req.headers.authorization.replace('Bearer ', '') + const agent = new https.Agent({ + rejectUnauthorized: false + }) + + const requestConfig = { + method: 'post', + maxBodyLength: Infinity, + url: `${kcUrl}realms/${kcRealm}/protocol/openid-connect/logout`, + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + Authorization: `Bearer ${accessToken}` + }, + httpsAgent: agent + } + + try { + const response = await axios.request(requestConfig) return response.data } catch (error) { - console.error('Error during logout:', error) - throw new Errors.InvalidCredentialsError() + if (error.response && error.response.data) { + throw new Errors.InvalidCredentialsError(error.response.data.error_description || 'Invalid credentials') + } + throw new Errors.InvalidCredentialsError(error.message || 'Invalid credentials') } } diff --git a/src/utils/ssl-utils.js b/src/utils/ssl-utils.js new file mode 100644 index 00000000..988eca7a --- /dev/null +++ b/src/utils/ssl-utils.js @@ -0,0 +1,76 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. 
+ * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const fs = require('fs') +const logger = require('../logger') + +/** + * Loads a certificate from either a file path or base64 string + * @param {string} source - The source of the certificate (file path or base64 string) + * @param {boolean} isBase64 - Whether the source is a base64 string + * @returns {Buffer} The loaded certificate + * @throws {Error} If there's an error loading the certificate + */ +function loadCertificate (source, isBase64 = false) { + try { + if (!source) { + throw new Error('Certificate source is empty') + } + + if (isBase64) { + return Buffer.from(source, 'base64') + } + return fs.readFileSync(source) + } catch (e) { + logger.error(`Error loading certificate: ${e.message}`) + throw e + } +} + +/** + * Creates SSL options from either file paths or base64 strings + * @param {Object} options - SSL configuration options + * @param {string} options.key - SSL key file path or base64 string + * @param {string} options.cert - SSL certificate file path or base64 string + * @param {string} [options.intermedKey] - Intermediate certificate file path or base64 string + * @param {boolean} [options.isBase64=false] - Whether the inputs are base64 strings + * @returns {Object} SSL options for HTTPS server + */ +function createSSLOptions ({ key, cert, intermedKey, isBase64 = false }) { + if (!key || !cert) { + throw new Error('SSL key and certificate are required') + } + + const sslOptions = { + key: loadCertificate(key, isBase64), + cert: loadCertificate(cert, isBase64), + requestCert: true, + rejectUnauthorized: false + } + + // Only add CA if intermediate certificate is provided + if (intermedKey) { + try { + 
sslOptions.ca = loadCertificate(intermedKey, isBase64) + } catch (e) { + logger.warn('Intermediate certificate could not be loaded, continuing without it') + } + } + + return sslOptions +} + +module.exports = { + loadCertificate, + createSSLOptions +} diff --git a/test/OTEL/README.md b/test/OTEL/README.md new file mode 100644 index 00000000..77725087 --- /dev/null +++ b/test/OTEL/README.md @@ -0,0 +1,50 @@ +# OpenTelemetry Test Setup + +This directory contains the necessary configuration files to test OpenTelemetry integration with Jaeger, Prometheus, and Grafana. + +## Prerequisites + +- Docker +- Docker Compose + +## Setup + +1. Start the observability stack: +```bash +docker-compose up -d +``` + +2. Access the UIs: +- Jaeger UI: http://localhost:16686 +- Prometheus: http://localhost:9090 +- Grafana: http://localhost:3000 (login with admin/admin) + +## Testing Your Application + +To test your application with this setup: + +1. Set the environment variables: +```bash +export OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 +export OTEL_SERVICE_NAME=your-service-name +``` + +2. Run your application with OpenTelemetry: +```bash +sudo -E node -r dotenv/config src/server.js +``` + +## Verifying the Setup + +1. Make some requests to your application +2. Open Jaeger UI (http://localhost:16686) +3. Select your service from the dropdown +4. Click "Find Traces" +5. 
You should see traces from your application + +## Cleanup + +To stop and remove all containers: +```bash +docker-compose down -v +``` \ No newline at end of file diff --git a/test/OTEL/docker-compose.yml b/test/OTEL/docker-compose.yml new file mode 100644 index 00000000..6db625c4 --- /dev/null +++ b/test/OTEL/docker-compose.yml @@ -0,0 +1,65 @@ +version: '3.8' + +services: + # Jaeger for trace visualization + jaeger: + image: jaegertracing/all-in-one:latest + ports: + - "16686:16686" # Jaeger UI + - "14250:14250" # gRPC + environment: + - COLLECTOR_OTLP_ENABLED=true + - METRICS_STORAGE_TYPE=prometheus + - PROMETHEUS_SERVER_URL=http://prometheus:9090 + networks: + - telemetry-network + + # OpenTelemetry Collector + otel-collector: + image: otel/opentelemetry-collector:latest + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "4317:4317" # OTLP gRPC + - "4318:4318" # OTLP HTTP + - "8888:8888" # Prometheus metrics exposed by the collector + - "8889:8889" # Prometheus exporter + - "13133:13133" # health_check extension + - "55679:55679" # zpages extension + depends_on: + - jaeger + - prometheus + networks: + - telemetry-network + + # Prometheus for metrics + prometheus: + image: prom/prometheus:latest + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml + ports: + - "9090:9090" + networks: + - telemetry-network + + # Grafana for visualization + grafana: + image: grafana/grafana:latest + ports: + - "3000:3000" + volumes: + - grafana-storage:/var/lib/grafana + environment: + - GF_SECURITY_ADMIN_PASSWORD=admin + depends_on: + - prometheus + networks: + - telemetry-network + +networks: + telemetry-network: + driver: bridge + +volumes: + grafana-storage: \ No newline at end of file diff --git a/test/OTEL/otel-collector-config.yaml b/test/OTEL/otel-collector-config.yaml new file mode 100644 index 00000000..5c5b03ab --- /dev/null +++ b/test/OTEL/otel-collector-config.yaml 
@@ -0,0 +1,39 @@ +receivers: + otlp: + protocols: + http: + endpoint: 0.0.0.0:4318 + grpc: + endpoint: 0.0.0.0:4317 + +processors: + batch: + timeout: 1s + send_batch_size: 1024 + +exporters: + otlp: + endpoint: jaeger:4317 + tls: + insecure: true + prometheus: + endpoint: "0.0.0.0:8889" + namespace: "controller" + resource_to_telemetry_conversion: + enabled: true + debug: + verbosity: detailed + +service: + telemetry: + logs: + level: debug + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [otlp, debug] + metrics: + receivers: [otlp] + processors: [batch] + exporters: [otlp, prometheus, debug] \ No newline at end of file diff --git a/test/OTEL/prometheus.yml b/test/OTEL/prometheus.yml new file mode 100644 index 00000000..de10cdb2 --- /dev/null +++ b/test/OTEL/prometheus.yml @@ -0,0 +1,8 @@ +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'otel-collector' + scrape_interval: 5s + static_configs: + - targets: ['otel-collector:8889'] \ No newline at end of file From 5e5957557360622c66af1e841858703889788f5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Thu, 24 Apr 2025 15:13:14 +0300 Subject: [PATCH 05/25] yaml parses updated with microservice container annotations, capadd, capdrop --- src/services/yaml-parser-service.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/services/yaml-parser-service.js b/src/services/yaml-parser-service.js index d8fb7ca2..bdbf1ea5 100644 --- a/src/services/yaml-parser-service.js +++ b/src/services/yaml-parser-service.js @@ -87,6 +87,9 @@ const parseMicroserviceYAML = async (microservice) => { agentName: lget(microservice, 'agent.name'), registryId, ...microservice.container, + annotations: microservice.container.annotations != null ? 
JSON.stringify(microservice.container.annotations) : undefined, + capAdd: lget(microservice, 'container.capAdd', []), + capDrop: lget(microservice, 'container.capDrop', []), ports: (lget(microservice, 'container.ports', [])), volumeMappings: lget(microservice, 'container.volumes', []), cmd: lget(microservice, 'container.commands', []), From 58ba9cdd0276ad9ff05af2f551542fa28124f59c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Thu, 24 Apr 2025 22:59:52 +0300 Subject: [PATCH 06/25] viewer version updated --- package-lock.json | 8 ++++---- package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/package-lock.json b/package-lock.json index cbfa99de..9be6b883 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,7 @@ "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "0.5.3", + "@datasance/ecn-viewer": "0.5.4", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-trace-otlp-http": "^0.200.0", "@opentelemetry/instrumentation-express": "^0.48.1", @@ -481,9 +481,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.5.3.tgz", - "integrity": "sha512-jjfnn9zPK5OmRVbAOdfaB/jIrKA4w/RsSNK2fbeQeGhq01deJQJXmbHqutdskdyQpJKdwVWpM3mWAIe2nimOCg==" + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.5.4.tgz", + "integrity": "sha512-Eu8BhBAhHyU6S3RdOPyiKpq3DhRUcEQQlU02BBWTdI5e6j5Iqv6Q72AFBw+AaE0NeO7PSNz8x7jQj77OX7jU5g==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.4.0", diff --git a/package.json b/package.json index 4b6212bb..4d56685d 100644 --- a/package.json +++ b/package.json @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "0.5.3", + "@datasance/ecn-viewer": "0.5.4", "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", From 
91316a46ac89e2a48f2bdf3b40b68adec8d5267a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Fri, 16 May 2025 16:51:09 +0300 Subject: [PATCH 07/25] new secret and certificate endpoints added, router mtls by default, bd migrations and seeders handled by controller, logging improved --- docs/swagger.yaml | 824 +++++++++++++++++- package-lock.json | 350 +++++++- package.json | 11 +- scripts/cli-tests.js | 6 +- src/controllers/agent-controller.js | 7 +- src/controllers/certificate-controller.js | 87 ++ src/controllers/secret-controller.js | 66 ++ src/data/managers/certificate-manager.js | 227 +++++ src/data/managers/secret-manager.js | 57 ++ .../mysql/db_migration_mysql_v1.0.2.sql | 716 +++++++++++++++ .../postgres/db_migration_pg_v1.0.2.sql | 717 +++++++++++++++ .../db_migration_sqlite_v1.0.2.sql} | 79 +- src/data/models/certificate.js | 130 +++ src/data/models/index.js | 60 +- src/data/models/microservicestatus.js | 5 + src/data/models/router.js | 30 +- src/data/models/secret.js | 79 ++ src/data/providers/database-provider.js | 520 ++++++++++- src/data/providers/mysql.js | 57 +- src/data/providers/postgres.js | 58 +- .../seeders/mysql/db_seeder_mysql_v1.0.2.sql | 43 + .../seeders/postgres/db_seeder_pg_v1.0.2.sql | 42 + .../db_seeder_sqlite_v1.0.2.sql} | 0 src/decorators/authorization-decorator.js | 1 + src/helpers/error-messages.js | 13 +- src/helpers/errors.js | 11 +- src/helpers/secret-helper.js | 59 ++ src/helpers/template-helper.js | 378 ++++---- src/logger/index.js | 39 +- src/routes/agent.js | 69 +- src/routes/application.js | 20 +- src/routes/applicationTemplate.js | 16 +- src/routes/catalog.js | 10 +- src/routes/certificate.js | 356 ++++++++ src/routes/config.js | 6 +- src/routes/controller.js | 4 +- src/routes/diagnostics.js | 10 +- src/routes/edgeResource.js | 16 +- src/routes/flow.js | 10 +- src/routes/iofog.js | 22 +- src/routes/microservices.js | 50 +- src/routes/registries.js | 8 +- src/routes/router.js | 4 +- src/routes/routing.js | 
10 +- src/routes/secret.js | 246 ++++++ src/routes/tunnel.js | 4 +- src/routes/user.js | 2 +- src/schemas/certificate.js | 144 +++ src/schemas/secret.js | 65 ++ src/services/agent-service.js | 54 +- src/services/certificate-service.js | 605 +++++++++++++ src/services/iofog-service.js | 153 +++- src/services/router-service.js | 303 +++++-- src/services/secret-service.js | 136 +++ src/services/yaml-parser-service.js | 79 +- src/utils/cert.js | 518 +++++++++++ src/utils/k8s-client.js | 153 ++++ 57 files changed, 7148 insertions(+), 597 deletions(-) create mode 100644 src/controllers/certificate-controller.js create mode 100644 src/controllers/secret-controller.js create mode 100644 src/data/managers/certificate-manager.js create mode 100644 src/data/managers/secret-manager.js create mode 100644 src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql create mode 100644 src/data/migrations/postgres/db_migration_pg_v1.0.2.sql rename src/data/migrations/{db_migration_v1.0.2.sql => sqlite/db_migration_sqlite_v1.0.2.sql} (90%) create mode 100644 src/data/models/certificate.js create mode 100644 src/data/models/secret.js create mode 100644 src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql create mode 100644 src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql rename src/data/seeders/{db_seeder_v1.0.2.sql => sqlite/db_seeder_sqlite_v1.0.2.sql} (100%) create mode 100644 src/helpers/secret-helper.js create mode 100644 src/routes/certificate.js create mode 100644 src/routes/secret.js create mode 100644 src/schemas/certificate.js create mode 100644 src/schemas/secret.js create mode 100644 src/services/certificate-service.js create mode 100644 src/services/secret-service.js create mode 100644 src/utils/cert.js create mode 100644 src/utils/k8s-client.js diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 93ce06d6..28aaad1a 100755 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -1,6 +1,6 @@ openapi : "3.0.0" info: - version: 3.4.11 + version: 3.5.0 title: Datasance PoT 
Controller paths: /status: @@ -476,7 +476,7 @@ paths: requestBody: required: true content: - application/yaml: + multipart/form-data: schema: type: object properties: @@ -619,7 +619,7 @@ paths: requestBody: required: true content: - application/yaml: + multipart/form-data: schema: type: object properties: @@ -677,7 +677,7 @@ paths: requestBody: required: true content: - application/yaml: + multipart/form-data: schema: type: object properties: @@ -813,7 +813,7 @@ paths: requestBody: required: true content: - application/yaml: + multipart/form-data: schema: type: object properties: @@ -895,6 +895,34 @@ paths: description: Not Authorized "500": description: Internal Server Error + /agent/cert: + get: + tags: + - Agent + summary: Move Controller CA to Agent + operationId: agentControllerCert + security: + - agentToken: [] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/AgentDeprovisioningRequest" + required: true + responses: + "204": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "400": + description: Bad Request + "401": + description: Not Authorized + "500": + description: Internal Server Error /agent/config: get: tags: @@ -1576,7 +1604,7 @@ paths: requestBody: required: true content: - application/yaml: + multipart/form-data: schema: type: object properties: @@ -1778,7 +1806,7 @@ paths: requestBody: required: true content: - application/yaml: + multipart/form-data: schema: type: object properties: @@ -3155,6 +3183,453 @@ paths: description: Not Found "500": description: Internal Server Error + /secrets: + post: + tags: + - Secrets + summary: Creates a new secret + operationId: createSecret + security: + - userToken: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SecretCreate" + responses: + "201": + description: Created + content: + application/json: + schema: + $ref: 
"#/components/schemas/SecretResponse" + "400": + description: Bad Request + "401": + description: Not Authorized + "409": + description: Secret Already Exists + "500": + description: Internal Server Error + get: + tags: + - Secrets + summary: Lists all secrets + operationId: listSecrets + security: + - userToken: [] + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/SecretListResponse" + "401": + description: Not Authorized + "500": + description: Internal Server Error + "/secrets/{name}": + get: + tags: + - Secrets + summary: Gets a secret by name + operationId: getSecret + parameters: + - in: path + name: name + description: Secret name + required: true + schema: + type: string + security: + - userToken: [] + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/SecretResponse" + "401": + description: Not Authorized + "404": + description: Secret Not Found + "500": + description: Internal Server Error + put: + tags: + - Secrets + summary: Updates an existing secret + operationId: updateSecret + parameters: + - in: path + name: name + description: Secret name + required: true + schema: + type: string + security: + - userToken: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SecretUpdate" + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/SecretResponse" + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Secret Not Found + "500": + description: Internal Server Error + delete: + tags: + - Secrets + summary: Deletes a secret + operationId: deleteSecret + parameters: + - in: path + name: name + description: Secret name + required: true + schema: + type: string + security: + - userToken: [] + responses: + "200": + description: Success + "401": + description: Not Authorized + 
"404": + description: Secret Not Found + "500": + description: Internal Server Error + /secrets/yaml: + post: + tags: + - Secrets + summary: Create a secret from YAML file + operationId: createSecretFromYAML + security: + - userToken: [] + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + secret: + type: string + format: binary + responses: + '201': + description: Created + content: + application/json: + schema: + $ref: "#/components/schemas/SecretResponse" + '400': + description: Bad Request + "401": + description: Not Authorized + "409": + description: Secret Already Exists + "500": + description: Internal Server Error + "/secrets/yaml/{name}": + put: + tags: + - Secrets + summary: Updates an existing secret using YAML + operationId: updateSecretFromYAML + parameters: + - in: path + name: name + description: Secret name + required: true + schema: + type: string + security: + - userToken: [] + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + secret: + type: string + format: binary + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/SecretResponse" + '400': + description: Bad Request + '401': + description: Unauthorized + '404': + description: Secret Not Found + /certificates/ca/{name}: + get: + tags: + - Certificates + summary: Get a Certificate Authority (CA) by name + operationId: getCA + parameters: + - in: path + name: name + description: CA name + required: true + schema: + type: string + security: + - userToken: [] + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/CAResponse" + '401': + description: Unauthorized + '404': + description: CA not found + '500': + description: Internal Server Error + delete: + tags: + - Certificates + summary: Delete a Certificate Authority (CA) by name + operationId: deleteCA + parameters: + - in: path + name: name + 
description: CA name + required: true + schema: + type: string + security: + - userToken: [] + responses: + '200': + description: Success + '401': + description: Unauthorized + '404': + description: CA not found + '500': + description: Internal Server Error + + /certificates: + post: + tags: + - Certificates + summary: Create a new certificate + operationId: createCertificate + security: + - userToken: [] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CertificateCreateRequest" + responses: + '201': + description: Created + content: + application/json: + schema: + $ref: "#/components/schemas/CertificateResponse" + '400': + description: Bad Request + '401': + description: Unauthorized + '404': + description: Not Found - Referenced CA not found + '409': + description: Conflict - Certificate already exists + get: + tags: + - Certificates + summary: List all certificates + operationId: listCertificates + security: + - userToken: [] + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/CertificateListResponse" + '401': + description: Unauthorized + '500': + description: Internal Server Error + + /certificates/expiring: + get: + tags: + - Certificates + summary: List certificates that will expire soon + operationId: listExpiringCertificates + parameters: + - in: query + name: days + description: Number of days ahead to check for expiration (default 30) + required: false + schema: + type: integer + default: 30 + security: + - userToken: [] + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/CertificateListResponse" + '400': + description: Bad Request + '401': + description: Unauthorized + '500': + description: Internal Server Error + + /certificates/{name}: + get: + tags: + - Certificates + summary: Get a certificate by name + operationId: getCertificate + parameters: + - in: path + name: name + description: 
Certificate name + required: true + schema: + type: string + security: + - userToken: [] + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/CertificateResponse" + '401': + description: Unauthorized + '404': + description: Certificate not found + '500': + description: Internal Server Error + delete: + tags: + - Certificates + summary: Delete a certificate by name + operationId: deleteCertificate + parameters: + - in: path + name: name + description: Certificate name + required: true + schema: + type: string + security: + - userToken: [] + responses: + '200': + description: Success + '401': + description: Unauthorized + '404': + description: Certificate not found + '500': + description: Internal Server Error + + /certificates/{name}/renew: + post: + tags: + - Certificates + summary: Renew a certificate + operationId: renewCertificate + parameters: + - in: path + name: name + description: Certificate name + required: true + schema: + type: string + security: + - userToken: [] + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/CertificateRenewResponse" + '400': + description: Bad Request + '401': + description: Unauthorized + '404': + description: Certificate not found + '500': + description: Internal Server Error + /certificates/yaml: + post: + tags: + - Certificates + summary: Create a certificate or CA from YAML file + operationId: createCertificateFromYAML + security: + - userToken: [] + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + certificate: + type: string + format: binary + responses: + '201': + description: Created + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CAResponse" + - $ref: "#/components/schemas/CertificateResponse" + '400': + description: Bad Request + '401': + description: Unauthorized + '404': + description: Not Found - Referenced CA not found 
+ '409': + description: Conflict - Certificate or CA already exists tags: - name: Controller description: Manage your controller @@ -3182,6 +3657,10 @@ tags: description: Used by your agents to communicate with your controller - name: User description: Manage your users + - name: Secrets + description: Manage your secrets + - name: Certificates + description: Manage your certificates servers: - url: http://localhost:51121/api/v3 components: @@ -3897,20 +4376,6 @@ components: type: number edgeRouterPort: type: number - requireSsl: - type: string - sslProfile: - type: string - saslMechanisms: - type: string - authenticatePeer: - type: string - caCert: - type: string - tlsCert: - type: string - tlsKey: - type: string host: type: string tags: @@ -5040,4 +5505,319 @@ components: sourceMicroserviceUuid: type: string destMicroserviceUuid: - type: string \ No newline at end of file + type: string + SecretCreate: + type: object + required: + - name + - type + - data + properties: + name: + type: string + minLength: 1 + maxLength: 255 + type: + type: string + enum: [opaque, tls] + data: + type: object + SecretUpdate: + type: object + required: + - data + properties: + data: + type: object + SecretResponse: + type: object + required: + - id + - name + - type + - data + - created_at + - updated_at + properties: + id: + type: integer + name: + type: string + type: + type: string + enum: [opaque, tls] + data: + type: object + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + SecretListResponse: + type: object + required: + - secrets + properties: + secrets: + type: array + items: + type: object + required: + - id + - name + - type + - created_at + - updated_at + properties: + id: + type: integer + name: + type: string + type: + type: string + enum: [opaque, tls] + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + # Certificate schemas + CACreateRequest: + type: object + required: + - 
name + - subject + - type + properties: + name: + type: string + description: Name of the CA + subject: + type: string + description: Subject of the CA (CN) + expiration: + type: integer + description: Expiration time in milliseconds + type: + type: string + enum: [k8s-secret, direct, self-signed] + description: Type of CA + secretName: + type: string + description: Name of the secret (required for direct type) + + CAResponse: + type: object + properties: + name: + type: string + description: Name of the CA + subject: + type: string + description: Subject of the CA + is_ca: + type: boolean + description: True if this is a CA + valid_from: + type: string + format: date-time + description: Validity start date + valid_to: + type: string + format: date-time + description: Validity end date + serial_number: + type: string + description: Certificate serial number + data: + type: object + properties: + certificate: + type: string + description: PEM encoded certificate + private_key: + type: string + description: PEM encoded private key + + CAListResponse: + type: object + properties: + cas: + type: array + items: + type: object + properties: + name: + type: string + description: Name of the CA + subject: + type: string + description: Subject of the CA + valid_from: + type: string + format: date-time + description: Validity start date + valid_to: + type: string + format: date-time + description: Validity end date + days_remaining: + type: integer + description: Days until expiration + is_expired: + type: boolean + description: True if certificate is expired + + CertificateCreateRequest: + type: object + required: + - name + - subject + - hosts + properties: + name: + type: string + description: Name of the certificate + subject: + type: string + description: Subject of the certificate (CN) + hosts: + type: string + description: Comma-separated list of hosts + expiration: + type: integer + description: Expiration time in milliseconds + ca: + type: object + properties: + 
type: + type: string + enum: [k8s-secret, direct, self-signed] + description: Type of CA + secretName: + type: string + description: Name of the CA secret + cert: + type: string + description: PEM encoded certificate (for direct type) + key: + type: string + description: PEM encoded private key (for direct type) + + CertificateResponse: + type: object + properties: + name: + type: string + description: Name of the certificate + subject: + type: string + description: Subject of the certificate + hosts: + type: string + description: Comma-separated list of hosts + is_ca: + type: boolean + description: True if this is a CA + valid_from: + type: string + format: date-time + description: Validity start date + valid_to: + type: string + format: date-time + description: Validity end date + serial_number: + type: string + description: Certificate serial number + ca_name: + type: string + description: Name of the signing CA + certificate_chain: + type: array + items: + type: object + properties: + name: + type: string + subject: + type: string + days_remaining: + type: integer + description: Days until expiration + is_expired: + type: boolean + description: True if certificate is expired + data: + type: object + properties: + certificate: + type: string + description: PEM encoded certificate + private_key: + type: string + description: PEM encoded private key + + CertificateListResponse: + type: object + properties: + certificates: + type: array + items: + type: object + properties: + name: + type: string + description: Name of the certificate + subject: + type: string + description: Subject of the certificate + hosts: + type: string + description: Comma-separated list of hosts + is_ca: + type: boolean + description: True if this is a CA + valid_from: + type: string + format: date-time + description: Validity start date + valid_to: + type: string + format: date-time + description: Validity end date + days_remaining: + type: integer + description: Days until expiration + 
is_expired: + type: boolean + description: True if certificate is expired + ca_name: + type: string + description: Name of the signing CA + + CertificateRenewResponse: + type: object + properties: + name: + type: string + description: Name of the certificate + subject: + type: string + description: Subject of the certificate + hosts: + type: string + description: Comma-separated list of hosts + valid_from: + type: string + format: date-time + description: New validity start date + valid_to: + type: string + format: date-time + description: New validity end date + renewed: + type: boolean + description: True if certificate was successfully renewed \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index 9be6b883..db97afda 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,6 +11,7 @@ "license": "EPL-2.0", "dependencies": { "@datasance/ecn-viewer": "0.5.4", + "@kubernetes/client-node": "^0.22.3", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-trace-otlp-http": "^0.200.0", "@opentelemetry/instrumentation-express": "^0.48.1", @@ -30,7 +31,7 @@ "ejs": "3.1.10", "express": "4.21.2", "express-session": "1.18.1", - "formidable": "3.5.1", + "formidable": "3.5.4", "ftp": "0.3.10", "globally": "^0.0.0", "helmet": "7.1.0", @@ -48,6 +49,7 @@ "mysql2": "3.10.1", "nconf": "0.12.1", "node-fetch-npm": "^2.0.4", + "node-forge": "^1.3.1", "node-schedule": "^2.1.1", "os": "0.1.2", "path": "0.12.7", @@ -910,6 +912,25 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@isaacs/fs-minipass/node_modules/minipass": { + "version": "7.1.2", + "resolved": 
"https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/@istanbuljs/load-nyc-config": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", @@ -1081,6 +1102,111 @@ "url": "https://opencollective.com/js-sdsl" } }, + "node_modules/@jsep-plugin/assignment": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@jsep-plugin/assignment/-/assignment-1.3.0.tgz", + "integrity": "sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ==", + "engines": { + "node": ">= 10.16.0" + }, + "peerDependencies": { + "jsep": "^0.4.0||^1.0.0" + } + }, + "node_modules/@jsep-plugin/regex": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@jsep-plugin/regex/-/regex-1.0.4.tgz", + "integrity": "sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg==", + "engines": { + "node": ">= 10.16.0" + }, + "peerDependencies": { + "jsep": "^0.4.0||^1.0.0" + } + }, + "node_modules/@kubernetes/client-node": { + "version": "0.22.3", + "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.22.3.tgz", + "integrity": "sha512-dG8uah3+HDJLpJEESshLRZlAZ4PgDeV9mZXT0u1g7oy4KMRzdZ7n5g0JEIlL6QhK51/2ztcIqURAnjfjJt6Z+g==", + "dependencies": { + "byline": "^5.0.0", + "isomorphic-ws": "^5.0.0", + "js-yaml": "^4.1.0", + "jsonpath-plus": "^10.2.0", + "request": "^2.88.0", + "rfc4648": "^1.3.0", + "stream-buffers": "^3.0.2", + "tar": "^7.0.0", + "tslib": "^2.4.1", + "ws": "^8.18.0" + }, + "optionalDependencies": { + "openid-client": "^6.1.3" + } + }, + "node_modules/@kubernetes/client-node/@cypress/request@3.0.8": {}, + "node_modules/@kubernetes/client-node/node_modules/chownr": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "engines": { + "node": ">=18" + } + }, + "node_modules/@kubernetes/client-node/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/@kubernetes/client-node/node_modules/minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/@kubernetes/client-node/node_modules/tar": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@kubernetes/client-node/node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "engines": { + "node": ">=18" + } + }, + "node_modules/@noble/hashes": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", + "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + "engines": { + "node": "^14.21.3 || >=16" + }, + 
"funding": { + "url": "https://paulmillr.com/funding/" + } + }, "node_modules/@one-ini/wasm": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/@one-ini/wasm/-/wasm-0.1.1.tgz", @@ -2341,6 +2467,14 @@ "node": ">=14" } }, + "node_modules/@paralleldrive/cuid2": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@paralleldrive/cuid2/-/cuid2-2.2.2.tgz", + "integrity": "sha512-ZOBkgDwEdoYVlSeRbYYXs0S9MejQofiVYoTbKzy/6GQa39/q5tQU2IX46+shYnUkpEl3wc+J6wRlar7r2EK2xA==", + "dependencies": { + "@noble/hashes": "^1.1.5" + } + }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -2754,11 +2888,11 @@ "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" }, "node_modules/@types/node": { - "version": "20.12.12", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.12.tgz", - "integrity": "sha512-eWLDGF/FOSPtAvEqeRAQ4C8LSA7M1I7i0ky1I8U7kD1J5ITyW3AsRhQrKVoWf5pFKZ2kILsEGJhsI9r93PYnOw==", + "version": "22.15.17", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.15.17.tgz", + "integrity": "sha512-wIX2aSZL5FE+MR0JlvF87BNVrtFWf6AE6rxSE9X7OwnVvoyCQjpzSRJ+M87se/4QCkCiebQAqrJ0y6fwIyi7nw==", "dependencies": { - "undici-types": "~5.26.4" + "undici-types": "~6.21.0" } }, "node_modules/@types/shimmer": { @@ -3646,6 +3780,14 @@ "node": ">=10.16.0" } }, + "node_modules/byline": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", + "integrity": "sha512-s6webAy+R4SR8XVuJWt2V2rGvhnrhxN+9S15GNuTK3wKPOXFF6RNc+8ug2XhH+2s4f+uudG4kUVYmYOQWL2g0Q==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", @@ -6250,14 +6392,17 @@ } }, "node_modules/formidable": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.1.tgz", - 
"integrity": "sha512-WJWKelbRHN41m5dumb0/k8TeAx7Id/y3a+Z7QfhxP/htI9Js5zYaEDtG8uMgG0vM0lOlqnmjE99/kfpOYi/0Og==", + "version": "3.5.4", + "resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.4.tgz", + "integrity": "sha512-YikH+7CUTOtP44ZTnUhR7Ic2UASBPOqmaRkRKxRbywPTe5VxF7RRCck4af9wutiZ/QKM5nME9Bie2fFaPz5Gug==", "dependencies": { + "@paralleldrive/cuid2": "^2.2.2", "dezalgo": "^1.0.4", - "hexoid": "^1.0.0", "once": "^1.4.0" }, + "engines": { + "node": ">=14.0.0" + }, "funding": { "url": "https://ko-fi.com/tunnckoCore/commissions" } @@ -6892,6 +7037,7 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/hexoid/-/hexoid-1.0.0.tgz", "integrity": "sha512-QFLV0taWQOZtvIRIAdBChesmogZrtuXvVWsFHZTk2SU+anspqZ2vMnoLg7IE1+Uk16N19APic1BuF8bC8c2m5g==", + "dev": true, "engines": { "node": ">=8" } @@ -7779,6 +7925,14 @@ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" }, + "node_modules/isomorphic-ws": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-5.0.0.tgz", + "integrity": "sha512-muId7Zzn9ywDsyXgTIafTry2sV3nySZeUDe6YedVd1Hvuuep5AsIlqK+XefWpYTyJG5e503F2xIuT2lcU6rCSw==", + "peerDependencies": { + "ws": "*" + } + }, "node_modules/isstream": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", @@ -8226,6 +8380,14 @@ "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==", "optional": true }, + "node_modules/jsep": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/jsep/-/jsep-1.4.0.tgz", + "integrity": "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw==", + "engines": { + "node": ">= 10.16.0" + } + }, "node_modules/jsesc": { "version": "2.5.2", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", @@ -8306,6 +8468,23 @@ 
"node": ">= 10.0.0" } }, + "node_modules/jsonpath-plus": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-10.3.0.tgz", + "integrity": "sha512-8TNmfeTCk2Le33A3vRRwtuworG/L5RrgMvdjhKZxvyShO+mBu2fP50OWUjRLNtvw344DdDarFh9buFAZs5ujeA==", + "dependencies": { + "@jsep-plugin/assignment": "^1.3.0", + "@jsep-plugin/regex": "^1.0.4", + "jsep": "^1.4.0" + }, + "bin": { + "jsonpath": "bin/jsonpath-cli.js", + "jsonpath-plus": "bin/jsonpath-cli.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, "node_modules/jsonschema": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/jsonschema/-/jsonschema-1.4.1.tgz", @@ -8878,7 +9057,6 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", - "dev": true, "bin": { "mkdirp": "dist/cjs/src/bin.js" }, @@ -9345,7 +9523,6 @@ "version": "1.3.1", "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz", "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==", - "dev": true, "engines": { "node": ">= 6.13.0" } @@ -9676,6 +9853,15 @@ "node": "*" } }, + "node_modules/oauth4webapi": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/oauth4webapi/-/oauth4webapi-3.5.1.tgz", + "integrity": "sha512-txg/jZQwcbaF7PMJgY7aoxc9QuCxHVFMiEkDIJ60DwDz3PbtXPQnrzo+3X4IRYGChIwWLabRBRpf1k9hO9+xrQ==", + "optional": true, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, "node_modules/object-assign": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.0.1.tgz", @@ -9780,6 +9966,28 @@ "node": ">=4" } }, + "node_modules/openid-client": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-6.5.0.tgz", + "integrity": 
"sha512-fAfYaTnOYE2kQCqEJGX9KDObW2aw7IQy4jWpU/+3D3WoCFLbix5Hg6qIPQ6Js9r7f8jDUmsnnguRNCSw4wU/IQ==", + "optional": true, + "dependencies": { + "jose": "^6.0.10", + "oauth4webapi": "^3.5.1" + }, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/openid-client/node_modules/jose": { + "version": "6.0.11", + "resolved": "https://registry.npmjs.org/jose/-/jose-6.0.11.tgz", + "integrity": "sha512-QxG7EaliDARm1O1S8BGakqncGT9s25bKL1WSf6/oa17Tkqwi8D2ZNglqCF+DsYF88/rV66Q/Q2mFAy697E1DUg==", + "optional": true, + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, "node_modules/optionator": { "version": "0.9.4", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", @@ -9953,20 +10161,6 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "optional": true }, - "node_modules/pac-proxy-agent/node_modules/socks-proxy-agent": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.3.tgz", - "integrity": "sha512-VNegTZKhuGq5vSD6XNKlbqWhyt/40CgoEw8XxD6dhnm8Jq9IEa3nIa4HwnM8XOqU0CdB0BwWVXusqiFXfHB3+A==", - "optional": true, - "dependencies": { - "agent-base": "^7.1.1", - "debug": "^4.3.4", - "socks": "^2.7.1" - }, - "engines": { - "node": ">= 14" - } - }, "node_modules/pac-resolver": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-7.0.1.tgz", @@ -11023,20 +11217,6 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "optional": true }, - "node_modules/proxy-agent/node_modules/socks-proxy-agent": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.3.tgz", - "integrity": "sha512-VNegTZKhuGq5vSD6XNKlbqWhyt/40CgoEw8XxD6dhnm8Jq9IEa3nIa4HwnM8XOqU0CdB0BwWVXusqiFXfHB3+A==", - "optional": true, - "dependencies": { - "agent-base": "^7.1.1", - "debug": 
"^4.3.4", - "socks": "^2.7.1" - }, - "engines": { - "node": ">= 14" - } - }, "node_modules/proxy-from-env": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", @@ -11431,6 +11611,10 @@ "node": ">=4" } }, + "node_modules/request": { + "resolved": "node_modules/@kubernetes/client-node/@cypress/request@3.0.8", + "link": true + }, "node_modules/require-directory": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", @@ -11559,6 +11743,11 @@ "resolved": "https://registry.npmjs.org/retry-as-promised/-/retry-as-promised-7.0.4.tgz", "integrity": "sha512-XgmCoxKWkDofwH8WddD0w85ZfqYz+ZHlr5yo+3YUCfycWawU56T5ckWXsScsj5B8tqUcIG67DxXByo3VUgiAdA==" }, + "node_modules/rfc4648": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.5.4.tgz", + "integrity": "sha512-rRg/6Lb+IGfJqO05HZkN50UtY7K/JhxJag1kP23+zyMfrvoB0B7RWv06MbOzoc79RgCdNTiUaNsTT1AJZ7Z+cg==" + }, "node_modules/roarr": { "version": "2.15.4", "resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz", @@ -12209,6 +12398,52 @@ "npm": ">= 3.0.0" } }, + "node_modules/socks-proxy-agent": { + "version": "8.0.5", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-8.0.5.tgz", + "integrity": "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw==", + "optional": true, + "dependencies": { + "agent-base": "^7.1.2", + "debug": "^4.3.4", + "socks": "^2.8.3" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/socks-proxy-agent/node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "optional": true, + "engines": { + "node": ">= 14" + } + }, + "node_modules/socks-proxy-agent/node_modules/debug": { + "version": "4.4.0", + 
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "optional": true, + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/socks-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "optional": true + }, "node_modules/sonic-boom": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-4.0.1.tgz", @@ -13373,6 +13608,14 @@ "node": ">= 0.8" } }, + "node_modules/stream-buffers": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.3.tgz", + "integrity": "sha512-pqMqwQCso0PBJt2PQmDO0cFj0lyqmiwOMiMSkVtRokl7e+ZTRYgDHKnuZNbqjiJXgsg4nuqtD/zxuo9KqTp0Yw==", + "engines": { + "node": ">= 0.10.0" + } + }, "node_modules/stream-length": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/stream-length/-/stream-length-1.0.2.tgz", @@ -14067,8 +14310,7 @@ "node_modules/tslib": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", - "optional": true + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" }, "node_modules/tunnel-agent": { "version": "0.6.0", @@ -14301,9 +14543,9 @@ "integrity": "sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==" }, "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": 
"sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==" }, "node_modules/uniq": { "version": "1.0.1", @@ -14709,6 +14951,26 @@ "mkdirp": "bin/cmd.js" } }, + "node_modules/ws": { + "version": "8.18.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz", + "integrity": "sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, "node_modules/xml": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/xml/-/xml-1.0.1.tgz", diff --git a/package.json b/package.json index 4d56685d..d7473c81 100644 --- a/package.json +++ b/package.json @@ -56,6 +56,7 @@ }, "dependencies": { "@datasance/ecn-viewer": "0.5.4", + "@kubernetes/client-node": "^0.22.3", "axios": "1.8.4", "body-parser": "^1.20.3", "child_process": "1.0.2", @@ -69,7 +70,7 @@ "ejs": "3.1.10", "express": "4.21.2", "express-session": "1.18.1", - "formidable": "3.5.1", + "formidable": "3.5.4", "ftp": "0.3.10", "globally": "^0.0.0", "helmet": "7.1.0", @@ -87,6 +88,7 @@ "mysql2": "3.10.1", "nconf": "0.12.1", "node-fetch-npm": "^2.0.4", + "node-forge": "^1.3.1", "node-schedule": "^2.1.1", "os": "0.1.2", "path": "0.12.7", @@ -137,5 +139,10 @@ ".jshintrc", ".snyk" ], - "type": "commonjs" + "type": "commonjs", + "overrides": { + "@kubernetes/client-node": { + "request": "@cypress/request@3.0.8" + } + } } diff --git a/scripts/cli-tests.js b/scripts/cli-tests.js index 7f8f1148..e64f86f3 100644 --- a/scripts/cli-tests.js +++ b/scripts/cli-tests.js @@ -65,9 +65,9 @@ 
async function seedTestData () { fogType: 1, isSystem: true, routerMode: 'interior', - messagingPort: 5672, - edgeRouterPort: 56722, - interRouterPort: 56721, + messagingPort: 5671, + edgeRouterPort: 45671, + interRouterPort: 55671, host: 'localhost' }, { }, false) const defaultRouter = await RouterService.findOne({ isDefault: true }) diff --git a/src/controllers/agent-controller.js b/src/controllers/agent-controller.js index a3c1e794..223ef9aa 100644 --- a/src/controllers/agent-controller.js +++ b/src/controllers/agent-controller.js @@ -110,6 +110,10 @@ const putImageSnapshotEndPoint = async function (req, fog) { return AgentService.putImageSnapshot(req, fog) } +const getControllerCAEndPoint = async function (req, fog) { + return AgentService.getControllerCA(fog) +} + module.exports = { agentProvisionEndPoint: agentProvisionEndPoint, agentDeprovisionEndPoint: AuthDecorator.checkFogToken(agentDeprovisionEndPoint), @@ -130,5 +134,6 @@ module.exports = { getImageSnapshotEndPoint: AuthDecorator.checkFogToken(getImageSnapshotEndPoint), putImageSnapshotEndPoint: AuthDecorator.checkFogToken(putImageSnapshotEndPoint), resetAgentConfigChangesEndPoint: AuthDecorator.checkFogToken(resetAgentConfigChangesEndPoint), - getAgentLinkedEdgeResourcesEndpoint: AuthDecorator.checkFogToken(getAgentLinkedEdgeResourcesEndpoint) + getAgentLinkedEdgeResourcesEndpoint: AuthDecorator.checkFogToken(getAgentLinkedEdgeResourcesEndpoint), + getControllerCAEndPoint: AuthDecorator.checkFogToken(getControllerCAEndPoint) } diff --git a/src/controllers/certificate-controller.js b/src/controllers/certificate-controller.js new file mode 100644 index 00000000..3271c5c6 --- /dev/null +++ b/src/controllers/certificate-controller.js @@ -0,0 +1,87 @@ +const CertificateService = require('../services/certificate-service') +const YamlParserService = require('../services/yaml-parser-service') + +// CA Management +const createCAEndpoint = async function (req) { + const ca = req.body + return 
CertificateService.createCAEndpoint(ca) +} + +const getCAEndpoint = async function (req) { + const name = req.params.name + return CertificateService.getCAEndpoint(name) +} + +const listCAEndpoint = async function (req) { + return CertificateService.listCAEndpoint() +} + +const deleteCAEndpoint = async function (req) { + const name = req.params.name + return CertificateService.deleteCAEndpoint(name) +} + +// Certificate Management +const createCertificateEndpoint = async function (req) { + const cert = req.body + return CertificateService.createCertificateEndpoint(cert) +} + +const getCertificateEndpoint = async function (req) { + const name = req.params.name + return CertificateService.getCertificateEndpoint(name) +} + +const listCertificatesEndpoint = async function (req) { + return CertificateService.listCertificatesEndpoint() +} + +const deleteCertificateEndpoint = async function (req) { + const name = req.params.name + return CertificateService.deleteCertificateEndpoint(name) +} + +// Certificate Renewal +const renewCertificateEndpoint = async function (req) { + const name = req.params.name + return CertificateService.renewCertificateEndpoint(name) +} + +// List Expiring Certificates +const listExpiringCertificatesEndpoint = async function (req) { + const days = req.query.days ? 
parseInt(req.query.days) : 30 + return CertificateService.listExpiringCertificatesEndpoint(days) +} + +// YAML Endpoint +const createCertificateFromYamlEndpoint = async function (req) { + const fileContent = req.file.buffer.toString() + const certData = await YamlParserService.parseCertificateFile(fileContent) + + if (certData.isCA) { + delete certData.isCA + return CertificateService.createCAEndpoint(certData) + } else { + return CertificateService.createCertificateEndpoint(certData) + } +} + +module.exports = { + // CA endpoints + createCAEndpoint, + getCAEndpoint, + listCAEndpoint, + deleteCAEndpoint, + + // Certificate endpoints + createCertificateEndpoint, + getCertificateEndpoint, + listCertificatesEndpoint, + deleteCertificateEndpoint, + // Certificate renewal endpoints + renewCertificateEndpoint, + listExpiringCertificatesEndpoint, + + // YAML endpoints + createCertificateFromYamlEndpoint +} diff --git a/src/controllers/secret-controller.js b/src/controllers/secret-controller.js new file mode 100644 index 00000000..d4457a06 --- /dev/null +++ b/src/controllers/secret-controller.js @@ -0,0 +1,66 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const SecretService = require('../services/secret-service') +const YamlParserService = require('../services/yaml-parser-service') + +const createSecretEndpoint = async function (req) { + const secret = req.body + return SecretService.createSecretEndpoint(secret) +} + +const updateSecretEndpoint = async function (req) { + const secret = req.body + const secretName = req.params.name + return SecretService.updateSecretEndpoint(secretName, secret) +} + +const getSecretEndpoint = async function (req) { + const secretName = req.params.name + return SecretService.getSecretEndpoint(secretName) +} + +const listSecretsEndpoint = async function (req) { + return SecretService.listSecretsEndpoint() +} + +const deleteSecretEndpoint = async function (req) { + const secretName = req.params.name + return SecretService.deleteSecretEndpoint(secretName) +} + +const createSecretFromYamlEndpoint = async function (req) { + const fileContent = req.file.buffer.toString() + const secretData = await YamlParserService.parseSecretFile(fileContent) + return SecretService.createSecretEndpoint(secretData) +} + +const updateSecretFromYamlEndpoint = async function (req) { + const fileContent = req.file.buffer.toString() + const secretName = req.params.name + const secretData = await YamlParserService.parseSecretFile(fileContent, { + isUpdate: true, + secretName: secretName + }) + return SecretService.updateSecretEndpoint(secretName, secretData) +} + +module.exports = { + createSecretEndpoint, + updateSecretEndpoint, + getSecretEndpoint, + listSecretsEndpoint, + deleteSecretEndpoint, + createSecretFromYamlEndpoint, + updateSecretFromYamlEndpoint +} diff --git a/src/data/managers/certificate-manager.js b/src/data/managers/certificate-manager.js new file mode 100644 index 00000000..a54c62a0 --- 
/dev/null +++ b/src/data/managers/certificate-manager.js @@ -0,0 +1,227 @@ +const BaseManager = require('./base-manager') +const models = require('../models') +const Certificate = models.Certificate +const { Op } = require('sequelize') +const SecretManager = require('./secret-manager') +const AppHelper = require('../../helpers/app-helper') + +class CertificateManager extends BaseManager { + getEntity () { + return Certificate + } + + async createCertificateRecord (certData, transaction) { + // First find the secret by name to get its ID + const secret = await SecretManager.findOne({ name: certData.name }, transaction) + + if (secret) { + // Link the certificate to the secret + certData.secretId = secret.id + } + + return this.create(certData, transaction) + } + + async findCertificatesByCA (caId, transaction) { + AppHelper.checkTransaction(transaction) + + const options = transaction.fakeTransaction + ? { + where: { signedById: caId }, + include: ['secret'] } + : { + where: { signedById: caId }, + include: ['secret'], + transaction: transaction } + return this.getEntity().findAll(options) + } + + async findExpiringCertificates (days = 30, transaction) { + AppHelper.checkTransaction(transaction) + + const expirationDate = new Date() + expirationDate.setDate(expirationDate.getDate() + days) + + const options = transaction.fakeTransaction + ? { + where: { validTo: { [Op.lt]: expirationDate + } + }, + include: ['signingCA'] } + : { + where: { validTo: { [Op.lt]: expirationDate + } + }, + include: ['signingCA'], + transaction: transaction } + return this.getEntity().findAll(options) + } + + async findCertificateByName (name, transaction) { + AppHelper.checkTransaction(transaction) + + const options = transaction.fakeTransaction + ? 
{ + where: { name }, + include: ['signingCA', 'secret'] } + : { + where: { name }, + include: ['signingCA', 'secret'], + transaction: transaction } + return this.getEntity().findOne(options) + } + + async findAllCAs (transaction) { + AppHelper.checkTransaction(transaction) + + const options = transaction.fakeTransaction + ? { + where: { isCA: true }, + include: ['secret'] } + : { + where: { isCA: true }, + include: ['secret'], + transaction: transaction } + return this.getEntity().findAll(options) + } + + async findAllCertificates (transaction) { + AppHelper.checkTransaction(transaction) + + const options = transaction.fakeTransaction + ? { + include: ['signingCA', 'secret'] } + : { + include: ['signingCA', 'secret'], + transaction: transaction } + return this.getEntity().findAll(options) + } + + async deleteCertificate (name, transaction) { + return this.delete({ name }, transaction) + } + + async updateCertificate (id, updates, transaction) { + AppHelper.checkTransaction(transaction) + + // Find existing certificate + const options = transaction.fakeTransaction + ? { + where: { id } } + : { + where: { id }, + transaction: transaction } + const cert = await this.getEntity().findOne(options) + + if (!cert) { + throw new Error(`Certificate with id ${id} not found`) + } + + // Update certificate + return this.update({ id }, updates, transaction) + } + + async findExpiredCertificates (transaction) { + AppHelper.checkTransaction(transaction) + + const currentDate = new Date() + + const options = transaction.fakeTransaction + ? { + where: { validTo: { [Op.lt]: currentDate + } + }, + include: ['signingCA', 'secret'] } + : { + where: { validTo: { [Op.lt]: currentDate + } + }, + include: ['signingCA', 'secret'], + transaction: transaction } + return this.getEntity().findAll(options) + } + + async getCertificateChain (certId, transaction) { + AppHelper.checkTransaction(transaction) + const chain = [] + + const options = transaction.fakeTransaction + ? 
{ + where: { id: certId }, + include: ['signingCA', 'secret'] } + : { + where: { id: certId }, + include: ['signingCA', 'secret'], + transaction: transaction } + let currentCert = await this.getEntity().findOne(options) + + if (!currentCert) { + return chain + } + + chain.push(currentCert) + + // Traverse up the chain of signing CAs + while (currentCert.signingCA) { + const parentOptions = transaction.fakeTransaction + ? { where: { id: currentCert.signedById }, include: ['signingCA', 'secret'] + } + : { where: { id: currentCert.signedById }, include: ['signingCA', 'secret'], transaction: transaction + } + currentCert = await this.getEntity().findOne(parentOptions) + + if (currentCert) { + chain.push(currentCert) + } else { + break + } + } + + return chain + } + + async findCertificatesForRenewal (days = 30, transaction) { + AppHelper.checkTransaction(transaction) + + // Calculate the date range - we want certificates that expire between now and (now + days) + const now = new Date() + const futureDate = new Date() + futureDate.setDate(futureDate.getDate() + days) + + const options = transaction.fakeTransaction + ? { + where: { + validTo: { + [Op.gt]: now, + [Op.lt]: futureDate + } + }, + include: ['signingCA', 'secret'] } + : { + where: { + validTo: { + [Op.gt]: now, + [Op.lt]: futureDate + } + }, + include: ['signingCA', 'secret'], + transaction: transaction } + return this.getEntity().findAll(options) + } + + async getCertificateChildren (caId, transaction) { + AppHelper.checkTransaction(transaction) + + const options = transaction.fakeTransaction + ? 
{ + where: { signedById: caId }, + include: ['secret'] } + : { + where: { signedById: caId }, + include: ['secret'], + transaction: transaction } + return this.getEntity().findAll(options) + } +} + +module.exports = new CertificateManager() diff --git a/src/data/managers/secret-manager.js b/src/data/managers/secret-manager.js new file mode 100644 index 00000000..6cca804a --- /dev/null +++ b/src/data/managers/secret-manager.js @@ -0,0 +1,57 @@ +const BaseManager = require('./base-manager') +const SecretHelper = require('../../helpers/secret-helper') +const models = require('../models') +const Secret = models.Secret + +class SecretManager extends BaseManager { + getEntity () { + return Secret + } + + async createSecret (name, type, data, transaction) { + // const encryptedData = await SecretHelper.encryptSecret(data, name) + return this.create({ + name, + type, + data: data + }, transaction) + } + + async updateSecret (name, data, transaction) { + const encryptedData = await SecretHelper.encryptSecret(data, name) + return this.update( + { name }, + { data: encryptedData }, + transaction + ) + } + + async getSecret (name, transaction) { + const secret = await this.findOne({ name }, transaction) + if (!secret) { + return null + } + // const decryptedData = await SecretHelper.decryptSecret(secret.data, name) + return { + ...secret.toJSON(), + data: secret.data + } + } + + async listSecrets (transaction) { + const secrets = await this.findAll({}, transaction) + return secrets.map(secret => ({ + id: secret.id, + name: secret.name, + type: secret.type, + created_at: secret.created_at, + updated_at: secret.updated_at + })) + } + + async deleteSecret (name, transaction) { + return this.delete({ name }, transaction) + } +} + +module.exports = new SecretManager() diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql new file mode 100644 index 00000000..5cb25c7d --- /dev/null +++ 
b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql @@ -0,0 +1,716 @@ +START TRANSACTION; + +CREATE TABLE IF NOT EXISTS Flows ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255) UNIQUE, + description VARCHAR(255) DEFAULT '', + is_activated BOOLEAN DEFAULT false, + is_system BOOLEAN DEFAULT false, + created_at DATETIME, + updated_at DATETIME +); + +CREATE TABLE IF NOT EXISTS Registries ( + id INT AUTO_INCREMENT PRIMARY KEY, + url VARCHAR(255), + is_public BOOLEAN, + secure BOOLEAN, + certificate TEXT, + requires_cert BOOLEAN, + user_name TEXT, + password TEXT, + user_email TEXT +); + + +CREATE TABLE IF NOT EXISTS CatalogItems ( + id INT AUTO_INCREMENT PRIMARY KEY, + name VARCHAR(255) UNIQUE, + description VARCHAR(255), + category TEXT, + config_example VARCHAR(255) DEFAULT '{}', + publisher TEXT, + disk_required BIGINT DEFAULT 0, + ram_required BIGINT DEFAULT 0, + picture VARCHAR(255) DEFAULT 'images/shared/default.png', + is_public BOOLEAN DEFAULT false, + registry_id INT, + FOREIGN KEY (registry_id) REFERENCES Registries (id) ON DELETE SET NULL +); + +CREATE INDEX idx_catalog_item_registry_id ON CatalogItems (registry_id); + + +CREATE TABLE IF NOT EXISTS FogTypes ( + id INT PRIMARY KEY, + name TEXT, + image TEXT, + description TEXT, + network_catalog_item_id INT, + hal_catalog_item_id INT, + bluetooth_catalog_item_id INT, + FOREIGN KEY (network_catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE, + FOREIGN KEY (hal_catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE, + FOREIGN KEY (bluetooth_catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_type_network_catalog_item_id ON FogTypes (network_catalog_item_id); +CREATE INDEX idx_fog_type_hal_catalog_item_id ON FogTypes (hal_catalog_item_id); +CREATE INDEX idx_fog_type_bluetooth_catalog_item_id ON FogTypes (bluetooth_catalog_item_id); + + +CREATE TABLE IF NOT EXISTS Fogs ( + uuid VARCHAR(32) PRIMARY KEY NOT NULL, + name VARCHAR(255) DEFAULT 
'Unnamed ioFog 1', + location TEXT, + gps_mode TEXT, + latitude FLOAT, + longitude FLOAT, + description TEXT, + last_active BIGINT, + daemon_status VARCHAR(32) DEFAULT 'UNKNOWN', + daemon_operating_duration BIGINT DEFAULT 0, + daemon_last_start BIGINT, + memory_usage FLOAT DEFAULT 0.000, + disk_usage FLOAT DEFAULT 0.000, + cpu_usage FLOAT DEFAULT 0.00, + memory_violation TEXT, + disk_violation TEXT, + cpu_violation TEXT, + `system-available-disk` BIGINT, + `system-available-memory` BIGINT, + `system-total-cpu` FLOAT, + security_status VARCHAR(32) DEFAULT 'OK', + security_violation_info VARCHAR(32) DEFAULT 'No violation', + catalog_item_status TEXT, + repository_count BIGINT DEFAULT 0, + repository_status TEXT, + system_time BIGINT, + last_status_time BIGINT, + ip_address VARCHAR(32) DEFAULT '0.0.0.0', + ip_address_external VARCHAR(32) DEFAULT '0.0.0.0', + host VARCHAR(32), + processed_messages BIGINT DEFAULT 0, + catalog_item_message_counts TEXT, + message_speed FLOAT DEFAULT 0.000, + last_command_time BIGINT, + network_interface VARCHAR(32) DEFAULT 'dynamic', + docker_url VARCHAR(255) DEFAULT 'unix:///var/run/docker.sock', + disk_limit FLOAT DEFAULT 50, + disk_directory VARCHAR(255) DEFAULT '/var/lib/iofog/', + memory_limit FLOAT DEFAULT 4096, + cpu_limit FLOAT DEFAULT 80, + log_limit FLOAT DEFAULT 10, + log_directory VARCHAR(255) DEFAULT '/var/log/iofog/', + bluetooth BOOLEAN DEFAULT FALSE, + hal BOOLEAN DEFAULT FALSE, + log_file_count BIGINT DEFAULT 10, + `version` TEXT, + is_ready_to_upgrade BOOLEAN DEFAULT TRUE, + is_ready_to_rollback BOOLEAN DEFAULT FALSE, + status_frequency INT DEFAULT 10, + change_frequency INT DEFAULT 20, + device_scan_frequency INT DEFAULT 20, + tunnel VARCHAR(255) DEFAULT '', + isolated_docker_container BOOLEAN DEFAULT TRUE, + docker_pruning_freq INT DEFAULT 1, + available_disk_threshold FLOAT DEFAULT 20, + log_level VARCHAR(10) DEFAULT 'INFO', + is_system BOOLEAN DEFAULT FALSE, + router_id INT DEFAULT 0, + time_zone VARCHAR(32) DEFAULT 
'Etc/UTC', + created_at DATETIME, + updated_at DATETIME, + fog_type_id INT DEFAULT 0, + FOREIGN KEY (fog_type_id) REFERENCES FogTypes (id) +); + +CREATE INDEX idx_fog_fog_type_id ON Fogs (fog_type_id); + +CREATE TABLE IF NOT EXISTS ChangeTrackings ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + microservice_config BOOLEAN DEFAULT false, + reboot BOOLEAN DEFAULT false, + deletenode BOOLEAN DEFAULT false, + version BOOLEAN DEFAULT false, + microservice_list BOOLEAN DEFAULT false, + config BOOLEAN DEFAULT false, + routing BOOLEAN DEFAULT false, + registries BOOLEAN DEFAULT false, + tunnel BOOLEAN DEFAULT false, + diagnostics BOOLEAN DEFAULT false, + router_changed BOOLEAN DEFAULT false, + image_snapshot BOOLEAN DEFAULT false, + prune BOOLEAN DEFAULT false, + linked_edge_resources BOOLEAN DEFAULT false, + last_updated VARCHAR(255) DEFAULT false, + iofog_uuid VARCHAR(32), + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_change_tracking_iofog_uuid ON ChangeTrackings (iofog_uuid); + +CREATE TABLE IF NOT EXISTS FogAccessTokens ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + expiration_time BIGINT, + token TEXT, + iofog_uuid VARCHAR(32), + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_access_tokens_iofogUuid ON FogAccessTokens (iofog_uuid); + +CREATE TABLE IF NOT EXISTS FogProvisionKeys ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + provisioning_string VARCHAR(100), + expiration_time BIGINT, + iofog_uuid VARCHAR(32), + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_provision_keys_iofogUuid ON FogProvisionKeys (iofog_uuid); + +CREATE TABLE IF NOT EXISTS FogVersionCommands ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + version_command VARCHAR(100), + iofog_uuid VARCHAR(32), + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_version_commands_iofogUuid ON FogVersionCommands (iofog_uuid); 
+ +CREATE TABLE IF NOT EXISTS HWInfos ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + info TEXT, + created_at DATETIME, + updated_at DATETIME, + iofog_uuid VARCHAR(32), + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_hw_infos_iofogUuid ON HWInfos (iofog_uuid); + +CREATE TABLE IF NOT EXISTS USBInfos ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + info TEXT, + created_at DATETIME, + updated_at DATETIME, + iofog_uuid VARCHAR(32), + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_usb_infos_iofogUuid ON USBInfos (iofog_uuid); + +CREATE TABLE IF NOT EXISTS Tunnels ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + username TEXT, + password TEXT, + host TEXT, + remote_port INT, + local_port INT DEFAULT 22, + rsa_key TEXT, + closed BOOLEAN DEFAULT false, + iofog_uuid VARCHAR(32), + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_tunnels_iofogUuid ON Tunnels (iofog_uuid); + +CREATE TABLE IF NOT EXISTS Microservices ( + uuid VARCHAR(32) PRIMARY KEY NOT NULL, + config TEXT, + name VARCHAR(255) DEFAULT 'New Microservice', + config_last_updated BIGINT, + rebuild BOOLEAN DEFAULT false, + root_host_access BOOLEAN DEFAULT false, + log_size BIGINT DEFAULT 0, + image_snapshot VARCHAR(255) DEFAULT '', + `delete` BOOLEAN DEFAULT false, + delete_with_cleanup BOOLEAN DEFAULT false, + created_at DATETIME, + updated_at DATETIME, + catalog_item_id INT, + registry_id INT DEFAULT 1, + iofog_uuid VARCHAR(32), + application_id INT, + FOREIGN KEY (catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE, + FOREIGN KEY (registry_id) REFERENCES Registries (id) ON DELETE SET NULL, + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, + FOREIGN KEY (application_id) REFERENCES Flows (id) ON DELETE CASCADE +); + +CREATE INDEX idx_microservices_catalogItemId ON Microservices (catalog_item_id); +CREATE INDEX idx_microservices_registryId ON 
Microservices (registry_id); +CREATE INDEX idx_microservices_iofogUuid ON Microservices (iofog_uuid); +CREATE INDEX idx_microservices_applicationId ON Microservices (application_id); + +CREATE TABLE IF NOT EXISTS MicroserviceArgs ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + cmd TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_args_microserviceUuid ON MicroserviceArgs (microservice_uuid); + +CREATE TABLE IF NOT EXISTS MicroserviceEnvs ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + `key` TEXT, + `value` TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_envs_microserviceUuid ON MicroserviceEnvs (microservice_uuid); + +CREATE TABLE IF NOT EXISTS MicroserviceExtraHost ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + template_type TEXT, + name TEXT, + public_port INT, + template TEXT, + `value` TEXT, + microservice_uuid VARCHAR(32), + target_microservice_uuid VARCHAR(32), + target_fog_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, + FOREIGN KEY (target_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, + FOREIGN KEY (target_fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_extra_host_microserviceUuid ON MicroserviceExtraHost (microservice_uuid); +CREATE INDEX idx_microservice_extra_host_targetMicroserviceUuid ON MicroserviceExtraHost (target_microservice_uuid); +CREATE INDEX idx_microservice_extra_host_targetFogUuid ON MicroserviceExtraHost (target_fog_uuid); + +CREATE TABLE IF NOT EXISTS MicroservicePorts ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + port_internal INT, + port_external INT, + is_udp BOOLEAN, + is_public BOOLEAN, + is_proxy BOOLEAN, + created_at DATETIME, + updated_at DATETIME, + microservice_uuid VARCHAR(32), + 
FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_port_microserviceUuid ON MicroservicePorts (microservice_uuid); + +CREATE TABLE IF NOT EXISTS MicroservicePublicPorts ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + port_id INT UNIQUE, + host_id VARCHAR(255) UNIQUE, + local_proxy_id TEXT, + remote_proxy_id TEXT, + public_port INT, + queue_name TEXT, + schemes VARCHAR(255) DEFAULT '["https"]', + is_tcp BOOLEAN DEFAULT false, + created_at DATETIME, + updated_at DATETIME, + protocol VARCHAR(255) AS (CASE WHEN is_tcp THEN 'tcp' ELSE 'http' END) VIRTUAL, + FOREIGN KEY (port_id) REFERENCES MicroservicePorts (id) ON DELETE CASCADE, + FOREIGN KEY (host_id) REFERENCES Fogs (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_public_port_portId ON MicroservicePublicPorts (port_id); +CREATE INDEX idx_microservice_public_port_hostId ON MicroservicePublicPorts (host_id); + + +CREATE TABLE IF NOT EXISTS MicroserviceStatuses ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + status VARCHAR(255) DEFAULT 'QUEUED', + operating_duration BIGINT DEFAULT 0, + start_time BIGINT DEFAULT 0, + cpu_usage FLOAT DEFAULT 0.000, + memory_usage BIGINT DEFAULT 0, + container_id VARCHAR(255) DEFAULT '', + percentage FLOAT DEFAULT 0.00, + error_message TEXT, + microservice_uuid VARCHAR(32), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_status_microserviceUuid ON MicroserviceStatuses (microservice_uuid); + +CREATE TABLE IF NOT EXISTS StraceDiagnostics ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + strace_run BOOLEAN, + buffer VARCHAR(255) DEFAULT '', + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_strace_diagnostics_microserviceUuid ON StraceDiagnostics (microservice_uuid); + +CREATE TABLE IF NOT EXISTS 
VolumeMappings ( + uuid INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + host_destination TEXT, + container_destination TEXT, + access_mode TEXT, + type TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_volume_mappings_microserviceUuid ON VolumeMappings (microservice_uuid); + + +CREATE TABLE IF NOT EXISTS CatalogItemImages ( + id INT AUTO_INCREMENT PRIMARY KEY, + container_image TEXT, + catalog_item_id INT, + microservice_uuid VARCHAR(32), + fog_type_id INT, + FOREIGN KEY (catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, + FOREIGN KEY (fog_type_id) REFERENCES FogTypes (id) ON DELETE CASCADE +); + +CREATE INDEX idx_catalog_item_image_catalog_item_id ON CatalogItemImages (catalog_item_id); +CREATE INDEX idx_catalog_item_image_microservice_uuid ON CatalogItemImages (microservice_uuid); +CREATE INDEX idx_catalog_item_image_fog_type_id ON CatalogItemImages (fog_type_id); + +CREATE TABLE IF NOT EXISTS CatalogItemInputTypes ( + id INT AUTO_INCREMENT PRIMARY KEY, + info_type TEXT, + info_format TEXT, + catalog_item_id INT, + FOREIGN KEY (catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE +); + +CREATE INDEX idx_catalog_item_input_type_catalog_item_id ON CatalogItemInputTypes (catalog_item_id); + +CREATE TABLE IF NOT EXISTS CatalogItemOutputTypes ( + id INT AUTO_INCREMENT PRIMARY KEY, + info_type TEXT, + info_format TEXT, + catalog_item_id INT, + FOREIGN KEY (catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE +); + +CREATE INDEX idx_catalog_item_output_type_catalog_item_id ON CatalogItemOutputTypes (catalog_item_id); + + +CREATE TABLE IF NOT EXISTS Routings ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + name TEXT NOT NULL, + source_microservice_uuid VARCHAR(32), + dest_microservice_uuid VARCHAR(32), + application_id INT, + FOREIGN KEY 
(source_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, + FOREIGN KEY (dest_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, + FOREIGN KEY (application_id) REFERENCES Flows (id) ON DELETE CASCADE +); + +CREATE INDEX idx_routing_sourceMicroserviceUuid ON Routings (source_microservice_uuid); +CREATE INDEX idx_routing_destMicroserviceUuid ON Routings (dest_microservice_uuid); +CREATE INDEX idx_routing_applicationId ON Routings (application_id); + +CREATE TABLE IF NOT EXISTS Routers ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + is_edge BOOLEAN DEFAULT true, + messaging_port INT DEFAULT 5671, + edge_router_port INT, + inter_router_port INT, + host TEXT, + is_default BOOLEAN DEFAULT false, + iofog_uuid VARCHAR(32), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE + +); + +CREATE INDEX idx_router_iofogUuid ON Routers (iofog_uuid); + + +CREATE TABLE RouterConnections ( + id INT AUTO_INCREMENT PRIMARY KEY, + source_router INT, + dest_router INT, + created_at DATETIME NOT NULL, + updated_at DATETIME NOT NULL, + FOREIGN KEY (source_router) REFERENCES Routers(id) ON DELETE CASCADE, + FOREIGN KEY (dest_router) REFERENCES Routers(id) ON DELETE CASCADE +); + +CREATE INDEX idx_routerconnections_sourceRouter ON RouterConnections (source_router); +CREATE INDEX idx_routerconnections_destRouter ON RouterConnections (dest_router); + + + +CREATE TABLE IF NOT EXISTS Config ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + `key` VARCHAR(255) NOT NULL UNIQUE, + value VARCHAR(255) NOT NULL, + created_at DATETIME, + updated_at DATETIME +); + +CREATE INDEX idx_config_key ON Config (`key`); + + +CREATE TABLE IF NOT EXISTS Tags ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + value VARCHAR(255) UNIQUE NOT NULL +); + +CREATE TABLE IF NOT EXISTS IofogTags ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + fog_uuid VARCHAR(32), + tag_id INT, + FOREIGN KEY (fog_uuid) REFERENCES Fogs 
(uuid) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE +); + +CREATE INDEX idx_iofogtags_fog_uuid ON IofogTags (fog_uuid); +CREATE INDEX idx_iofogtags_tag_id ON IofogTags (tag_id); + +CREATE TABLE IF NOT EXISTS EdgeResources ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + name VARCHAR(255) NOT NULL, + version TEXT, + description TEXT, + display_name TEXT, + display_color TEXT, + display_icon TEXT, + interface_protocol TEXT, + interface_id INT, + custom TEXT +); + + +CREATE TABLE IF NOT EXISTS AgentEdgeResources ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + fog_uuid VARCHAR(32), + edge_resource_id INT, + FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, + FOREIGN KEY (edge_resource_id) REFERENCES EdgeResources (id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS EdgeResourceOrchestrationTags ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + edge_resource_id INT, + tag_id INT, + FOREIGN KEY (edge_resource_id) REFERENCES EdgeResources (id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE +); + +CREATE INDEX idx_agentedgeresources_fog_id ON AgentEdgeResources (fog_uuid); +CREATE INDEX idx_agentedgeresources_edge_resource_id ON AgentEdgeResources (edge_resource_id); +CREATE INDEX idx_edgeresourceorchestrationtags_edge_resource_id ON EdgeResourceOrchestrationTags (edge_resource_id); +CREATE INDEX idx_edgeresourceorchestrationtags_tag_id ON EdgeResourceOrchestrationTags (tag_id); + +CREATE TABLE IF NOT EXISTS HTTPBasedResourceInterfaces ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + edge_resource_id INT, + FOREIGN KEY (edge_resource_id) REFERENCES EdgeResources (id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS HTTPBasedResourceInterfaceEndpoints ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + interface_id INT, + name TEXT, + description TEXT, + `method` TEXT, + url TEXT, + requestType TEXT, + responseType TEXT, + requestPayloadExample TEXT, + responsePayloadExample TEXT, 
+ FOREIGN KEY (interface_id) REFERENCES HTTPBasedResourceInterfaces (id) ON DELETE CASCADE +); + +CREATE INDEX idx_httpbasedresourceinterfaces_edge_resource_id ON HTTPBasedResourceInterfaces (edge_resource_id); +CREATE INDEX idx_httpbasedresourceinterfaceendpoints_interface_id ON HTTPBasedResourceInterfaceEndpoints (interface_id); + + +CREATE TABLE IF NOT EXISTS ApplicationTemplates ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + name VARCHAR(255) UNIQUE NOT NULL DEFAULT 'new-application', + description VARCHAR(255) DEFAULT '', + schema_version VARCHAR(255) DEFAULT '', + application_json LONGTEXT, + created_at DATETIME, + updated_at DATETIME + +); + + +CREATE TABLE IF NOT EXISTS ApplicationTemplateVariables ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + application_template_id INT NOT NULL, + `key` TEXT, + description VARCHAR(255) DEFAULT '', + default_value VARCHAR(255), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (application_template_id) REFERENCES ApplicationTemplates (id) ON DELETE CASCADE +); + +CREATE INDEX idx_applicationtemplatevariables_application_template_id ON ApplicationTemplateVariables (application_template_id); + +CREATE TABLE IF NOT EXISTS MicroserviceCdiDevices ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + cdi_devices TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_cdiDevices_microserviceUuid ON MicroserviceCdiDevices (microservice_uuid); + +ALTER TABLE Microservices +ADD COLUMN run_as_user TEXT DEFAULT NULL, +ADD COLUMN platform TEXT DEFAULT NULL, +ADD COLUMN runtime TEXT DEFAULT NULL; + +ALTER TABLE Fogs +RENAME COLUMN `system-available-disk` TO system_available_disk, +RENAME COLUMN `system-available-memory` TO system_available_memory, +RENAME COLUMN `system-total-cpu` TO system_total_cpu; + +ALTER TABLE Routers DROP COLUMN IF EXISTS require_ssl; +ALTER TABLE Routers DROP COLUMN IF EXISTS ssl_profile; +ALTER 
TABLE Routers DROP COLUMN IF EXISTS sasl_mechanisms; +ALTER TABLE Routers DROP COLUMN IF EXISTS authenticate_peer; +ALTER TABLE Routers DROP COLUMN IF EXISTS ca_cert; +ALTER TABLE Routers DROP COLUMN IF EXISTS tls_cert; +ALTER TABLE Routers DROP COLUMN IF EXISTS tls_key; + +CREATE TABLE IF NOT EXISTS MicroservicePubTags ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + microservice_uuid VARCHAR(32), + tag_id INT, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS MicroserviceSubTags ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + microservice_uuid VARCHAR(32), + tag_id INT, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE +); + +CREATE INDEX idx_microservicepubtags_microservice_uuid ON MicroservicePubTags (microservice_uuid); +CREATE INDEX idx_microservicesubtags_microservice_uuid ON MicroserviceSubTags (microservice_uuid); +CREATE INDEX idx_microservicepubtags_tag_id ON MicroservicePubTags (tag_id); +CREATE INDEX idx_microservicesubtags_tag_id ON MicroserviceSubTags (tag_id); + +CREATE TABLE IF NOT EXISTS MicroserviceCapAdd ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + cap_add TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_capAdd_microserviceUuid ON MicroserviceCapAdd (microservice_uuid); + +CREATE TABLE IF NOT EXISTS MicroserviceCapDrop ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + cap_drop TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_capDrop_microserviceUuid ON MicroserviceCapDrop (microservice_uuid); + +ALTER TABLE Microservices +ADD COLUMN annotations TEXT; + +CREATE TABLE IF NOT EXISTS FogPublicKeys ( 
+  id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,
+  public_key TEXT,
+  iofog_uuid VARCHAR(32),
+  created_at DATETIME,
+  updated_at DATETIME,
+  FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_fog_public_keys_iofogUuid ON FogPublicKeys (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS FogUsedTokens (
+  id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,
+  jti VARCHAR(255) NOT NULL,
+  iofog_uuid VARCHAR(32),
+  expiry_time DATETIME NOT NULL,
+  created_at DATETIME,
+  updated_at DATETIME,
+  FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_fog_used_tokens_iofogUuid ON FogUsedTokens (iofog_uuid);
+
+DROP TABLE IF EXISTS FogAccessTokens;
+
+ALTER TABLE MicroserviceStatuses ADD COLUMN ip_address TEXT;
+
+CREATE TABLE IF NOT EXISTS Secrets (
+  id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,
+  name VARCHAR(255) UNIQUE NOT NULL,
+  type VARCHAR(50) NOT NULL CHECK (type IN ('opaque', 'tls')),
+  data TEXT NOT NULL,
+  created_at DATETIME,
+  updated_at DATETIME
+);
+
+CREATE INDEX idx_secrets_name ON Secrets (name);
+
+CREATE TABLE IF NOT EXISTS Certificates (
+  id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,
+  name TEXT NOT NULL,
+  subject TEXT NOT NULL,
+  is_ca BOOLEAN DEFAULT false,
+  signed_by_id INT,
+  hosts TEXT,
+  valid_from DATETIME NOT NULL,
+  valid_to DATETIME NOT NULL,
+  serial_number TEXT NOT NULL,
+  secret_id INT,
+  created_at DATETIME,
+  updated_at DATETIME,
+  FOREIGN KEY (signed_by_id) REFERENCES Certificates (id) ON DELETE SET NULL,
+  FOREIGN KEY (secret_id) REFERENCES Secrets (id) ON DELETE CASCADE
+);
+
+CREATE UNIQUE INDEX idx_certificates_name_unique ON Certificates (name(255));
+CREATE INDEX idx_certificates_valid_to ON Certificates (valid_to);
+CREATE INDEX idx_certificates_is_ca ON Certificates (is_ca);
+CREATE INDEX idx_certificates_signed_by_id ON Certificates (signed_by_id);
+CREATE INDEX idx_certificates_secret_id ON Certificates (secret_id);
+
+CREATE TABLE IF NOT EXISTS Services (
+  id INT
AUTO_INCREMENT PRIMARY KEY NOT NULL,
+  name VARCHAR(255) UNIQUE NOT NULL,
+  type VARCHAR(50) NOT NULL,
+  resource TEXT NOT NULL,
+  target_port INT NOT NULL,
+  service_port INT,
+  bridge_port INT,
+  service_endpoint TEXT,
+  created_at DATETIME,
+  updated_at DATETIME
+);
+
+CREATE INDEX idx_services_name ON Services (name);
+CREATE INDEX idx_services_id ON Services (id);
+
+CREATE TABLE IF NOT EXISTS ServiceTags (
+  id INT AUTO_INCREMENT PRIMARY KEY NOT NULL,
+  service_id INT NOT NULL,
+  tag_id INT NOT NULL,
+  created_at DATETIME,
+  updated_at DATETIME,
+  FOREIGN KEY (service_id) REFERENCES Services (id) ON DELETE CASCADE,
+  FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_service_tags_service_id ON ServiceTags (service_id);
+CREATE INDEX idx_service_tags_tag_id ON ServiceTags (tag_id);
+
+COMMIT;
\ No newline at end of file
diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql
new file mode 100644
index 00000000..29faea83
--- /dev/null
+++ b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql
@@ -0,0 +1,717 @@
+CREATE TABLE IF NOT EXISTS "Flows" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+  name VARCHAR(255) UNIQUE,
+  description VARCHAR(255) DEFAULT '',
+  is_activated BOOLEAN DEFAULT false,
+  is_system BOOLEAN DEFAULT false,
+  created_at TIMESTAMP(0),
+  updated_at TIMESTAMP(0)
+);
+
+CREATE TABLE IF NOT EXISTS "Registries" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+  url VARCHAR(255),
+  is_public BOOLEAN,
+  secure BOOLEAN,
+  certificate TEXT,
+  requires_cert BOOLEAN,
+  user_name TEXT,
+  password TEXT,
+  user_email TEXT
+);
+
+
+CREATE TABLE IF NOT EXISTS "CatalogItems" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+  name VARCHAR(255) UNIQUE,
+  description VARCHAR(255),
+  category TEXT,
+  config_example VARCHAR(255) DEFAULT '{}',
+  publisher TEXT,
+  disk_required BIGINT DEFAULT 0,
+  ram_required BIGINT DEFAULT 0,
+  picture
VARCHAR(255) DEFAULT 'images/shared/default.png', + is_public BOOLEAN DEFAULT false, + registry_id INT, + FOREIGN KEY (registry_id) REFERENCES "Registries" (id) ON DELETE SET NULL +); + +CREATE INDEX idx_catalog_item_registry_id ON "CatalogItems" (registry_id); + + +CREATE TABLE IF NOT EXISTS "FogTypes" ( + id INT PRIMARY KEY, + name TEXT, + image TEXT, + description TEXT, + network_catalog_item_id INT, + hal_catalog_item_id INT, + bluetooth_catalog_item_id INT, + FOREIGN KEY (network_catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE, + FOREIGN KEY (hal_catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE, + FOREIGN KEY (bluetooth_catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_type_network_catalog_item_id ON "FogTypes" (network_catalog_item_id); +CREATE INDEX idx_fog_type_hal_catalog_item_id ON "FogTypes" (hal_catalog_item_id); +CREATE INDEX idx_fog_type_bluetooth_catalog_item_id ON "FogTypes" (bluetooth_catalog_item_id); + + +CREATE TABLE IF NOT EXISTS "Fogs" ( + uuid VARCHAR(32) PRIMARY KEY NOT NULL, + name VARCHAR(255) DEFAULT 'Unnamed ioFog 1', + location TEXT, + gps_mode TEXT, + latitude DOUBLE PRECISION, + longitude DOUBLE PRECISION, + description TEXT, + last_active BIGINT, + daemon_status VARCHAR(32) DEFAULT 'UNKNOWN', + daemon_operating_duration BIGINT DEFAULT 0, + daemon_last_start BIGINT, + memory_usage DOUBLE PRECISION DEFAULT 0.000, + disk_usage DOUBLE PRECISION DEFAULT 0.000, + cpu_usage DOUBLE PRECISION DEFAULT 0.00, + memory_violation TEXT, + disk_violation TEXT, + cpu_violation TEXT, + system_available_disk BIGINT, + system_available_memory BIGINT, + system_total_cpu DOUBLE PRECISION, + security_status VARCHAR(32) DEFAULT 'OK', + security_violation_info VARCHAR(32) DEFAULT 'No violation', + catalog_item_status TEXT, + repository_count BIGINT DEFAULT 0, + repository_status TEXT, + system_time BIGINT, + last_status_time BIGINT, + ip_address VARCHAR(32) DEFAULT '0.0.0.0', + 
ip_address_external VARCHAR(32) DEFAULT '0.0.0.0', + host VARCHAR(32), + processed_messages BIGINT DEFAULT 0, + catalog_item_message_counts TEXT, + message_speed DOUBLE PRECISION DEFAULT 0.000, + last_command_time BIGINT, + network_interface VARCHAR(32) DEFAULT 'dynamic', + docker_url VARCHAR(255) DEFAULT 'unix:///var/run/docker.sock', + disk_limit DOUBLE PRECISION DEFAULT 50, + disk_directory VARCHAR(255) DEFAULT '/var/lib/iofog/', + memory_limit DOUBLE PRECISION DEFAULT 4096, + cpu_limit DOUBLE PRECISION DEFAULT 80, + log_limit DOUBLE PRECISION DEFAULT 10, + log_directory VARCHAR(255) DEFAULT '/var/log/iofog/', + bluetooth BOOLEAN DEFAULT FALSE, + hal BOOLEAN DEFAULT FALSE, + log_file_count BIGINT DEFAULT 10, + version TEXT, + is_ready_to_upgrade BOOLEAN DEFAULT TRUE, + is_ready_to_rollback BOOLEAN DEFAULT FALSE, + status_frequency INT DEFAULT 10, + change_frequency INT DEFAULT 20, + device_scan_frequency INT DEFAULT 20, + tunnel VARCHAR(255) DEFAULT '', + isolated_docker_container BOOLEAN DEFAULT TRUE, + docker_pruning_freq INT DEFAULT 1, + available_disk_threshold DOUBLE PRECISION DEFAULT 20, + log_level VARCHAR(10) DEFAULT 'INFO', + is_system BOOLEAN DEFAULT FALSE, + router_id INT DEFAULT 0, + time_zone VARCHAR(32) DEFAULT 'Etc/UTC', + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + fog_type_id INT DEFAULT 0, + FOREIGN KEY (fog_type_id) REFERENCES "FogTypes" (id) +); + +CREATE INDEX idx_fog_fog_type_id ON "Fogs" (fog_type_id); + +CREATE TABLE IF NOT EXISTS "ChangeTrackings" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + microservice_config BOOLEAN DEFAULT false, + reboot BOOLEAN DEFAULT false, + deletenode BOOLEAN DEFAULT false, + version BOOLEAN DEFAULT false, + microservice_list BOOLEAN DEFAULT false, + config BOOLEAN DEFAULT false, + routing BOOLEAN DEFAULT false, + registries BOOLEAN DEFAULT false, + tunnel BOOLEAN DEFAULT false, + diagnostics BOOLEAN DEFAULT false, + router_changed BOOLEAN DEFAULT false, + image_snapshot BOOLEAN 
DEFAULT false,
+  prune BOOLEAN DEFAULT false,
+  linked_edge_resources BOOLEAN DEFAULT false,
+  last_updated VARCHAR(255) DEFAULT 'false',
+  iofog_uuid VARCHAR(32),
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_change_tracking_iofog_uuid ON "ChangeTrackings" (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS "FogAccessTokens" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL,
+  expiration_time BIGINT,
+  token TEXT,
+  iofog_uuid VARCHAR(32),
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_fog_access_tokens_iofogUuid ON "FogAccessTokens" (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS "FogProvisionKeys" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL,
+  provisioning_string VARCHAR(100),
+  expiration_time BIGINT,
+  iofog_uuid VARCHAR(32),
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_fog_provision_keys_iofogUuid ON "FogProvisionKeys" (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS "FogVersionCommands" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL,
+  version_command VARCHAR(100),
+  iofog_uuid VARCHAR(32),
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_fog_version_commands_iofogUuid ON "FogVersionCommands" (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS "HWInfos" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL,
+  info TEXT,
+  created_at TIMESTAMP(0),
+  updated_at TIMESTAMP(0),
+  iofog_uuid VARCHAR(32),
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_hw_infos_iofogUuid ON "HWInfos" (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS "USBInfos" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL,
+  info TEXT,
+  created_at TIMESTAMP(0),
+  updated_at TIMESTAMP(0),
+  iofog_uuid VARCHAR(32),
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_usb_infos_iofogUuid ON
"USBInfos" (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS "Tunnels" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL,
+  username TEXT,
+  password TEXT,
+  host TEXT,
+  remote_port INT,
+  local_port INT DEFAULT 22,
+  rsa_key TEXT,
+  closed BOOLEAN DEFAULT false,
+  iofog_uuid VARCHAR(32),
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_tunnels_iofogUuid ON "Tunnels" (iofog_uuid);
+
+CREATE TABLE IF NOT EXISTS "Microservices" (
+  uuid VARCHAR(32) PRIMARY KEY NOT NULL,
+  config TEXT,
+  name VARCHAR(255) DEFAULT 'New Microservice',
+  config_last_updated BIGINT,
+  rebuild BOOLEAN DEFAULT false,
+  root_host_access BOOLEAN DEFAULT false,
+  log_size BIGINT DEFAULT 0,
+  image_snapshot VARCHAR(255) DEFAULT '',
+  "delete" BOOLEAN DEFAULT false,
+  delete_with_cleanup BOOLEAN DEFAULT false,
+  created_at TIMESTAMP(0),
+  updated_at TIMESTAMP(0),
+  catalog_item_id INT,
+  registry_id INT DEFAULT 1,
+  iofog_uuid VARCHAR(32),
+  application_id INT,
+  FOREIGN KEY (catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE,
+  FOREIGN KEY (registry_id) REFERENCES "Registries" (id) ON DELETE SET NULL,
+  FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE,
+  FOREIGN KEY (application_id) REFERENCES "Flows" (id) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_microservices_catalogItemId ON "Microservices" (catalog_item_id);
+CREATE INDEX idx_microservices_registryId ON "Microservices" (registry_id);
+CREATE INDEX idx_microservices_iofogUuid ON "Microservices" (iofog_uuid);
+CREATE INDEX idx_microservices_applicationId ON "Microservices" (application_id);
+
+CREATE TABLE IF NOT EXISTS "MicroserviceArgs" (
+  id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL,
+  cmd TEXT,
+  microservice_uuid VARCHAR(32),
+  FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE
+);
+
+CREATE INDEX idx_microservice_args_microserviceUuid ON "MicroserviceArgs" (microservice_uuid);
+
+CREATE TABLE IF NOT EXISTS
"MicroserviceEnvs" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + key TEXT, + value TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_envs_microserviceUuid ON "MicroserviceEnvs" (microservice_uuid); + +CREATE TABLE IF NOT EXISTS "MicroserviceExtraHost" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + template_type TEXT, + name TEXT, + public_port INT, + template TEXT, + value TEXT, + microservice_uuid VARCHAR(32), + target_microservice_uuid VARCHAR(32), + target_fog_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, + FOREIGN KEY (target_microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, + FOREIGN KEY (target_fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_extra_host_microserviceUuid ON "MicroserviceExtraHost" (microservice_uuid); +CREATE INDEX idx_microservice_extra_host_targetMicroserviceUuid ON "MicroserviceExtraHost" (target_microservice_uuid); +CREATE INDEX idx_microservice_extra_host_targetFogUuid ON "MicroserviceExtraHost" (target_fog_uuid); + +CREATE TABLE IF NOT EXISTS "MicroservicePorts" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + port_internal INT, + port_external INT, + is_udp BOOLEAN, + is_public BOOLEAN, + is_proxy BOOLEAN, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_port_microserviceUuid ON "MicroservicePorts" (microservice_uuid); + +CREATE TABLE IF NOT EXISTS "MicroservicePublicPorts" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + port_id INT UNIQUE, + host_id VARCHAR(255) UNIQUE, + local_proxy_id TEXT, + remote_proxy_id TEXT, + public_port INT, + queue_name TEXT, + schemes VARCHAR(255) DEFAULT 
'["https"]', + is_tcp BOOLEAN DEFAULT false, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + protocol VARCHAR(255) GENERATED ALWAYS AS (CASE WHEN is_tcp THEN 'tcp' ELSE 'http' END) STORED, + FOREIGN KEY (port_id) REFERENCES "MicroservicePorts" (id) ON DELETE CASCADE, + FOREIGN KEY (host_id) REFERENCES "Fogs" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_public_port_portId ON "MicroservicePublicPorts" (port_id); +CREATE INDEX idx_microservice_public_port_hostId ON "MicroservicePublicPorts" (host_id); + + +CREATE TABLE IF NOT EXISTS "MicroserviceStatuses" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + status VARCHAR(255) DEFAULT 'QUEUED', + operating_duration BIGINT DEFAULT 0, + start_time BIGINT DEFAULT 0, + cpu_usage DOUBLE PRECISION DEFAULT 0.000, + memory_usage BIGINT DEFAULT 0, + container_id VARCHAR(255) DEFAULT '', + percentage DOUBLE PRECISION DEFAULT 0.00, + error_message TEXT, + microservice_uuid VARCHAR(32), + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_status_microserviceUuid ON "MicroserviceStatuses" (microservice_uuid); + +CREATE TABLE IF NOT EXISTS "StraceDiagnostics" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + strace_run BOOLEAN, + buffer VARCHAR(255) DEFAULT '', + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_strace_diagnostics_microserviceUuid ON "StraceDiagnostics" (microservice_uuid); + +CREATE TABLE IF NOT EXISTS "VolumeMappings" ( + uuid INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + host_destination TEXT, + container_destination TEXT, + access_mode TEXT, + type TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_volume_mappings_microserviceUuid ON 
"VolumeMappings" (microservice_uuid); + + +CREATE TABLE IF NOT EXISTS "CatalogItemImages" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + container_image TEXT, + catalog_item_id INT, + microservice_uuid VARCHAR(32), + fog_type_id INT, + FOREIGN KEY (catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE, + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, + FOREIGN KEY (fog_type_id) REFERENCES "FogTypes" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_catalog_item_image_catalog_item_id ON "CatalogItemImages" (catalog_item_id); +CREATE INDEX idx_catalog_item_image_microservice_uuid ON "CatalogItemImages" (microservice_uuid); +CREATE INDEX idx_catalog_item_image_fog_type_id ON "CatalogItemImages" (fog_type_id); + +CREATE TABLE IF NOT EXISTS "CatalogItemInputTypes" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + info_type TEXT, + info_format TEXT, + catalog_item_id INT, + FOREIGN KEY (catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_catalog_item_input_type_catalog_item_id ON "CatalogItemInputTypes" (catalog_item_id); + +CREATE TABLE IF NOT EXISTS "CatalogItemOutputTypes" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + info_type TEXT, + info_format TEXT, + catalog_item_id INT, + FOREIGN KEY (catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_catalog_item_output_type_catalog_item_id ON "CatalogItemOutputTypes" (catalog_item_id); + + +CREATE TABLE IF NOT EXISTS "Routings" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + name TEXT NOT NULL, + source_microservice_uuid VARCHAR(32), + dest_microservice_uuid VARCHAR(32), + application_id INT, + FOREIGN KEY (source_microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, + FOREIGN KEY (dest_microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, + FOREIGN KEY (application_id) REFERENCES "Flows" (id) ON DELETE CASCADE +); + +CREATE INDEX 
idx_routing_sourceMicroserviceUuid ON "Routings" (source_microservice_uuid); +CREATE INDEX idx_routing_destMicroserviceUuid ON "Routings" (dest_microservice_uuid); +CREATE INDEX idx_routing_applicationId ON "Routings" (application_id); + +CREATE TABLE IF NOT EXISTS "Routers" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + is_edge BOOLEAN DEFAULT true, + messaging_port INT DEFAULT 5671, + edge_router_port INT, + inter_router_port INT, + host TEXT, + is_default BOOLEAN DEFAULT false, + iofog_uuid VARCHAR(32), + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE + +); + +CREATE INDEX idx_router_iofogUuid ON "Routers" (iofog_uuid); + + +CREATE TABLE "RouterConnections" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + source_router INT, + dest_router INT, + created_at TIMESTAMP(0) NOT NULL, + updated_at TIMESTAMP(0) NOT NULL, + FOREIGN KEY (source_router) REFERENCES "Routers"(id) ON DELETE CASCADE, + FOREIGN KEY (dest_router) REFERENCES "Routers"(id) ON DELETE CASCADE +); + +CREATE INDEX idx_routerconnections_sourceRouter ON "RouterConnections" (source_router); +CREATE INDEX idx_routerconnections_destRouter ON "RouterConnections" (dest_router); + + + +CREATE TABLE IF NOT EXISTS "Config" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + key VARCHAR(255) NOT NULL UNIQUE, + value VARCHAR(255) NOT NULL, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0) +); + +CREATE INDEX idx_config_key ON "Config" (key); + + +CREATE TABLE IF NOT EXISTS "Tags" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + value VARCHAR(255) UNIQUE NOT NULL +); + +CREATE TABLE IF NOT EXISTS "IofogTags" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + fog_uuid VARCHAR(32), + tag_id INT, + FOREIGN KEY (fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_iofogtags_fog_uuid ON 
"IofogTags" (fog_uuid); +CREATE INDEX idx_iofogtags_tag_id ON "IofogTags" (tag_id); + +CREATE TABLE IF NOT EXISTS "EdgeResources" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + name VARCHAR(255) NOT NULL, + version TEXT, + description TEXT, + display_name TEXT, + display_color TEXT, + display_icon TEXT, + interface_protocol TEXT, + interface_id INT, + custom TEXT +); + + +CREATE TABLE IF NOT EXISTS "AgentEdgeResources" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + fog_uuid VARCHAR(32), + edge_resource_id INT, + FOREIGN KEY (fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE, + FOREIGN KEY (edge_resource_id) REFERENCES "EdgeResources" (id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS "EdgeResourceOrchestrationTags" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + edge_resource_id INT, + tag_id INT, + FOREIGN KEY (edge_resource_id) REFERENCES "EdgeResources" (id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_agentedgeresources_fog_id ON "AgentEdgeResources" (fog_uuid); +CREATE INDEX idx_agentedgeresources_edge_resource_id ON "AgentEdgeResources" (edge_resource_id); +CREATE INDEX idx_edgeresourceorchestrationtags_edge_resource_id ON "EdgeResourceOrchestrationTags" (edge_resource_id); +CREATE INDEX idx_edgeresourceorchestrationtags_tag_id ON "EdgeResourceOrchestrationTags" (tag_id); + +CREATE TABLE IF NOT EXISTS "HTTPBasedResourceInterfaces" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + edge_resource_id INT, + FOREIGN KEY (edge_resource_id) REFERENCES "EdgeResources" (id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS "HTTPBasedResourceInterfaceEndpoints" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + interface_id INT, + name TEXT, + description TEXT, + method TEXT, + url TEXT, + requestType TEXT, + responseType TEXT, + requestPayloadExample TEXT, + responsePayloadExample TEXT, + FOREIGN KEY (interface_id) 
REFERENCES "HTTPBasedResourceInterfaces" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_httpbasedresourceinterfaces_edge_resource_id ON "HTTPBasedResourceInterfaces" (edge_resource_id); +CREATE INDEX idx_httpbasedresourceinterfaceendpoints_interface_id ON "HTTPBasedResourceInterfaceEndpoints" (interface_id); + + +CREATE TABLE IF NOT EXISTS "ApplicationTemplates" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + name VARCHAR(255) UNIQUE NOT NULL DEFAULT 'new-application', + description VARCHAR(255) DEFAULT '', + schema_version VARCHAR(255) DEFAULT '', + application_json TEXT, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0) + +); + + +CREATE TABLE IF NOT EXISTS "ApplicationTemplateVariables" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + application_template_id INT NOT NULL, + key TEXT, + description VARCHAR(255) DEFAULT '', + default_value VARCHAR(255), + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (application_template_id) REFERENCES "ApplicationTemplates" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_applicationtemplatevariables_application_template_id ON "ApplicationTemplateVariables" (application_template_id); + +CREATE TABLE IF NOT EXISTS "MicroserviceCdiDevices" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + cdi_devices TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_cdiDevices_microserviceUuid ON "MicroserviceCdiDevices" (microservice_uuid); + +ALTER TABLE "Microservices" +ADD COLUMN run_as_user TEXT DEFAULT NULL, +ADD COLUMN platform TEXT DEFAULT NULL, +ADD COLUMN runtime TEXT DEFAULT NULL; + +ALTER TABLE "Routers" +ADD COLUMN require_ssl TEXT, +ADD COLUMN ssl_profile TEXT, +ADD COLUMN sasl_mechanisms TEXT, +ADD COLUMN authenticate_peer TEXT, +ADD COLUMN ca_cert TEXT, +ADD COLUMN tls_cert TEXT, +ADD COLUMN tls_key TEXT; + +CREATE TABLE IF NOT EXISTS "MicroservicePubTags" 
( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + microservice_uuid VARCHAR(32), + tag_id INT, + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS "MicroserviceSubTags" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + microservice_uuid VARCHAR(32), + tag_id INT, + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_microservicepubtags_microservice_uuid ON "MicroservicePubTags" (microservice_uuid); +CREATE INDEX idx_microservicesubtags_microservice_uuid ON "MicroserviceSubTags" (microservice_uuid); +CREATE INDEX idx_microservicepubtags_tag_id ON "MicroservicePubTags" (tag_id); +CREATE INDEX idx_microservicesubtags_tag_id ON "MicroserviceSubTags" (tag_id); + +CREATE TABLE IF NOT EXISTS "MicroserviceCapAdd" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + cap_add TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_capAdd_microserviceUuid ON "MicroserviceCapAdd" (microservice_uuid); + +CREATE TABLE IF NOT EXISTS "MicroserviceCapDrop" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + cap_drop TEXT, + microservice_uuid VARCHAR(32), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_capDrop_microserviceUuid ON "MicroserviceCapDrop" (microservice_uuid); + +ALTER TABLE "Microservices" +ADD COLUMN annotations TEXT; + +CREATE TABLE IF NOT EXISTS "FogPublicKeys" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + public_key TEXT, + iofog_uuid VARCHAR(32), + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE +); 
+ +CREATE INDEX idx_fog_public_keys_iofogUuid ON "FogPublicKeys" (iofog_uuid); + +CREATE TABLE IF NOT EXISTS "FogUsedTokens" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + jti VARCHAR(255) NOT NULL, + iofog_uuid VARCHAR(32), + expiry_time TIMESTAMP(0) NOT NULL, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_used_tokens_iofogUuid ON "FogUsedTokens" (iofog_uuid); + +ALTER TABLE "MicroserviceStatuses" +ADD COLUMN ip_address TEXT; + +DROP TABLE IF EXISTS "FogAccessTokens"; + +CREATE TABLE IF NOT EXISTS "Secrets" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + name VARCHAR(255) UNIQUE NOT NULL, + type VARCHAR(50) NOT NULL CHECK (type IN ('opaque', 'tls')), + data TEXT NOT NULL, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0) +); + +CREATE INDEX idx_secrets_name ON "Secrets" (name); + +CREATE TABLE IF NOT EXISTS "Certificates" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + name TEXT UNIQUE NOT NULL, + subject TEXT NOT NULL, + is_ca BOOLEAN DEFAULT false, + signed_by_id INT, + hosts TEXT, + valid_from TIMESTAMP(0) NOT NULL, + valid_to TIMESTAMP(0) NOT NULL, + serial_number TEXT NOT NULL, + secret_id INT, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (signed_by_id) REFERENCES "Certificates" (id) ON DELETE SET NULL, + FOREIGN KEY (secret_id) REFERENCES "Secrets" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_certificates_name ON "Certificates" (name); +CREATE INDEX idx_certificates_valid_to ON "Certificates" (valid_to); +CREATE INDEX idx_certificates_is_ca ON "Certificates" (is_ca); +CREATE INDEX idx_certificates_signed_by_id ON "Certificates" (signed_by_id); +CREATE INDEX idx_certificates_secret_id ON "Certificates" (secret_id); + +CREATE TABLE IF NOT EXISTS "Services" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + name TEXT UNIQUE NOT NULL, + type TEXT NOT NULL, + resource 
TEXT NOT NULL, + target_port INTEGER NOT NULL, + service_port INTEGER, + bridge_port INTEGER, + service_endpoint TEXT, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0) +); + +CREATE INDEX idx_services_name ON "Services" (name); +CREATE INDEX idx_services_id ON "Services" (id); + +CREATE TABLE IF NOT EXISTS "ServiceTags" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + service_id INTEGER NOT NULL, + tag_id INTEGER NOT NULL, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (service_id) REFERENCES "Services" (id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE +); + +CREATE INDEX idx_service_tags_service_id ON "ServiceTags" (service_id); +CREATE INDEX idx_service_tags_tag_id ON "ServiceTags" (tag_id); + +ALTER TABLE "Routers" DROP COLUMN IF EXISTS require_ssl; +ALTER TABLE "Routers" DROP COLUMN IF EXISTS ssl_profile; +ALTER TABLE "Routers" DROP COLUMN IF EXISTS sasl_mechanisms; +ALTER TABLE "Routers" DROP COLUMN IF EXISTS authenticate_peer; +ALTER TABLE "Routers" DROP COLUMN IF EXISTS ca_cert; +ALTER TABLE "Routers" DROP COLUMN IF EXISTS tls_cert; +ALTER TABLE "Routers" DROP COLUMN IF EXISTS tls_key; \ No newline at end of file diff --git a/src/data/migrations/db_migration_v1.0.2.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql similarity index 90% rename from src/data/migrations/db_migration_v1.0.2.sql rename to src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql index 8f802a65..7a8a825d 100644 --- a/src/data/migrations/db_migration_v1.0.2.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql @@ -1,4 +1,3 @@ - CREATE TABLE IF NOT EXISTS Flows ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, name VARCHAR(255) UNIQUE, @@ -415,7 +414,7 @@ CREATE INDEX idx_routing_applicationId ON Routings (application_id); CREATE TABLE IF NOT EXISTS Routers ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, is_edge BOOLEAN DEFAULT true, - messaging_port INT DEFAULT 5672, + 
messaging_port INT DEFAULT 5671, edge_router_port INT, inter_router_port INT, host TEXT, @@ -565,14 +564,6 @@ CREATE TABLE IF NOT EXISTS MicroserviceCdiDevices ( CREATE INDEX idx_microservice_cdiDevices_microserviceUuid ON MicroserviceCdiDevices (microservice_uuid); -ALTER TABLE Routers ADD COLUMN require_ssl TEXT; -ALTER TABLE Routers ADD COLUMN ssl_profile TEXT; -ALTER TABLE Routers ADD COLUMN sasl_mechanisms TEXT; -ALTER TABLE Routers ADD COLUMN authenticate_peer TEXT; -ALTER TABLE Routers ADD COLUMN ca_cert TEXT; -ALTER TABLE Routers ADD COLUMN tls_cert TEXT; -ALTER TABLE Routers ADD COLUMN tls_key TEXT; - CREATE TABLE IF NOT EXISTS MicroservicePubTags ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, microservice_uuid VARCHAR(32), @@ -637,4 +628,70 @@ CREATE TABLE IF NOT EXISTS FogUsedTokens ( CREATE INDEX idx_fog_used_tokens_iofogUuid ON FogUsedTokens (iofog_uuid); -DROP TABLE IF EXISTS FogAccessTokens; \ No newline at end of file +DROP TABLE IF EXISTS FogAccessTokens; + +ALTER TABLE MicroserviceStatuses ADD COLUMN ip_address TEXT; + +CREATE TABLE IF NOT EXISTS Secrets ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) UNIQUE NOT NULL, + type VARCHAR(50) NOT NULL CHECK (type IN ('opaque', 'tls')), + data TEXT NOT NULL, + created_at DATETIME, + updated_at DATETIME +); + +CREATE INDEX idx_secrets_name ON Secrets (name); + +CREATE TABLE IF NOT EXISTS Certificates ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name TEXT UNIQUE NOT NULL, + subject TEXT NOT NULL, + is_ca BOOLEAN DEFAULT false, + signed_by_id INTEGER, + hosts TEXT, + valid_from DATETIME NOT NULL, + valid_to DATETIME NOT NULL, + serial_number TEXT NOT NULL, + secret_id INTEGER, + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (signed_by_id) REFERENCES Certificates (id) ON DELETE SET NULL, + FOREIGN KEY (secret_id) REFERENCES Secrets (id) ON DELETE CASCADE +); + +CREATE INDEX idx_certificates_name ON Certificates (name); +CREATE INDEX idx_certificates_valid_to ON 
Certificates (valid_to); +CREATE INDEX idx_certificates_is_ca ON Certificates (is_ca); +CREATE INDEX idx_certificates_signed_by_id ON Certificates (signed_by_id); +CREATE INDEX idx_certificates_secret_id ON Certificates (secret_id); + +CREATE TABLE IF NOT EXISTS Services ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name TEXT UNIQUE NOT NULL, + type TEXT NOT NULL, + resource TEXT NOT NULL, + target_port INTEGER NOT NULL, + service_port INTEGER, + bridge_port INTEGER, + service_endpoint TEXT, + created_at DATETIME, + updated_at DATETIME +); + +CREATE INDEX idx_services_id ON Services (id); +CREATE INDEX idx_services_name ON Services (name); + +CREATE TABLE IF NOT EXISTS ServiceTags ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + service_id INTEGER NOT NULL, + tag_id INTEGER NOT NULL, + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (service_id) REFERENCES Services (id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE +); + +CREATE INDEX idx_service_tags_service_id ON ServiceTags (service_id); +CREATE INDEX idx_service_tags_tag_id ON ServiceTags (tag_id); + diff --git a/src/data/models/certificate.js b/src/data/models/certificate.js new file mode 100644 index 00000000..c343332f --- /dev/null +++ b/src/data/models/certificate.js @@ -0,0 +1,130 @@ +'use strict' + +module.exports = (sequelize, DataTypes) => { + const Certificate = sequelize.define('Certificate', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + name: { + type: DataTypes.TEXT, + allowNull: false, + field: 'name', + unique: true + }, + subject: { + type: DataTypes.TEXT, + allowNull: false, + field: 'subject' + }, + isCA: { + type: DataTypes.BOOLEAN, + allowNull: false, + defaultValue: false, + field: 'is_ca' + }, + signedById: { + type: DataTypes.INTEGER, + allowNull: true, + field: 'signed_by_id', + references: { + model: 'Certificates', + key: 'id' + } + }, + hosts: { + type: 
DataTypes.TEXT, + allowNull: true, + field: 'hosts' + }, + validFrom: { + type: DataTypes.DATE, + allowNull: false, + field: 'valid_from' + }, + validTo: { + type: DataTypes.DATE, + allowNull: false, + field: 'valid_to' + }, + serialNumber: { + type: DataTypes.TEXT, + allowNull: false, + field: 'serial_number' + }, + secretId: { + type: DataTypes.INTEGER, + allowNull: true, + field: 'secret_id', + references: { + model: 'Secrets', + key: 'id' + } + } + }, { + tableName: 'Certificates', + timestamps: true, + underscored: true, + indexes: [ + { + unique: true, + fields: ['name'] + }, + { + fields: ['valid_to'] + }, + { + fields: ['is_ca'] + }, + { + fields: ['signed_by_id'] + }, + { + fields: ['secret_id'] + } + ] + }) + + Certificate.associate = (models) => { + Certificate.belongsTo(Certificate, { + as: 'signingCA', + foreignKey: 'signed_by_id' + }) + Certificate.hasMany(Certificate, { + as: 'signedCertificates', + foreignKey: 'signed_by_id' + }) + + Certificate.belongsTo(models.Secret, { + foreignKey: 'secret_id', + as: 'secret' + }) + } + + // Add a getter for days remaining until expiration + Certificate.prototype.getDaysUntilExpiration = function () { + const today = new Date() + const expiryDate = new Date(this.validTo) + const diffTime = expiryDate - today + const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24)) + return diffDays > 0 ? 
diffDays : 0 + } + + // Add a method to check if certificate is expired + Certificate.prototype.isExpired = function () { + const today = new Date() + const expiryDate = new Date(this.validTo) + return today > expiryDate + } + + // Add a method to check if certificate is expiring soon + Certificate.prototype.isExpiringSoon = function (days = 30) { + const daysRemaining = this.getDaysUntilExpiration() + return daysRemaining > 0 && daysRemaining <= days + } + + return Certificate +} diff --git a/src/data/models/index.js b/src/data/models/index.js index 71666d16..30b833b7 100644 --- a/src/data/models/index.js +++ b/src/data/models/index.js @@ -11,26 +11,28 @@ const config = require('../../config') const logger = require('../../logger') const databaseProvider = require('../providers/database-factory') -const sequelize = databaseProvider.sequelize -fs - .readdirSync(__dirname) - .filter((file) => { - return (file.indexOf('.') !== 0) && (file !== basename) && (file.slice(-3) === '.js') - }) - .forEach((file) => { - const model = require(path.join(__dirname, file))(sequelize, Sequelize.DataTypes) - db[model.name] = model - }) +// Initialize models after database is ready +const initializeModels = (sequelize) => { + fs + .readdirSync(__dirname) + .filter((file) => { + return (file.indexOf('.') !== 0) && (file !== basename) && (file.slice(-3) === '.js') + }) + .forEach((file) => { + const model = require(path.join(__dirname, file))(sequelize, Sequelize.DataTypes) + db[model.name] = model + }) -Object.keys(db).forEach((modelName) => { - if (db[modelName].associate) { - db[modelName].associate(db) - } -}) + Object.keys(db).forEach((modelName) => { + if (db[modelName].associate) { + db[modelName].associate(db) + } + }) -db.sequelize = sequelize -db.Sequelize = Sequelize + db.sequelize = sequelize + db.Sequelize = Sequelize +} const configureImage = async (db, name, fogTypes, images) => { const catalogItem = await db.CatalogItem.findOne({ where: { name, isPublic: false } }) @@ 
-47,19 +49,23 @@ const configureImage = async (db, name, fogTypes, images) => { db.initDB = async (isStart) => { await databaseProvider.initDB(isStart) + // Initialize models after database is ready + initializeModels(databaseProvider.sequelize) + if (isStart) { if (databaseProvider instanceof require('../providers/sqlite')) { const sqliteDbPath = databaseProvider.sequelize.options.storage - - // Check if the database file exists - if (fs.existsSync(sqliteDbPath)) { - logger.info('Database file exists. Running migrations only...') - await databaseProvider.runMigration(sqliteDbPath) // Ensure migration finishes before moving on - } else { - logger.info('Database file does not exist. Running migrations and seeders...') - await databaseProvider.runMigration(sqliteDbPath) // Wait for migration to finish - await databaseProvider.runSeeder(sqliteDbPath) // Wait for seeding to finish - } + logger.info('Running SQLite database migrations and seeders...') + await databaseProvider.runMigrationSQLite(sqliteDbPath) + await databaseProvider.runSeederSQLite(sqliteDbPath) + } else if (databaseProvider instanceof require('../providers/mysql')) { + logger.info('Running MySQL database migrations and seeders...') + await databaseProvider.runMigrationMySQL(databaseProvider.sequelize) + await databaseProvider.runSeederMySQL(databaseProvider.sequelize) + } else if (databaseProvider instanceof require('../providers/postgres')) { + logger.info('Running PostgreSQL database migrations and seeders...') + await databaseProvider.runMigrationPostgres(databaseProvider.sequelize) + await databaseProvider.runSeederPostgres(databaseProvider.sequelize) } // Configure system images diff --git a/src/data/models/microservicestatus.js b/src/data/models/microservicestatus.js index 7fe3c46c..136fda26 100644 --- a/src/data/models/microservicestatus.js +++ b/src/data/models/microservicestatus.js @@ -60,6 +60,11 @@ module.exports = (sequelize, DataTypes) => { type: DataTypes.TEXT, defaultValue: '', field: 
'error_message' + }, + ipAddress: { + type: DataTypes.TEXT, + defaultValue: '', + field: 'ip_address' } }, { tableName: 'MicroserviceStatuses', diff --git a/src/data/models/router.js b/src/data/models/router.js index 97d75e94..e4cd9980 100644 --- a/src/data/models/router.js +++ b/src/data/models/router.js @@ -17,7 +17,7 @@ module.exports = (sequelize, DataTypes) => { messagingPort: { type: DataTypes.INTEGER, field: 'messaging_port', - defaultValue: 5672 + defaultValue: 5671 }, edgeRouterPort: { type: DataTypes.INTEGER, @@ -35,34 +35,6 @@ module.exports = (sequelize, DataTypes) => { type: DataTypes.BOOLEAN, field: 'is_default', defaultValue: false - }, - requireSsl: { - type: DataTypes.TEXT, - field: 'require_ssl' - }, - sslProfile: { - type: DataTypes.TEXT, - field: 'ssl_profile' - }, - saslMechanisms: { - type: DataTypes.TEXT, - field: 'sasl_mechanisms' - }, - authenticatePeer: { - type: DataTypes.TEXT, - field: 'authenticate_peer' - }, - caCert: { - type: DataTypes.TEXT, - field: 'ca_cert' - }, - tlsCert: { - type: DataTypes.TEXT, - field: 'tls_cert' - }, - tlsKey: { - type: DataTypes.TEXT, - field: 'tls_key' } }, { tableName: 'Routers', diff --git a/src/data/models/secret.js b/src/data/models/secret.js new file mode 100644 index 00000000..3b19442a --- /dev/null +++ b/src/data/models/secret.js @@ -0,0 +1,79 @@ +'use strict' + +const SecretHelper = require('../../helpers/secret-helper') + +module.exports = (sequelize, DataTypes) => { + const Secret = sequelize.define('Secret', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + name: { + type: DataTypes.TEXT, + allowNull: false, + field: 'name', + unique: true + }, + type: { + type: DataTypes.TEXT, + allowNull: false, + field: 'type', + validate: { + isIn: [['opaque', 'tls']] + } + }, + data: { + type: DataTypes.TEXT, + allowNull: false, + field: 'data', + defaultValue: '{}', + get () { + const rawValue = this.getDataValue('data') + return 
rawValue ? JSON.parse(rawValue) : {} + }, + set (value) { + this.setDataValue('data', JSON.stringify(value)) + } + } + }, { + tableName: 'Secrets', + timestamps: true, + underscored: true, + indexes: [ + { + unique: true, + fields: ['name'] + } + ], + hooks: { + beforeSave: async (secret) => { + if (secret.changed('data')) { + const encryptedData = await SecretHelper.encryptSecret( + secret.data, + secret.name + ) + secret.data = encryptedData + } + }, + afterFind: async (secret) => { + if (secret && secret.data) { + try { + const decryptedData = await SecretHelper.decryptSecret( + secret.data, + secret.name + ) + secret.data = decryptedData + } catch (error) { + console.error('Error decrypting secret data:', error) + secret.data = {} + } + } + } + } + }) + + return Secret +} diff --git a/src/data/providers/database-provider.js b/src/data/providers/database-provider.js index 9ada5d73..df1ef454 100644 --- a/src/data/providers/database-provider.js +++ b/src/data/providers/database-provider.js @@ -8,9 +8,251 @@ class DatabaseProvider { this.basename = path.basename(__filename) } - // Async function for the migration process - async runMigration (dbName) { - const migrationSqlPath = path.resolve(__dirname, '../migrations/db_migration_v1.0.2.sql') + // Helper method to create database if it doesn't exist + async createDatabaseIfNotExists (db, provider, dbName) { + let checkQuery, createQuery + switch (provider) { + case 'mysql': + checkQuery = `SHOW DATABASES LIKE '${dbName}'` + createQuery = `CREATE DATABASE IF NOT EXISTS \`${dbName}\`` + break + case 'postgres': + checkQuery = `SELECT 1 FROM pg_database WHERE datname = '${dbName}'` + createQuery = `CREATE DATABASE "${dbName}"` + break + default: + return // No need to create database for SQLite + } + + try { + // For MySQL, we need to connect without a database first + if (provider === 'mysql') { + const mysql = require('mysql2/promise') + const config = { ...db.config } + // Remove database from config for initial 
connection + delete config.database + + const tempConnection = await mysql.createConnection(config) + try { + const [result] = await tempConnection.query(checkQuery) + const databaseExists = result.length > 0 + + if (!databaseExists) { + logger.info(`Creating database ${dbName}...`) + await tempConnection.query(createQuery) + logger.info(`Database ${dbName} created successfully`) + } else { + logger.info(`Database ${dbName} already exists`) + } + } finally { + await tempConnection.end() + } + } else if (provider === 'postgres') { + const { Pool } = require('pg') + const config = { ...db.config } + // Remove database from config for initial connection + delete config.database + + const pool = new Pool(config) + try { + const result = await pool.query(checkQuery) + const databaseExists = result.rows && result.rows.length > 0 + + if (!databaseExists) { + logger.info(`Creating database ${dbName}...`) + await pool.query(createQuery) + logger.info(`Database ${dbName} created successfully`) + } else { + logger.info(`Database ${dbName} already exists`) + } + } finally { + await pool.end() + } + } + } catch (err) { + logger.error(`Error checking/creating database ${dbName}:`, err) + throw err + } + } + + // Common method to check if migration has been run + async checkMigrationVersion (db, provider) { + let query + switch (provider) { + case 'sqlite': + query = 'SELECT migration_version FROM SchemaVersion WHERE migration_version IS NOT NULL ORDER BY id DESC LIMIT 1' + return new Promise((resolve, reject) => { + db.get(query, (err, row) => { + if (err) { + if (err.message.includes('no such table')) { + resolve(null) // Table doesn't exist yet + } else { + reject(err) + } + } else { + resolve(row ? 
row.migration_version : null) + } + }) + }) + case 'mysql': + query = 'SELECT migration_version FROM SchemaVersion WHERE migration_version IS NOT NULL ORDER BY id DESC LIMIT 1' + break + case 'postgres': + query = 'SELECT migration_version FROM "SchemaVersion" WHERE migration_version IS NOT NULL ORDER BY id DESC LIMIT 1' + break + } + + try { + const [results] = await db.query(query) + return results && results.length > 0 ? results[0].migration_version : null + } catch (err) { + if (err.code === 'ER_NO_SUCH_TABLE' || err.code === '42P01') { + return null // Table doesn't exist yet + } + throw err + } + } + + // Common method to check if seeder has been run + async checkSeederVersion (db, provider) { + let query + switch (provider) { + case 'sqlite': + query = 'SELECT seeder_version FROM SchemaVersion WHERE seeder_version IS NOT NULL ORDER BY id DESC LIMIT 1' + return new Promise((resolve, reject) => { + db.get(query, (err, row) => { + if (err) { + if (err.message.includes('no such table')) { + resolve(null) // Table doesn't exist yet + } else { + reject(err) + } + } else { + resolve(row ? row.seeder_version : null) + } + }) + }) + case 'mysql': + query = 'SELECT seeder_version FROM SchemaVersion WHERE seeder_version IS NOT NULL ORDER BY id DESC LIMIT 1' + break + case 'postgres': + query = 'SELECT seeder_version FROM "SchemaVersion" WHERE seeder_version IS NOT NULL ORDER BY id DESC LIMIT 1' + break + } + + try { + const [results] = await db.query(query) + return results && results.length > 0 ? 
results[0].seeder_version : null + } catch (err) { + if (err.code === 'ER_NO_SUCH_TABLE' || err.code === '42P01') { + return null // Table doesn't exist yet + } + throw err + } + } + + // Common method to create SchemaVersion table + async createSchemaVersionTable (db, provider) { + let query + switch (provider) { + case 'sqlite': + query = ` + CREATE TABLE IF NOT EXISTS SchemaVersion ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + migration_version TEXT NOT NULL, + seeder_version TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP + ) + ` + return new Promise((resolve, reject) => { + db.run(query, (err) => { + if (err) reject(err) + else resolve() + }) + }) + case 'mysql': + query = ` + CREATE TABLE IF NOT EXISTS SchemaVersion ( + id INT AUTO_INCREMENT PRIMARY KEY, + migration_version VARCHAR(255) NOT NULL, + seeder_version VARCHAR(255), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP + ) + ` + break + case 'postgres': + query = ` + CREATE TABLE IF NOT EXISTS "SchemaVersion" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, + migration_version VARCHAR(255) NOT NULL, + seeder_version VARCHAR(255), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + ` + break + } + + try { + await db.query(query) + } catch (err) { + throw err + } + } + + // Common method to update migration version + async updateMigrationVersion (db, version, provider) { + let query + switch (provider) { + case 'sqlite': + query = 'INSERT INTO SchemaVersion (migration_version) VALUES (?)' + return new Promise((resolve, reject) => { + db.run(query, [version], (err) => { + if (err) reject(err) + else resolve() + }) + }) + case 'mysql': + query = 'INSERT INTO SchemaVersion (migration_version) VALUES (?)' + await db.query(query, { replacements: [version] }) + break + case 'postgres': + query = 'INSERT INTO 
"SchemaVersion" (migration_version) VALUES ($1)' + await db.query(query, { bind: [version] }) + break + } + } + + // Common method to update seeder version + async updateSeederVersion (db, version, provider) { + switch (provider) { + case 'sqlite': + const sqliteQuery = 'UPDATE SchemaVersion SET seeder_version = ?, updated_at = CURRENT_TIMESTAMP WHERE id = (SELECT MAX(id) FROM SchemaVersion)' + return new Promise((resolve, reject) => { + db.run(sqliteQuery, [version], (err) => { + if (err) reject(err) + else resolve() + }) + }) + case 'mysql': + const [result] = await db.query('SELECT MAX(id) as maxId FROM SchemaVersion') + const maxId = result[0].maxId + const mysqlQuery = 'UPDATE SchemaVersion SET seeder_version = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?' + await db.query(mysqlQuery, { replacements: [version, maxId] }) + break + case 'postgres': + const postgresQuery = 'UPDATE "SchemaVersion" SET seeder_version = $1, updated_at = CURRENT_TIMESTAMP WHERE id = (SELECT MAX(id) FROM "SchemaVersion")' + await db.query(postgresQuery, { bind: [version] }) + break + } + } + + // SQLite migration + async runMigrationSQLite (dbName) { + const migrationSqlPath = path.resolve(__dirname, '../migrations/sqlite/db_migration_sqlite_v1.0.2.sql') + const migrationVersion = '1.0.2' if (!fs.existsSync(migrationSqlPath)) { logger.error(`Migration file not found: ${migrationSqlPath}`) @@ -18,7 +260,7 @@ class DatabaseProvider { } const migrationSql = fs.readFileSync(migrationSqlPath).toString() - const dataArr = migrationSql.split(';') // Split by semicolon + const dataArr = migrationSql.split(';') let db = new sqlite3.Database(dbName, (err) => { if (err) { @@ -29,28 +271,31 @@ class DatabaseProvider { }) try { + await this.createSchemaVersionTable(db, 'sqlite') + const currentVersion = await this.checkMigrationVersion(db, 'sqlite') + + if (currentVersion === migrationVersion) { + logger.info('Migration already up to date, skipping...') + return + } + db.serialize(() => { - 
db.run('PRAGMA foreign_keys=OFF;') // Disable foreign key checks during migration - db.run('BEGIN TRANSACTION;') // Start transaction + db.run('PRAGMA foreign_keys=OFF;') + db.run('BEGIN TRANSACTION;') }) for (let query of dataArr) { if (query.trim()) { - query = query.trim() + ';' // Ensure semicolon is added back - - // Run each query sequentially + query = query.trim() + ';' await new Promise((resolve, reject) => { db.run(query, (err) => { if (err) { - if ( - err.message.includes('already exists') || - err.message.includes('duplicate') - ) { + if (err.message.includes('already exists') || err.message.includes('duplicate')) { logger.warn(`Ignored error: ${err.message}`) - resolve() // Ignore specific errors + resolve() } else { - db.run('ROLLBACK;') // Rollback transaction on error - reject(err) // Reject on other errors + db.run('ROLLBACK;') + reject(err) } } else { resolve() @@ -60,7 +305,7 @@ class DatabaseProvider { } } - // Commit the transaction if all queries succeed + await this.updateMigrationVersion(db, migrationVersion, 'sqlite') db.run('COMMIT;') logger.info('Migration completed successfully.') } catch (err) { @@ -77,9 +322,114 @@ class DatabaseProvider { } } - // Async function for the seeding process - async runSeeder (dbName) { - const seederSqlPath = path.resolve(__dirname, '../seeders/db_seeder_v1.0.2.sql') + // MySQL migration + async runMigrationMySQL (db) { + const migrationSqlPath = path.resolve(__dirname, '../migrations/mysql/db_migration_mysql_v1.0.2.sql') + const migrationVersion = '1.0.2' + + if (!fs.existsSync(migrationSqlPath)) { + logger.error(`Migration file not found: ${migrationSqlPath}`) + throw new Error('Migration file not found') + } + + const migrationSql = fs.readFileSync(migrationSqlPath).toString() + const dataArr = migrationSql.split(';') + + try { + await this.createSchemaVersionTable(db, 'mysql') + const currentVersion = await this.checkMigrationVersion(db, 'mysql') + + if (currentVersion === migrationVersion) { + 
logger.info('Migration already up to date, skipping...') + return + } + + await db.query('START TRANSACTION') + + for (let query of dataArr) { + if (query.trim()) { + query = query.trim() + ';' + try { + await db.query(query) + } catch (err) { + if (err.code === 'ER_TABLE_EXISTS_ERROR' || + err.code === 'ER_DUP_FIELDNAME' || + err.code === 'ER_DUP_KEYNAME') { + logger.warn(`Ignored MySQL error: ${err.message}`) + } else { + await db.query('ROLLBACK') + throw err + } + } + } + } + + await this.updateMigrationVersion(db, migrationVersion, 'mysql') + await db.query('COMMIT') + logger.info('Migration completed successfully.') + } catch (err) { + await db.query('ROLLBACK') + logger.error('Migration failed:', err) + throw err + } + } + + // PostgreSQL migration + async runMigrationPostgres (db) { + const migrationSqlPath = path.resolve(__dirname, '../migrations/postgres/db_migration_pg_v1.0.2.sql') + const migrationVersion = '1.0.2' + + if (!fs.existsSync(migrationSqlPath)) { + logger.error(`Migration file not found: ${migrationSqlPath}`) + throw new Error('Migration file not found') + } + + const migrationSql = fs.readFileSync(migrationSqlPath).toString() + const dataArr = migrationSql.split(';') + + try { + await this.createSchemaVersionTable(db, 'postgres') + const currentVersion = await this.checkMigrationVersion(db, 'postgres') + + if (currentVersion === migrationVersion) { + logger.info('Migration already up to date, skipping...') + return + } + + await db.query('BEGIN') + + for (let query of dataArr) { + if (query.trim()) { + query = query.trim() + ';' + try { + await db.query(query) + } catch (err) { + if (err.code === '42P07' || // duplicate_table + err.code === '42701' || // duplicate_column + err.code === '42P06') { // duplicate_schema + logger.warn(`Ignored PostgreSQL error: ${err.message}`) + } else { + await db.query('ROLLBACK') + throw err + } + } + } + } + + await this.updateMigrationVersion(db, migrationVersion, 'postgres') + await db.query('COMMIT') + 
logger.info('Migration completed successfully.') + } catch (err) { + await db.query('ROLLBACK') + logger.error('Migration failed:', err) + throw err + } + } + + // SQLite seeder + async runSeederSQLite (dbName) { + const seederSqlPath = path.resolve(__dirname, '../seeders/sqlite/db_seeder_sqlite_v1.0.2.sql') + const seederVersion = '1.0.2' if (!fs.existsSync(seederSqlPath)) { logger.error(`Seeder file not found: ${seederSqlPath}`) @@ -87,7 +437,7 @@ class DatabaseProvider { } const seederSql = fs.readFileSync(seederSqlPath).toString() - const dataArr = seederSql.split(';') // Split by semicolon + const dataArr = seederSql.split(';') let db = new sqlite3.Database(dbName, (err) => { if (err) { @@ -98,28 +448,30 @@ class DatabaseProvider { }) try { + const currentVersion = await this.checkSeederVersion(db, 'sqlite') + + if (currentVersion === seederVersion) { + logger.info('Seeder already up to date, skipping...') + return + } + db.serialize(() => { - db.run('PRAGMA foreign_keys=OFF;') // Disable foreign key checks during seeding - db.run('BEGIN TRANSACTION;') // Start transaction + db.run('PRAGMA foreign_keys=OFF;') + db.run('BEGIN TRANSACTION;') }) for (let query of dataArr) { if (query.trim()) { - query = query.trim() + ';' // Ensure semicolon is added back - - // Run each query sequentially + query = query.trim() + ';' await new Promise((resolve, reject) => { db.run(query, (err) => { if (err) { - if ( - err.message.includes('already exists') || - err.message.includes('duplicate') - ) { + if (err.message.includes('already exists') || err.message.includes('duplicate')) { logger.warn(`Ignored error: ${err.message}`) - resolve() // Ignore specific errors + resolve() } else { - db.run('ROLLBACK;') // Rollback transaction on error - reject(err) // Reject on other errors + db.run('ROLLBACK;') + reject(err) } } else { resolve() @@ -129,7 +481,7 @@ class DatabaseProvider { } } - // Commit the transaction if all queries succeed + await this.updateSeederVersion(db, 
seederVersion, 'sqlite') db.run('COMMIT;') logger.info('Seeding completed successfully.') } catch (err) { @@ -145,6 +497,106 @@ class DatabaseProvider { }) } } + + // MySQL seeder + async runSeederMySQL (db) { + const seederSqlPath = path.resolve(__dirname, '../seeders/mysql/db_seeder_mysql_v1.0.2.sql') + const seederVersion = '1.0.2' + + if (!fs.existsSync(seederSqlPath)) { + logger.error(`Seeder file not found: ${seederSqlPath}`) + throw new Error('Seeder file not found') + } + + const seederSql = fs.readFileSync(seederSqlPath).toString() + const dataArr = seederSql.split(';') + + try { + const currentVersion = await this.checkSeederVersion(db, 'mysql') + + if (currentVersion === seederVersion) { + logger.info('Seeder already up to date, skipping...') + return + } + + await db.query('START TRANSACTION') + + for (let query of dataArr) { + if (query.trim()) { + query = query.trim() + ';' + try { + await db.query(query) + } catch (err) { + if (err.code === 'ER_DUP_ENTRY' || + err.code === 'ER_DUP_KEY') { + logger.warn(`Ignored MySQL error: ${err.message}`) + } else { + await db.query('ROLLBACK') + throw err + } + } + } + } + + await this.updateSeederVersion(db, seederVersion, 'mysql') + await db.query('COMMIT') + logger.info('Seeding completed successfully.') + } catch (err) { + await db.query('ROLLBACK') + logger.error('Seeding failed:', err) + throw err + } + } + + // PostgreSQL seeder + async runSeederPostgres (db) { + const seederSqlPath = path.resolve(__dirname, '../seeders/postgres/db_seeder_pg_v1.0.2.sql') + const seederVersion = '1.0.2' + + if (!fs.existsSync(seederSqlPath)) { + logger.error(`Seeder file not found: ${seederSqlPath}`) + throw new Error('Seeder file not found') + } + + const seederSql = fs.readFileSync(seederSqlPath).toString() + const dataArr = seederSql.split(';') + + try { + const currentVersion = await this.checkSeederVersion(db, 'postgres') + + if (currentVersion === seederVersion) { + logger.info('Seeder already up to date, skipping...') 
+ return + } + + await db.query('BEGIN') + + for (let query of dataArr) { + if (query.trim()) { + query = query.trim() + ';' + try { + await db.query(query) + } catch (err) { + if (err.code === '23505' || // unique_violation + err.code === '23503') { // foreign_key_violation + logger.warn(`Ignored PostgreSQL error: ${err.message}`) + } else { + await db.query('ROLLBACK') + throw err + } + } + } + } + + await this.updateSeederVersion(db, seederVersion, 'postgres') + await db.query('COMMIT') + logger.info('Seeding completed successfully.') + } catch (err) { + await db.query('ROLLBACK') + logger.error('Seeding failed:', err) + throw err + } + } } module.exports = DatabaseProvider diff --git a/src/data/providers/mysql.js b/src/data/providers/mysql.js index 3c654f8a..4225a204 100644 --- a/src/data/providers/mysql.js +++ b/src/data/providers/mysql.js @@ -1,24 +1,65 @@ const Sequelize = require('sequelize') - const config = require('../../config') const DatabaseProvider = require('./database-provider') +const logger = require('../../logger') +const mysql = require('mysql2/promise') class MySqlDatabaseProvider extends DatabaseProvider { constructor () { super() + // Get MySQL configuration from config or environment variables const mysqlConfig = config.get('database.mysql', {}) - mysqlConfig.dialect = 'mysql' - mysqlConfig.host = process.env.DB_HOST || mysqlConfig.host - mysqlConfig.port = process.env.DB_PORT || mysqlConfig.port - mysqlConfig.username = process.env.DB_USERNAME || mysqlConfig.username - mysqlConfig.password = process.env.DB_PASSWORD || mysqlConfig.password - mysqlConfig.databaseName = process.env.DB_NAME || mysqlConfig.database - this.sequelize = new Sequelize(mysqlConfig.databaseName, mysqlConfig.username, mysqlConfig.password, mysqlConfig) + // Base MySQL connection options + const connectionOptions = { + host: process.env.DB_HOST || mysqlConfig.host, + port: process.env.DB_PORT || mysqlConfig.port, + user: process.env.DB_USERNAME || mysqlConfig.username, 
+ password: process.env.DB_PASSWORD || mysqlConfig.password, + database: process.env.DB_NAME || mysqlConfig.databaseName, + connectTimeout: 10000 + } + + // Sequelize configuration + const sequelizeConfig = { + dialect: 'mysql', + host: connectionOptions.host, + port: connectionOptions.port, + username: connectionOptions.user, + password: connectionOptions.password, + database: connectionOptions.database, + dialectOptions: { + connectTimeout: connectionOptions.connectTimeout + }, + logging: false + } + + this.sequelize = new Sequelize(sequelizeConfig) + this.connectionOptions = connectionOptions } async initDB () { + try { + // First try to connect to the database directly + const connection = await mysql.createConnection(this.connectionOptions) + await connection.end() + } catch (err) { + if (err.code === 'ER_BAD_DB_ERROR') { + // Database doesn't exist, try to create it + logger.info('Database does not exist, attempting to create it...') + const { database, ...connectionConfig } = this.connectionOptions + const tempConnection = await mysql.createConnection(connectionConfig) + try { + await tempConnection.query(`CREATE DATABASE IF NOT EXISTS \`${database}\``) + logger.info(`Database ${database} created successfully`) + } finally { + await tempConnection.end() + } + } else { + throw err + } + } } } diff --git a/src/data/providers/postgres.js b/src/data/providers/postgres.js index 1c8a07e1..f1e44ade 100644 --- a/src/data/providers/postgres.js +++ b/src/data/providers/postgres.js @@ -1,24 +1,66 @@ const Sequelize = require('sequelize') - const config = require('../../config') const DatabaseProvider = require('./database-provider') +const logger = require('../../logger') +const { Pool } = require('pg') class PostgresDatabaseProvider extends DatabaseProvider { constructor () { super() + // Get PostgreSQL configuration from config or environment variables const postgresConfig = config.get('database.postgres', {}) - postgresConfig.dialect = 'postgres' - 
postgresConfig.host = process.env.DB_HOST || postgresConfig.host - postgresConfig.port = process.env.DB_PORT || postgresConfig.port - postgresConfig.username = process.env.DB_USERNAME || postgresConfig.username - postgresConfig.password = process.env.DB_PASSWORD || postgresConfig.password - postgresConfig.databaseName = process.env.DB_NAME || postgresConfig.database - this.sequelize = new Sequelize(postgresConfig.databaseName, postgresConfig.username, postgresConfig.password, postgresConfig) + // Base PostgreSQL connection options + const connectionOptions = { + host: process.env.DB_HOST || postgresConfig.host, + port: process.env.DB_PORT || postgresConfig.port, + user: process.env.DB_USERNAME || postgresConfig.username, + password: process.env.DB_PASSWORD || postgresConfig.password, + database: process.env.DB_NAME || postgresConfig.databaseName, + connectTimeout: 10000 + } + + // Sequelize configuration + const sequelizeConfig = { + dialect: 'postgres', + host: connectionOptions.host, + port: connectionOptions.port, + username: connectionOptions.user, + password: connectionOptions.password, + database: connectionOptions.database, + dialectOptions: { + connectTimeout: connectionOptions.connectTimeout + }, + logging: false + } + + this.sequelize = new Sequelize(sequelizeConfig) + this.connectionOptions = connectionOptions } async initDB () { + try { + // First try to connect to the database directly + const pool = new Pool(this.connectionOptions) + await pool.query('SELECT 1') + await pool.end() + } catch (err) { + if (err.code === '3D000') { // PostgreSQL error code for database doesn't exist + // Database doesn't exist, try to create it + logger.info('Database does not exist, attempting to create it...') + const { database, ...connectionConfig } = this.connectionOptions + const pool = new Pool(connectionConfig) + try { + await pool.query(`CREATE DATABASE "${database}"`) + logger.info(`Database ${database} created successfully`) + } finally { + await pool.end() + } 
+ } else { + throw err + } + } } } diff --git a/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql new file mode 100644 index 00000000..bfa21802 --- /dev/null +++ b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql @@ -0,0 +1,43 @@ +START TRANSACTION; + +INSERT INTO `Registries` (url, is_public, secure, certificate, requires_cert, user_name, password, user_email) +VALUES + ('registry.hub.docker.com', true, true, '', false, '', '', ''), + ('from_cache', true, true, '', false, '', '', ''); + +INSERT INTO `CatalogItems` (name, description, category, publisher, disk_required, ram_required, picture, config_example, is_public, registry_id) +VALUES + ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('Proxy', 'The built-in proxy for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + +INSERT INTO `FogTypes` (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) +VALUES + (0, 'Unspecified', 'iointegrator0.png', 'Unspecified device. Fog Type will be selected on provision', 1, 3, 2), + (1, 'Standard Linux (x86)', 'iointegrator1.png', 'A standard Linux server of at least moderate processing power and capacity. Compatible with common Linux types such as Ubuntu, Red Hat, and CentOS.', 1, 3, 2), + (2, 'ARM Linux', 'iointegrator2.png', 'A version of ioFog meant to run on Linux systems with ARM processors. 
Microservices for this ioFog type will be tailored to ARM systems.', 1, 3, 2); + +UPDATE `Fogs` +SET fog_type_id = 0 +WHERE fog_type_id IS NULL; + +INSERT INTO `CatalogItemImages` (catalog_item_id, fog_type_id, container_image) +VALUES + (1, 1, 'ghcr.io/datasance/nats:latest'), + (1, 2, 'ghcr.io/datasance/nats:latest'), + (2, 1, 'ghcr.io/datasance/restblue:latest'), + (2, 2, 'ghcr.io/datasance/restblue:latest'), + (3, 1, 'ghcr.io/datasance/hal:latest'), + (3, 2, 'ghcr.io/datasance/hal:latest'), + (4, 1, 'ghcr.io/datasance/edge-guard:latest'), + (4, 2, 'ghcr.io/datasance/edge-guard:latest'), + (5, 1, 'ghcr.io/datasance/router:latest'), + (5, 2, 'ghcr.io/datasance/router:latest'), + (6, 1, 'ghcr.io/datasance/proxy:latest'), + (6, 2, 'ghcr.io/datasance/proxy:latest'); + + +COMMIT; \ No newline at end of file diff --git a/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql new file mode 100644 index 00000000..aaa987d9 --- /dev/null +++ b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql @@ -0,0 +1,42 @@ +START TRANSACTION; + +INSERT INTO "Registries" (url, is_public, secure, certificate, requires_cert, user_name, password, user_email) +VALUES + ('registry.hub.docker.com', true, true, '', false, '', '', ''), + ('from_cache', true, true, '', false, '', '', ''); + +INSERT INTO "CatalogItems" (name, description, category, publisher, disk_required, ram_required, picture, config_example, is_public, registry_id) +VALUES + ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('Router', 'The 
built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('Proxy', 'The built-in proxy for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + +INSERT INTO "FogTypes" (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) +VALUES + (0, 'Unspecified', 'iointegrator0.png', 'Unspecified device. Fog Type will be selected on provision', 1, 3, 2), + (1, 'Standard Linux (x86)', 'iointegrator1.png', 'A standard Linux server of at least moderate processing power and capacity. Compatible with common Linux types such as Ubuntu, Red Hat, and CentOS.', 1, 3, 2), + (2, 'ARM Linux', 'iointegrator2.png', 'A version of ioFog meant to run on Linux systems with ARM processors. Microservices for this ioFog type will be tailored to ARM systems.', 1, 3, 2); + +UPDATE "Fogs" +SET fog_type_id = 0 +WHERE fog_type_id IS NULL; + +INSERT INTO "CatalogItemImages" (catalog_item_id, fog_type_id, container_image) +VALUES + (1, 1, 'ghcr.io/datasance/nats:latest'), + (1, 2, 'ghcr.io/datasance/nats:latest'), + (2, 1, 'ghcr.io/datasance/restblue:latest'), + (2, 2, 'ghcr.io/datasance/restblue:latest'), + (3, 1, 'ghcr.io/datasance/hal:latest'), + (3, 2, 'ghcr.io/datasance/hal:latest'), + (4, 1, 'ghcr.io/datasance/edge-guard:latest'), + (4, 2, 'ghcr.io/datasance/edge-guard:latest'), + (5, 1, 'ghcr.io/datasance/router:latest'), + (5, 2, 'ghcr.io/datasance/router:latest'), + (6, 1, 'ghcr.io/datasance/proxy:latest'), + (6, 2, 'ghcr.io/datasance/proxy:latest'); + +COMMIT; \ No newline at end of file diff --git a/src/data/seeders/db_seeder_v1.0.2.sql b/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql similarity index 100% rename from src/data/seeders/db_seeder_v1.0.2.sql rename to src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql diff --git a/src/decorators/authorization-decorator.js b/src/decorators/authorization-decorator.js index 656a4d17..9003b0b6 100644 --- 
a/src/decorators/authorization-decorator.js +++ b/src/decorators/authorization-decorator.js @@ -51,6 +51,7 @@ function checkFogToken (f) { const payload = JSON.parse(Buffer.from(tokenParts[1], 'base64').toString()) const fogUuid = payload.sub logger.debug({ payload }, 'JWT payload') + logger.info({ iofogUUID: payload.sub }) if (!fogUuid) { logger.error('JWT missing subject claim') diff --git a/src/helpers/error-messages.js b/src/helpers/error-messages.js index 3eac8674..af41a2c5 100644 --- a/src/helpers/error-messages.js +++ b/src/helpers/error-messages.js @@ -114,5 +114,16 @@ module.exports = { INVALID_MICROSERVICE_PUB_TAG: 'Invalid microservice Pub Tag \'{}\'', INVALID_MICROSERVICE_SUB_TAG: 'Invalid microservice Sub Tag \'{}\'', NOTFOUND_MICROSERVICE_PUB_TAG: 'No microservice found for Pub Tag \'{}\'', - NOTFOUND_MICROSERVICE_SUB_TAG: 'No microservice found for Sub Tag \'{}\'' + NOTFOUND_MICROSERVICE_SUB_TAG: 'No microservice found for Sub Tag \'{}\'', + SECRET_ALREADY_EXISTS: 'Secret with name "{0}" already exists', + SECRET_NOT_FOUND: 'Secret with name "{0}" not found', + // Certificate related error messages + CA_ALREADY_EXISTS: 'CA with name %s already exists', + CA_NOT_FOUND: 'CA with name %s not found', + CERTIFICATE_ALREADY_EXISTS: 'Certificate with name %s already exists', + CERTIFICATE_NOT_FOUND: 'Certificate with name %s not found', + INVALID_CERTIFICATE: 'Invalid certificate: %s', + INVALID_CA: 'Invalid CA: %s', + NOT_KUBERNETES_ENV: 'Controller is not running in Kubernetes environment', + K8S_SECRET_NOT_ALLOWED: 'Kubernetes secret type is not allowed in non-Kubernetes environment' } diff --git a/src/helpers/errors.js b/src/helpers/errors.js index cb57e675..3d4f8c1c 100644 --- a/src/helpers/errors.js +++ b/src/helpers/errors.js @@ -102,6 +102,14 @@ class CLIArgsNotProvidedError extends Error { } } +class ConflictError extends Error { + constructor (message) { + super(message) + this.name = 'ConflictError' + this.status = 409 + } +} + module.exports = { 
AuthenticationError: AuthenticationError, TransactionError: TransactionError, @@ -113,5 +121,6 @@ module.exports = { FtpError: FtpError, InvalidArgumentError: InvalidArgumentError, InvalidArgumentTypeError: InvalidArgumentTypeError, - CLIArgsNotProvidedError: CLIArgsNotProvidedError + CLIArgsNotProvidedError: CLIArgsNotProvidedError, + ConflictError: ConflictError } diff --git a/src/helpers/secret-helper.js b/src/helpers/secret-helper.js new file mode 100644 index 00000000..6e17595d --- /dev/null +++ b/src/helpers/secret-helper.js @@ -0,0 +1,59 @@ +const crypto = require('crypto') + +class SecretHelper { + constructor () { + this.ALGORITHM = 'aes-256-gcm' + this.IV_LENGTH = 12 + this.SALT_LENGTH = 16 + this.TAG_LENGTH = 16 + this.KEY_LENGTH = 32 + this.ITERATIONS = 100000 + } + + async encryptSecret (secretData, secretName) { + const salt = crypto.randomBytes(this.SALT_LENGTH) + const key = await this._deriveKey(secretName, salt) + const iv = crypto.randomBytes(this.IV_LENGTH) + const cipher = crypto.createCipheriv(this.ALGORITHM, key, iv) + const encrypted = Buffer.concat([ + cipher.update(JSON.stringify(secretData), 'utf8'), + cipher.final() + ]) + const tag = cipher.getAuthTag() + return Buffer.concat([salt, iv, tag, encrypted]).toString('base64') + } + + async decryptSecret (encryptedData, secretName) { + const buffer = Buffer.from(encryptedData, 'base64') + const salt = buffer.subarray(0, this.SALT_LENGTH) + const iv = buffer.subarray(this.SALT_LENGTH, this.SALT_LENGTH + this.IV_LENGTH) + const tag = buffer.subarray(this.SALT_LENGTH + this.IV_LENGTH, this.SALT_LENGTH + this.IV_LENGTH + this.TAG_LENGTH) + const encrypted = buffer.subarray(this.SALT_LENGTH + this.IV_LENGTH + this.TAG_LENGTH) + const key = await this._deriveKey(secretName, salt) + const decipher = crypto.createDecipheriv(this.ALGORITHM, key, iv) + decipher.setAuthTag(tag) + const decrypted = Buffer.concat([ + decipher.update(encrypted), + decipher.final() + ]) + return 
JSON.parse(decrypted.toString('utf8')) + } + + async _deriveKey (secretName, salt) { + return new Promise((resolve, reject) => { + crypto.pbkdf2( + secretName, + salt, + this.ITERATIONS, + this.KEY_LENGTH, + 'sha256', + (err, key) => { + if (err) reject(err) + else resolve(key) + } + ) + }) + } +} + +module.exports = new SecretHelper() diff --git a/src/helpers/template-helper.js b/src/helpers/template-helper.js index 543297a5..9ecdcf9d 100755 --- a/src/helpers/template-helper.js +++ b/src/helpers/template-helper.js @@ -1,190 +1,188 @@ -/* - * Software Name : eclipse-iofog/Controller - * Version: 2.0.x - * SPDX-FileCopyrightText: Copyright (c) 2020-2020 Orange - * SPDX-License-Identifier: EPL-2.0 - * - * This software is distributed under the , - * the text of which is available at http://www.eclipse.org/legal/epl-2.0 - * or see the "license.txt" file for more details. - * - * Author: Franck Roudet - */ - -const ApplicationManager = require('../data/managers/application-manager.js') // Using manager instead of service to avoid dependency loop -const FogService = require('../services/iofog-service') -const MicroservicesService = require('../services/microservices-service') -const EdgeResourceService = require('../services/edge-resource-service') - -// ninja2 like template engine -const { Liquid } = require('../lib/liquidjs/liquid.node.cjs') -const templateEngine = new Liquid() - -/** - * Add filter findAgent to template engine. 
- * Syntaxe {{ microservice | findMicroserviceAgent }} - */ - -function findMicroserviceAgentHandler (microservice) { - const user = this.context.environments._user - if (!user) { - return undefined - } - const result = FogService.getFogEndPoint({ uuid: microservice.iofogUuid }, user, false) - return result -} - -async function findEdgeResourcehandler (name, version) { - const key = `${name}/${version}` - const user = this.context.environments._user - if (!user) { - return undefined - } - if (this.context.environments._edgeResourcesByName && this.context.environments._edgeResourcesByName[key]) { - return this.context.environments._edgeResourcesByName[key] - } - const result = await EdgeResourceService.getEdgeResource({ name, version }, user) - - if (result && this.context.environments._edgeResourcesByName) { - this.context.environments._edgeResourcesByName[key] = result - } - return result -} - -async function findApplicationHandler (name) { - const user = this.context.environments._user - if (!user) { - return undefined - } - if (this.context.environments._applicationsByName && this.context.environments._applicationsByName[name]) { - return this.context.environments._applicationsByName[name] - } - - const result = await ApplicationManager.findOnePopulated({ exclude: ['created_at', 'updated_at'] }, { fakeTransaction: true }) // TODO: Get a proper DB transaction - if (result) { - result.microservices = (await MicroservicesService.listMicroservicesEndPoint({ applicationName: name }, user, false)).microservices - if (this.context.environments._applicationsByName) { - this.context.environments._applicationsByName[name] = result - } - } - return result -} - -async function findAgentHandler (name) { - const user = this.context.environments._user - if (!user) { - return undefined - } - if (name === '') { - const { fogs: result } = await FogService.getFogListEndPoint([], user, false, false) - if (result && this.context.environments._agentsByName) { - result.forEach(agent 
=> { - this.context.environments._agentsByName[agent.name] = agent - }) - } - return result - } - if (this.context.environments._agentsByName && this.context.environments._agentsByName[name]) { - return this.context.environments._agentsByName[name] - } - const result = await FogService.getFogEndPoint({ name }, user, false) - if (result && this.context.environments._agentsByName) { - this.context.environments._agentsByName[result.name] = result - } - return result -} - -async function JSONParser (variable) { - try { - console.log({ variable }) - return JSON.parse(variable) - } catch (e) { - return variable - } -} - -function toStringParser (variable) { - try { - if (typeof variable === 'string') { - return variable - } - if (variable.toString) { - return variable.toString() - } - return JSON.stringify(variable) - } catch (e) { - return variable - } -} -/** - * Add filter findEdgeRessource to template engine. - * user is in liquid context _user - * Syntaxe {{ name findEdgeRessource: version }} - */ -templateEngine.registerFilter('findEdgeResource', findEdgeResourcehandler) -templateEngine.registerFilter('findApplication', findApplicationHandler) -templateEngine.registerFilter('findAgent', findAgentHandler) -templateEngine.registerFilter('findMicroserviceAgent', findMicroserviceAgentHandler) -templateEngine.registerFilter('toNumber', JSONParser) -templateEngine.registerFilter('toBoolean', JSONParser) -templateEngine.registerFilter('toString', toStringParser) - -/** - * Object in depth traversal and right value templateEngine rendering - * @param {*} subjects - * @param {*} templateContext - */ -const rvaluesVarSubstition = async (subjects, templateContext, user) => { - let context = templateContext - // Due to the recursive nature of this function, user will only be defined on the first iteration - if (user) { - context = { - ...templateContext, - // Private context - _user: user // need by edge resource and every on demand request - } - } - - // Create local cache 
for filters if they do not exists - context._agentsByName = context._agentsByName || {} - context._edgeResourcesByName = context._edgeResourcesByName || {} - context._applicationsByName = context._applicationsByName || {} - - for (let key in subjects) { - try { - if (typeof subjects[key] === 'object') { - await rvaluesVarSubstition(subjects[key], context, null) - } else if (typeof subjects[key] === 'string') { - const result = await templateEngine.parseAndRender(subjects[key], context, { keepOutputType: true }) - subjects[key] = result - } - } catch (e) { - // Trace error in rendering - console.log({ e }) - subjects[key] = e.toString() - } - } - return subjects -} - -const substitutionMiddleware = async (req, res, next) => { - if (['POST', 'PUT', 'PATCH'].indexOf(req.method) > -1) { - let user - let tmplContext = { - self: req.body, - // Private context - _user: user // need by edge resource and every on demand request - } - try { - await rvaluesVarSubstition(req.body, tmplContext, user) - } catch (e) { - next(e) - } - } - next() -} - -module.exports = { - rvaluesVarSubstition, - substitutionMiddleware -} +/* + * Software Name : eclipse-iofog/Controller + * Version: 2.0.x + * SPDX-FileCopyrightText: Copyright (c) 2020-2020 Orange + * SPDX-License-Identifier: EPL-2.0 + * + * This software is distributed under the , + * the text of which is available at http://www.eclipse.org/legal/epl-2.0 + * or see the "license.txt" file for more details. 
+ * + * Author: Franck Roudet + */ + +const ApplicationManager = require('../data/managers/application-manager.js') // Using manager instead of service to avoid dependency loop +const FogService = require('../services/iofog-service') +const MicroservicesService = require('../services/microservices-service') +const EdgeResourceService = require('../services/edge-resource-service') + +// ninja2 like template engine +const { Liquid } = require('../lib/liquidjs/liquid.node.cjs') +const templateEngine = new Liquid() + +/** + * Add filter findAgent to template engine. + * Syntaxe {{ microservice | findMicroserviceAgent }} + */ + +function findMicroserviceAgentHandler (microservice) { + // const user = this.context.environments._user + // if (!user) { + // return undefined + // } + const result = FogService.getFogEndPoint({ uuid: microservice.iofogUuid }, false) + return result +} + +async function findEdgeResourcehandler (name, version) { + const key = `${name}/${version}` + // const user = this.context.environments._user + // if (!user) { + // return undefined + // } + if (this.context.environments._edgeResourcesByName && this.context.environments._edgeResourcesByName[key]) { + return this.context.environments._edgeResourcesByName[key] + } + const result = await EdgeResourceService.getEdgeResource({ name, version }) + + if (result && this.context.environments._edgeResourcesByName) { + this.context.environments._edgeResourcesByName[key] = result + } + return result +} + +async function findApplicationHandler (name) { + // const user = this.context.environments._user + // if (!user) { + // return undefined + // } + if (this.context.environments._applicationsByName && this.context.environments._applicationsByName[name]) { + return this.context.environments._applicationsByName[name] + } + + const result = await ApplicationManager.findOnePopulated({ exclude: ['created_at', 'updated_at'] }, { fakeTransaction: true }) // TODO: Get a proper DB transaction + if (result) { + 
result.microservices = (await MicroservicesService.listMicroservicesEndPoint({ applicationName: name }, false)).microservices + if (this.context.environments._applicationsByName) { + this.context.environments._applicationsByName[name] = result + } + } + return result +} + +async function findAgentHandler (name) { + // const user = this.context.environments._user + // if (!user) { + // return undefined + // } + if (name === '') { + const { fogs: result } = await FogService.getFogListEndPoint([], false, false) + if (result && this.context.environments._agentsByName) { + result.forEach(agent => { + this.context.environments._agentsByName[agent.name] = agent + }) + } + return result + } + if (this.context.environments._agentsByName && this.context.environments._agentsByName[name]) { + return this.context.environments._agentsByName[name] + } + const result = await FogService.getFogEndPoint({ name }, false) + if (result && this.context.environments._agentsByName) { + this.context.environments._agentsByName[result.name] = result + } + return result +} + +async function JSONParser (variable) { + try { + console.log({ variable }) + return JSON.parse(variable) + } catch (e) { + return variable + } +} + +function toStringParser (variable) { + try { + if (typeof variable === 'string') { + return variable + } + if (variable.toString) { + return variable.toString() + } + return JSON.stringify(variable) + } catch (e) { + return variable + } +} +/** + * Add filter findEdgeRessource to template engine. 
+ * user is in liquid context _user + * Syntaxe {{ name findEdgeRessource: version }} + */ +templateEngine.registerFilter('findEdgeResource', findEdgeResourcehandler) +templateEngine.registerFilter('findApplication', findApplicationHandler) +templateEngine.registerFilter('findAgent', findAgentHandler) +templateEngine.registerFilter('findMicroserviceAgent', findMicroserviceAgentHandler) +templateEngine.registerFilter('toNumber', JSONParser) +templateEngine.registerFilter('toBoolean', JSONParser) +templateEngine.registerFilter('toString', toStringParser) + +/** + * Object in depth traversal and right value templateEngine rendering + * @param {*} subjects + * @param {*} templateContext + */ +const rvaluesVarSubstition = async (subjects, templateContext) => { + let context = templateContext + // Due to the recursive nature of this function, user will only be defined on the first iteration + context = { + ...templateContext, + // Private context + // _user: user // need by edge resource and every on demand request + } + + // Create local cache for filters if they do not exists + context._agentsByName = context._agentsByName || {} + context._edgeResourcesByName = context._edgeResourcesByName || {} + context._applicationsByName = context._applicationsByName || {} + + for (let key in subjects) { + try { + if (typeof subjects[key] === 'object') { + await rvaluesVarSubstition(subjects[key], context, null) + } else if (typeof subjects[key] === 'string') { + const result = await templateEngine.parseAndRender(subjects[key], context, { keepOutputType: true }) + subjects[key] = result + } + } catch (e) { + // Trace error in rendering + console.log({ e }) + subjects[key] = e.toString() + } + } + return subjects +} + +const substitutionMiddleware = async (req, res, next) => { + if (['POST', 'PUT', 'PATCH'].indexOf(req.method) > -1) { + // let user + let tmplContext = { + self: req.body, + // Private context + // _user: user // need by edge resource and every on demand request + } + 
try { + await rvaluesVarSubstition(req.body, tmplContext) + } catch (e) { + next(e) + } + } + next() +} + +module.exports = { + rvaluesVarSubstition, + substitutionMiddleware +} diff --git a/src/logger/index.js b/src/logger/index.js index d855e847..db7461a8 100644 --- a/src/logger/index.js +++ b/src/logger/index.js @@ -89,6 +89,7 @@ const defaultFormat = { let result = {} if (log.req) { + // Create base request info result = Object.assign( result, serializer.req(log.req), @@ -99,10 +100,46 @@ const defaultFormat = { username: log.req.kauth && log.req.kauth.grant && log.req.kauth.grant.access_token && log.req.kauth.grant.access_token.content && log.req.kauth.grant.access_token.content.preferred_username } ) + // Filter request headers + if (result.headers) { + const allowedHeaders = ['content-type', 'content-length', 'user-agent'] + const filteredHeaders = {} + for (const header of allowedHeaders) { + if (result.headers[header]) { + filteredHeaders[header] = result.headers[header] + } + } + result.headers = filteredHeaders + } } if (log.res) { - result = Object.assign(result, serializer.res(log.res)) + // Get serialized response + const serializedRes = serializer.res(log.res) + // Find status code + let statusCode = null + if (log.statusCode !== undefined) { + statusCode = log.statusCode + } else if (log.res.statusCode !== undefined) { + statusCode = log.res.statusCode + } else if (serializedRes.statusCode !== undefined) { + statusCode = serializedRes.statusCode + } + // Filter response headers + if (serializedRes.headers) { + const allowedHeaders = ['content-type', 'content-length', 'x-timestamp', 'etag'] + const filteredHeaders = {} + for (const header of allowedHeaders) { + if (serializedRes.headers[header]) { + filteredHeaders[header] = serializedRes.headers[header] + } + } + serializedRes.headers = filteredHeaders + } + // Add filtered response to result + result = Object.assign(result, serializedRes, { statusCode }) + // Remove body for privacy + delete 
result.body } return result diff --git a/src/routes/agent.js b/src/routes/agent.js index e2e86e27..f43836fb 100644 --- a/src/routes/agent.js +++ b/src/routes/agent.js @@ -48,7 +48,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -77,7 +77,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -101,7 +101,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -130,7 +130,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -159,7 +159,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -188,7 +188,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -217,7 +217,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -242,7 +242,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -267,7 +267,7 @@ module.exports = 
[ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -296,7 +296,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -321,7 +321,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -350,7 +350,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -379,7 +379,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -412,7 +412,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -441,7 +441,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -470,7 +470,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -499,7 +499,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -524,7 
+524,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -553,7 +553,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -582,7 +582,36 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) + } + }, + { + method: 'get', + path: '/api/v3/agent/cert', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + const getControllerCAEndPoint = ResponseDecorator.handleErrors(AgentController.getControllerCAEndPoint, + successCode, errorCodes) + const responseObject = await getControllerCAEndPoint(req) + + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } } ] diff --git a/src/routes/application.js b/src/routes/application.js index 5185cbfb..0a6e6f60 100644 --- a/src/routes/application.js +++ b/src/routes/application.js @@ -42,7 +42,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -70,7 +70,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, 
responseObject: responseObject }) }) } }, @@ -103,7 +103,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -136,7 +136,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -168,7 +168,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) return null }) } @@ -206,7 +206,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -243,7 +243,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -280,7 +280,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -312,7 +312,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -344,7 +344,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, 
res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/applicationTemplate.js b/src/routes/applicationTemplate.js index ff36626b..f0bae7b2 100644 --- a/src/routes/applicationTemplate.js +++ b/src/routes/applicationTemplate.js @@ -42,7 +42,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -74,7 +74,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -107,7 +107,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -139,7 +139,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -175,7 +175,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -212,7 +212,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -248,7 +248,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ 
-280,7 +280,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/catalog.js b/src/routes/catalog.js index 80134e44..66580b17 100644 --- a/src/routes/catalog.js +++ b/src/routes/catalog.js @@ -46,7 +46,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -87,7 +87,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req, user: user, res: responseObject }) + logger.apiRes({ req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -123,7 +123,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req, user: user, res: responseObject }) + logger.apiRes({ req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -168,7 +168,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req, user: user, res: responseObject }) + logger.apiRes({ req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -204,7 +204,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req, user: user, res: responseObject }) + logger.apiRes({ req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/certificate.js b/src/routes/certificate.js new file mode 100644 index 00000000..2f30cfbc --- /dev/null +++ b/src/routes/certificate.js @@ -0,0 +1,356 @@ +const constants = require('../helpers/constants') +const CertificateController = require('../controllers/certificate-controller') +const ResponseDecorator = require('../decorators/response-decorator') +const logger = 
require('../logger') +const Errors = require('../helpers/errors') +const keycloak = require('../config/keycloak.js').initKeycloak() + +module.exports = [ + { + method: 'post', + path: '/api/v3/certificates/ca', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_CONFLICT, + errors: [Errors.ConflictError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createCAEndpoint = ResponseDecorator.handleErrors(CertificateController.createCAEndpoint, successCode, errorCodes) + const responseObject = await createCAEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/certificates/ca/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const getCAEndpoint = ResponseDecorator.handleErrors(CertificateController.getCAEndpoint, successCode, errorCodes) + const responseObject = await getCAEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/certificates/ca', + 
middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const listCAEndpoint = ResponseDecorator.handleErrors(CertificateController.listCAEndpoint, successCode, errorCodes) + const responseObject = await listCAEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/certificates/ca/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const deleteCAEndpoint = ResponseDecorator.handleErrors(CertificateController.deleteCAEndpoint, successCode, errorCodes) + const responseObject = await deleteCAEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/certificates', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_CONFLICT, + errors: 
[Errors.ConflictError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createCertificateEndpoint = ResponseDecorator.handleErrors(CertificateController.createCertificateEndpoint, successCode, errorCodes) + const responseObject = await createCertificateEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/certificates/expiring', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const listExpiringCertificatesEndpoint = ResponseDecorator.handleErrors(CertificateController.listExpiringCertificatesEndpoint, successCode, errorCodes) + const responseObject = await listExpiringCertificatesEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/certificates/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const 
getCertificateEndpoint = ResponseDecorator.handleErrors(CertificateController.getCertificateEndpoint, successCode, errorCodes) + const responseObject = await getCertificateEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/certificates', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const listCertificatesEndpoint = ResponseDecorator.handleErrors(CertificateController.listCertificatesEndpoint, successCode, errorCodes) + const responseObject = await listCertificatesEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/certificates/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const deleteCertificateEndpoint = ResponseDecorator.handleErrors(CertificateController.deleteCertificateEndpoint, successCode, errorCodes) + const responseObject = await deleteCertificateEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + 
.send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/certificates/:name/renew', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const renewCertificateEndpoint = ResponseDecorator.handleErrors(CertificateController.renewCertificateEndpoint, successCode, errorCodes) + const responseObject = await renewCertificateEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/certificates/yaml', + fileInput: 'certificate', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_CONFLICT, + errors: [Errors.ConflictError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createCertificateFromYamlEndpoint = ResponseDecorator.handleErrors(CertificateController.createCertificateFromYamlEndpoint, successCode, errorCodes) + const responseObject = await createCertificateFromYamlEndpoint(req) + const user = 
req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + } +] diff --git a/src/routes/config.js b/src/routes/config.js index fba93b48..366d42f2 100644 --- a/src/routes/config.js +++ b/src/routes/config.js @@ -41,7 +41,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -72,7 +72,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -104,7 +104,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/controller.js b/src/routes/controller.js index 3c41255b..36e9fb8f 100644 --- a/src/routes/controller.js +++ b/src/routes/controller.js @@ -31,7 +31,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } }, { @@ -49,7 +49,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, res: responseObject }) + logger.apiRes({ req: req, res: res, responseObject: responseObject }) } } ] diff --git a/src/routes/diagnostics.js b/src/routes/diagnostics.js index 5c9270a0..49e83a33 100644 --- a/src/routes/diagnostics.js +++ b/src/routes/diagnostics.js @@ -50,7 +50,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, 
user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -86,7 +86,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) } else { res.writeHead(successCode, { 'Content-Length': responseObject.body['Content-Length'], @@ -133,7 +133,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -168,7 +168,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -211,7 +211,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/edgeResource.js b/src/routes/edgeResource.js index 55558160..0f55c3e3 100644 --- a/src/routes/edgeResource.js +++ b/src/routes/edgeResource.js @@ -41,7 +41,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -72,7 +72,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -103,7 +103,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, 
user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -139,7 +139,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -174,7 +174,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -206,7 +206,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -237,7 +237,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -268,7 +268,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/flow.js b/src/routes/flow.js index d3e922dd..e9397c04 100644 --- a/src/routes/flow.js +++ b/src/routes/flow.js @@ -41,7 +41,7 @@ module.exports = [ .status(responseObject.code) .send({ flows: responseObject.body.applications }) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -72,7 +72,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, 
responseObject: responseObject }) }) } }, @@ -103,7 +103,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -138,7 +138,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -169,7 +169,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/iofog.js b/src/routes/iofog.js index ab7f132a..73ab91ac 100644 --- a/src/routes/iofog.js +++ b/src/routes/iofog.js @@ -45,7 +45,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -77,7 +77,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -113,7 +113,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -144,7 +144,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -175,7 +175,7 @@ module.exports = [ .status(responseObject.code) 
.send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -206,7 +206,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -241,7 +241,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -276,7 +276,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -307,7 +307,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -337,7 +337,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -372,7 +372,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/microservices.js b/src/routes/microservices.js index a43a8cf4..38bef8d6 100644 --- a/src/routes/microservices.js +++ b/src/routes/microservices.js @@ -44,7 +44,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: 
responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -71,7 +71,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -103,7 +103,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -136,7 +136,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -167,7 +167,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -198,7 +198,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -229,7 +229,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -265,7 +265,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -301,7 +301,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: 
responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -336,7 +336,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -371,7 +371,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -408,7 +408,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -439,7 +439,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -474,7 +474,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -509,7 +509,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -544,7 +544,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -579,7 +579,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: 
responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -610,7 +610,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -641,7 +641,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -672,7 +672,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -706,7 +706,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -744,7 +744,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -782,7 +782,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -820,7 +820,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -858,7 +858,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: 
responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/registries.js b/src/routes/registries.js index 5f80e3c6..e2d2f8d4 100644 --- a/src/routes/registries.js +++ b/src/routes/registries.js @@ -45,7 +45,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -75,7 +75,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -109,7 +109,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -145,7 +145,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/router.js b/src/routes/router.js index 12a64f9d..cc13fbbd 100644 --- a/src/routes/router.js +++ b/src/routes/router.js @@ -49,7 +49,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -85,7 +85,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/routing.js b/src/routes/routing.js index 105e9add..e44c9d2a 100644 --- 
a/src/routes/routing.js +++ b/src/routes/routing.js @@ -49,7 +49,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -84,7 +84,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -128,7 +128,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -168,7 +168,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -203,7 +203,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/secret.js b/src/routes/secret.js new file mode 100644 index 00000000..54289adf --- /dev/null +++ b/src/routes/secret.js @@ -0,0 +1,246 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const constants = require('../helpers/constants') +const SecretController = require('../controllers/secret-controller') +const ResponseDecorator = require('../decorators/response-decorator') +const logger = require('../logger') +const Errors = require('../helpers/errors') +const keycloak = require('../config/keycloak.js').initKeycloak() + +module.exports = [ + { + method: 'post', + path: '/api/v3/secrets', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_CONFLICT, + errors: [Errors.ConflictError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createSecretEndpoint = ResponseDecorator.handleErrors(SecretController.createSecretEndpoint, successCode, errorCodes) + const responseObject = await createSecretEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/secrets/yaml', + fileInput: 'secret', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_CONFLICT, + errors: [Errors.ConflictError] + } + ] + + await 
keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createSecretFromYamlEndpoint = ResponseDecorator.handleErrors(SecretController.createSecretFromYamlEndpoint, successCode, errorCodes) + const responseObject = await createSecretFromYamlEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'put', + path: '/api/v3/secrets/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const updateSecretEndpoint = ResponseDecorator.handleErrors(SecretController.updateSecretEndpoint, successCode, errorCodes) + const responseObject = await updateSecretEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'put', + path: '/api/v3/secrets/yaml/:name', + fileInput: 'secret', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const 
updateSecretFromYamlEndpoint = ResponseDecorator.handleErrors(SecretController.updateSecretFromYamlEndpoint, successCode, errorCodes) + const responseObject = await updateSecretFromYamlEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/secrets/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const getSecretEndpoint = ResponseDecorator.handleErrors(SecretController.getSecretEndpoint, successCode, errorCodes) + const responseObject = await getSecretEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/secrets', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const listSecretsEndpoint = ResponseDecorator.handleErrors(SecretController.listSecretsEndpoint, successCode, errorCodes) + const responseObject = await listSecretsEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: 
user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/secrets/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const deleteSecretEndpoint = ResponseDecorator.handleErrors(SecretController.deleteSecretEndpoint, successCode, errorCodes) + const responseObject = await deleteSecretEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + } +] diff --git a/src/routes/tunnel.js b/src/routes/tunnel.js index e3021a46..40c3b622 100644 --- a/src/routes/tunnel.js +++ b/src/routes/tunnel.js @@ -53,7 +53,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } }, @@ -88,7 +88,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } } diff --git a/src/routes/user.js b/src/routes/user.js index 93eaa1f8..f900af89 100644 --- a/src/routes/user.js +++ b/src/routes/user.js @@ -100,7 +100,7 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) - logger.apiRes({ req: req, user: user, res: responseObject }) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) } }, { diff --git a/src/schemas/certificate.js 
b/src/schemas/certificate.js new file mode 100644 index 00000000..28591cbd --- /dev/null +++ b/src/schemas/certificate.js @@ -0,0 +1,144 @@ +const caCreate = { + id: '/caCreate', + type: 'object', + properties: { + name: { type: 'string', minLength: 1, maxLength: 255 }, + subject: { type: 'string', minLength: 1 }, + expiration: { type: 'integer', minimum: 0 }, + type: { + type: 'string', + enum: ['k8s-secret', 'direct', 'self-signed'] + }, + secretName: { type: 'string' } + }, + required: ['type'], + additionalProperties: false, + allOf: [ + { + if: { properties: { type: { const: 'self-signed' } } }, + then: { required: ['name', 'subject', 'expiration'] } + }, + { + if: { + properties: { + type: { + enum: ['k8s-secret', 'direct'] + } + } + }, + then: { required: ['secretName'] } + } + ] +} + +const certificateCreate = { + id: '/certificateCreate', + type: 'object', + properties: { + name: { type: 'string', minLength: 1, maxLength: 255 }, + subject: { type: 'string', minLength: 1 }, + hosts: { type: 'string', minLength: 1 }, + expiration: { type: 'integer', minimum: 0 }, + ca: { + type: 'object', + properties: { + type: { type: 'string', enum: ['k8s-secret', 'direct', 'self-signed'] }, + secretName: { type: 'string' }, + cert: { type: 'string' }, + key: { type: 'string' } + }, + required: ['type'] + } + }, + required: ['name', 'subject', 'hosts'], + additionalProperties: false +} + +const caResponse = { + id: '/caResponse', + type: 'object', + properties: { + name: { type: 'string' }, + subject: { type: 'string' }, + type: { type: 'string' }, + created_at: { type: 'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['name', 'subject', 'type', 'created_at', 'updated_at'], + additionalProperties: false +} + +const certificateResponse = { + id: '/certificateResponse', + type: 'object', + properties: { + name: { type: 'string' }, + subject: { type: 'string' }, + hosts: { type: 'string' }, + created_at: { type: 
'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['name', 'subject', 'hosts', 'created_at', 'updated_at'], + additionalProperties: false +} + +const caListResponse = { + id: '/caListResponse', + type: 'object', + properties: { + cas: { + type: 'array', + items: { + type: 'object', + properties: { + name: { type: 'string' }, + subject: { type: 'string' }, + type: { type: 'string' }, + created_at: { type: 'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['name', 'subject', 'type', 'created_at', 'updated_at'], + additionalProperties: false + } + } + }, + required: ['cas'], + additionalProperties: false +} + +const certificateListResponse = { + id: '/certificateListResponse', + type: 'object', + properties: { + certificates: { + type: 'array', + items: { + type: 'object', + properties: { + name: { type: 'string' }, + subject: { type: 'string' }, + hosts: { type: 'string' }, + created_at: { type: 'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['name', 'subject', 'hosts', 'created_at', 'updated_at'], + additionalProperties: false + } + } + }, + required: ['certificates'], + additionalProperties: false +} + +module.exports = { + mainSchemas: [ + caCreate, + certificateCreate, + caResponse, + certificateResponse, + caListResponse, + certificateListResponse + ], + innerSchemas: [] +} + \ No newline at end of file diff --git a/src/schemas/secret.js b/src/schemas/secret.js new file mode 100644 index 00000000..af2ac259 --- /dev/null +++ b/src/schemas/secret.js @@ -0,0 +1,65 @@ +const secretCreate = { + id: '/secretCreate', + type: 'object', + properties: { + name: { type: 'string', minLength: 1, maxLength: 255 }, + type: { type: 'string', enum: ['opaque', 'tls'] }, + data: { type: 'object' } + }, + required: ['name', 'type', 'data'], + additionalProperties: false +} + +const secretUpdate = { + id: 
'/secretUpdate', + type: 'object', + properties: { + data: { type: 'object' } + }, + required: ['data'], + additionalProperties: false +} + +const secretResponse = { + id: '/secretResponse', + type: 'object', + properties: { + id: { type: 'integer' }, + name: { type: 'string' }, + type: { type: 'string', enum: ['opaque', 'tls'] }, + data: { type: 'object' }, + created_at: { type: 'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['id', 'name', 'type', 'data', 'created_at', 'updated_at'], + additionalProperties: false +} + +const secretListResponse = { + id: '/secretListResponse', + type: 'object', + properties: { + secrets: { + type: 'array', + items: { + type: 'object', + properties: { + id: { type: 'integer' }, + name: { type: 'string' }, + type: { type: 'string', enum: ['opaque', 'tls'] }, + created_at: { type: 'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['id', 'name', 'type', 'created_at', 'updated_at'], + additionalProperties: false + } + } + }, + required: ['secrets'], + additionalProperties: false +} + +module.exports = { + mainSchemas: [secretCreate, secretUpdate, secretResponse, secretListResponse], + innerSchemas: [] +} diff --git a/src/services/agent-service.js b/src/services/agent-service.js index fa2a6f1f..ec416cc9 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -11,6 +11,7 @@ * */ +const config = require('../config') const path = require('path') const fs = require('fs') const formidable = require('formidable') @@ -41,6 +42,7 @@ const MicroserviceService = require('../services/microservices-service') const RouterManager = require('../data/managers/router-manager') const EdgeResourceService = require('./edge-resource-service') const constants = require('../helpers/constants') +const SecretManager = require('../data/managers/secret-manager') const IncomingForm = formidable.IncomingForm const 
CHANGE_TRACKING_DEFAULT = {} const CHANGE_TRACKING_KEYS = ['config', 'version', 'reboot', 'deleteNode', 'microserviceList', 'microserviceConfig', 'routing', 'registries', 'tunnel', 'diagnostics', 'isImageSnapshot', 'prune', 'routerChanged', 'linkedEdgeResources'] @@ -121,6 +123,9 @@ const _invalidateFogNode = async function (fog, transaction) { const getAgentConfig = async function (fog, transaction) { const router = fog.routerId ? await RouterManager.findOne({ id: fog.routerId }, transaction) : await fog.getRouter() + // Get local agent certificate from secrets + const localAgentSecret = await SecretManager.getSecret(`${fog.uuid}-local-agent`, transaction) + // fog is the result of FogManager.FindOne() in the checkFogToken middleware return { networkInterface: fog.networkInterface, @@ -143,7 +148,10 @@ const getAgentConfig = async function (fog, transaction) { dockerPruningFrequency: fog.dockerPruningFrequency, routerHost: router.host === fog.host ? 'localhost' : router.host, routerPort: router.messagingPort, - timeZone: fog.timeZone + timeZone: fog.timeZone, + caCert: localAgentSecret ? localAgentSecret.data['ca.crt'] : null, + tlsCert: localAgentSecret ? localAgentSecret.data['tls.crt'] : null, + tlsKey: localAgentSecret ? 
localAgentSecret.data['tls.key'] : null } } @@ -253,7 +261,8 @@ const _updateMicroserviceStatuses = async function (microserviceStatus, fog, tra cpuUsage: status.cpuUsage, memoryUsage: status.memoryUsage, percentage: status.percentage, - errorMessage: status.errorMessage + errorMessage: status.errorMessage, + ipAddress: status.ipAddress } microserviceStatus = AppHelper.deleteUndefinedFields(microserviceStatus) const microservice = await MicroserviceManager.findOne({ @@ -581,6 +590,44 @@ async function _checkMicroservicesFogType (fog, fogTypeId, transaction) { } } +const getControllerCA = async function (fog, transaction) { + const devMode = process.env.DEV_MODE || config.get('server.devMode') + const sslCert = process.env.SSL_CERT || config.get('server.ssl.path.cert') + const intermedKey = process.env.INTERMEDIATE_CERT || config.get('server.ssl.path.intermediateCert') + const sslCertBase64 = config.get('server.ssl.base64.cert') + const intermedKeyBase64 = config.get('server.ssl.base64.intermediateCert') + const hasFileBasedSSL = !devMode && sslCert + const hasBase64SSL = !devMode && sslCertBase64 + + if (devMode) { + throw new Errors.ValidationError('Controller is in development mode') + } + + if (hasFileBasedSSL) { + try { + if (intermedKey) { + const certData = fs.readFileSync(intermedKey) + return Buffer.from(certData).toString('base64') + } else { + const certData = fs.readFileSync(sslCert) + return Buffer.from(certData).toString('base64') + } + } catch (error) { + throw new Errors.ValidationError('Failed to read SSL certificate file') + } + } + + if (hasBase64SSL) { + if (intermedKeyBase64) { + return intermedKeyBase64 + } else if (sslCertBase64) { + return sslCertBase64 + } + } + + throw new Errors.ValidationError('No valid SSL certificate configuration found') +} + module.exports = { agentProvision: TransactionDecorator.generateTransaction(agentProvision), agentDeprovision: TransactionDecorator.generateTransaction(agentDeprovision), @@ -601,5 +648,6 @@ 
module.exports = { deleteNode: TransactionDecorator.generateTransaction(deleteNode), getImageSnapshot: TransactionDecorator.generateTransaction(getImageSnapshot), putImageSnapshot: TransactionDecorator.generateTransaction(putImageSnapshot), - getAgentLinkedEdgeResources: TransactionDecorator.generateTransaction(getAgentLinkedEdgeResources) + getAgentLinkedEdgeResources: TransactionDecorator.generateTransaction(getAgentLinkedEdgeResources), + getControllerCA: TransactionDecorator.generateTransaction(getControllerCA) } diff --git a/src/services/certificate-service.js b/src/services/certificate-service.js new file mode 100644 index 00000000..59d5f139 --- /dev/null +++ b/src/services/certificate-service.js @@ -0,0 +1,605 @@ +const TransactionDecorator = require('../decorators/transaction-decorator') +const SecretService = require('./secret-service') +const CertificateManager = require('../data/managers/certificate-manager') +const SecretManager = require('../data/managers/secret-manager') +const Errors = require('../helpers/errors') +const ErrorMessages = require('../helpers/error-messages') +const AppHelper = require('../helpers/app-helper') +const Validator = require('../schemas/index') +const { generateSelfSignedCA, storeCA, generateCertificate } = require('../utils/cert') +const config = require('../config') +const forge = require('node-forge') + +// Helper function to check Kubernetes environment +function checkKubernetesEnvironment () { + const isKubernetes = process.env.CONTROL_PLANE || config.get('app.ControlPlane') === 'kubernetes' + if (!isKubernetes) { + throw new Errors.ValidationError(ErrorMessages.NOT_KUBERNETES_ENV) + } +} + +// Helper function to validate CA type +function validateCertType (type) { + if (type === 'k8s-secret') { + checkKubernetesEnvironment() + } else if (type !== 'self-signed' && type !== 'direct') { + throw new Errors.ValidationError(`Invalid CA type: ${type}. 
Must be one of: self-signed, direct, k8s-secret`) + } +} + +// Parse PEM certificate to extract metadata +function parseCertificate (certPem) { + try { + const cert = forge.pki.certificateFromPem(certPem) + return { + subject: cert.subject.getField('CN') ? cert.subject.getField('CN').value : '', + issuer: cert.issuer.getField('CN') ? cert.issuer.getField('CN').value : '', + validFrom: cert.validity.notBefore, + validTo: cert.validity.notAfter, + serialNumber: cert.serialNumber + } + } catch (error) { + throw new Errors.ValidationError(`Invalid certificate: ${error.message}`) + } +} + +// Helper function to convert months to milliseconds +function monthsToMilliseconds (months) { + // Average month length in milliseconds (30.44 days per month) + const avgMonthInMs = 30.44 * 24 * 60 * 60 * 1000 + return months * avgMonthInMs +} + +// Helper function to handle expiration input +function processExpiration (expiration) { + // If expiration is less than 1000, assume it's in months + // This threshold is chosen because no realistic certificate would expire in less than 1 second + if (expiration && expiration < 1000) { + return monthsToMilliseconds(expiration) + } + // Otherwise, use as-is (assuming milliseconds) + return expiration +} + +async function createCAEndpoint (caData, transaction) { + // Validate input data + const validation = await Validator.validate(caData, Validator.schemas.caCreate) + if (!validation.valid) { + throw new Errors.ValidationError(validation.error) + } + + // Only process expiration if present (for self-signed) + if (caData.expiration) { + caData.expiration = processExpiration(caData.expiration) + } + // Validate CA type based on environment + validateCertType(caData.type) + + try { + const secretName = caData.type === 'self-signed' ? 
caData.name : caData.secretName + const existingSecret = await SecretService.getSecretEndpoint(secretName) + if (caData.type === 'self-signed') { + if (existingSecret) { + throw new Errors.ConflictError(`CA with name ${secretName} already exists`) + } + } else { + if (!existingSecret) { + throw new Errors.NotFoundError(`Secret with name ${secretName} does not exist. You must create the secret first.`) + } + // For direct/k8s-secret, check if CA record already exists + const existingCA = await CertificateManager.findCertificateByName(secretName, transaction) + if (existingCA && existingCA.isCA) { + throw new Errors.ConflictError(`CA with name ${secretName} already exists`) + } + } + } catch (error) { + // Only proceed if the error is NotFoundError + if (!(error instanceof Errors.NotFoundError)) { + throw error + } + // For self-signed, NotFoundError is fine (secret doesn't exist yet) + // For direct/k8s-secret, NotFoundError is handled above + } + + let ca + let certDetails + + if (caData.type === 'self-signed') { + ca = await generateSelfSignedCA(caData.subject, caData.expiration) + await storeCA(ca, caData.name) + certDetails = parseCertificate(ca.cert) + } else if (caData.type === 'k8s-secret') { + // Import CA from Kubernetes secret + ca = await require('../utils/cert').getCAFromK8sSecret(caData.secretName) + certDetails = parseCertificate(ca.certificate) + // Store the CA locally with the same name as the secret + await storeCA({ cert: ca.certificate, key: ca.key }, caData.secretName) + } else if (caData.type === 'direct') { + // Load from internal secret + const caObj = await require('../utils/cert').loadCA(caData.secretName) + ca = await require('../utils/cert').getCAFromDirect(caObj) + certDetails = parseCertificate(ca.certificate) + } else { + throw new Errors.ValidationError('Unsupported CA type') + } + + // Get the secret that was just created or referenced + const secret = await SecretManager.findOne({ name: caData.secretName || caData.name }, 
transaction) + + // Create certificate record in database + await CertificateManager.createCertificateRecord({ + name: caData.secretName || caData.name, // Use secretName if available, otherwise use provided name + subject: certDetails.subject, + isCA: true, + validFrom: certDetails.validFrom, + validTo: certDetails.validTo, + serialNumber: certDetails.serialNumber, + secretId: secret ? secret.id : null + }, transaction) + + return { + name: caData.secretName || caData.name, // Use secretName if available, otherwise use provided name + subject: certDetails.subject, + type: caData.type, + valid_from: certDetails.validFrom, + valid_to: certDetails.validTo + } +} + +async function getCAEndpoint (name, transaction) { + const certRecord = await CertificateManager.findCertificateByName(name, transaction) + + if (!certRecord || !certRecord.isCA) { + throw new Errors.NotFoundError(`CA with name ${name} not found`) + } + + // Get the actual cert data from the secret + const secret = await SecretService.getSecretEndpoint(name) + + if (!secret || secret.type !== 'tls') { + throw new Errors.NotFoundError(`CA with name ${name} not found`) + } + + return { + name: certRecord.name, + subject: certRecord.subject, + is_ca: certRecord.isCA, + valid_from: certRecord.validFrom, + valid_to: certRecord.validTo, + serial_number: certRecord.serialNumber, + data: { + certificate: Buffer.from(secret.data['tls.crt'], 'base64').toString(), + private_key: Buffer.from(secret.data['tls.key'], 'base64').toString() + } + } +} + +async function listCAEndpoint (transaction) { + const caRecords = await CertificateManager.findAllCAs(transaction) + + return { + cas: caRecords.map(ca => ({ + name: ca.name, + subject: ca.subject, + valid_from: ca.validFrom, + valid_to: ca.validTo, + days_remaining: ca.getDaysUntilExpiration(), + is_expired: ca.isExpired() + })) + } +} + +async function deleteCAEndpoint (name, transaction) { + const caRecord = await CertificateManager.findCertificateByName(name, 
transaction) + + if (!caRecord || !caRecord.isCA) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CA_NOT_FOUND, name)) + } + + // Check if this CA has signed certificates + const signedCerts = await CertificateManager.findCertificatesByCA(caRecord.id, transaction) + + if (signedCerts.length > 0) { + throw new Errors.ValidationError(`Cannot delete CA that has signed certificates. Please delete the following certificates first: ${signedCerts.map(cert => cert.name).join(', ')}`) + } + + // Delete certificate record and the secret + await CertificateManager.deleteCertificate(name, transaction) + await SecretService.deleteSecretEndpoint(name) + + return {} +} + +async function createCertificateEndpoint (certData, transaction) { + // Validate input data + const validation = await Validator.validate(certData, Validator.schemas.certificateCreate) + if (!validation.valid) { + throw new Errors.ValidationError(validation.error) + } + // Validate CA type based on environment + validateCertType(certData.ca.type) + // Process expiration in months if needed + if (certData.expiration) { + certData.expiration = processExpiration(certData.expiration) + } + + // Check if certificate already exists + try { + const existingSecret = await SecretService.getSecretEndpoint(certData.name) + if (existingSecret) { + throw new Errors.ConflictError(`Certificate with name ${certData.name} already exists`) + } + } catch (error) { + if (!(error instanceof Errors.NotFoundError)) { + throw error + } + } + + // Find signing CA if one is specified + let caRecord = null + if (certData.ca && certData.ca.secretName) { + // Skip CA lookup for self-signed type - it's meant to be self-signed, not signed by another CA + if (certData.ca.type && certData.ca.type.toLowerCase() === 'self-signed') { + // Modify the CA structure to properly indicate self-signed + certData.ca = { type: 'self-signed' } + // Continue with certificate generation + } else { + caRecord = await 
CertificateManager.findCertificateByName(certData.ca.secretName, transaction) + if (!caRecord || !caRecord.isCA) { + // Log if we're dealing with a k8s-secret type + if (certData.ca.type === 'k8s-secret') { + try { + // Try to directly generate cert with k8s CA - this should invoke getCAFromInput + await generateCertificate({ + name: certData.name, + subject: certData.subject, + hosts: certData.hosts, + expiration: certData.expiration, + ca: certData.ca + }) + + // Get certificate details from newly created secret + const certSecret = await SecretService.getSecretEndpoint(certData.name) + const certPem = Buffer.from(certSecret.data['tls.crt'], 'base64').toString() + const certDetails = parseCertificate(certPem) + + // Find or create the CA record to get its ID + let caId = null + const caRecord = await CertificateManager.findCertificateByName(certData.ca.secretName, transaction) + if (caRecord) { + caId = caRecord.id + } + + // Create certificate record in database + await CertificateManager.createCertificateRecord({ + name: certData.name, + subject: certDetails.subject, + isCA: false, + signedById: caId, + hosts: certData.hosts, + validFrom: certDetails.validFrom, + validTo: certDetails.validTo, + serialNumber: certDetails.serialNumber + }, transaction) + + // Return response with CA name + return { + name: certData.name, + subject: certData.subject, + hosts: certData.hosts, + valid_from: certDetails.validFrom, + valid_to: certDetails.validTo, + ca_name: certData.ca.secretName + } + } catch (error) { + throw error + } + } + throw new Errors.NotFoundError(`CA with name ${certData.ca.secretName} not found`) + } + // Check if CA is expired + if (caRecord.isExpired()) { + throw new Errors.ValidationError(`CA ${certData.ca.secretName} is expired and cannot be used to sign new certificates`) + } + } + } + + // Generate certificate + await generateCertificate({ + name: certData.name, + subject: certData.subject, + hosts: certData.hosts, + expiration: certData.expiration, 
+ ca: certData.ca + }) + + // Get certificate from secret to parse details + const certSecret = await SecretService.getSecretEndpoint(certData.name) + const certPem = Buffer.from(certSecret.data['tls.crt'], 'base64').toString() + const certDetails = parseCertificate(certPem) + + // Create certificate record in database + await CertificateManager.createCertificateRecord({ + name: certData.name, + subject: certDetails.subject, + isCA: false, + signedById: caRecord ? caRecord.id : null, + hosts: certData.hosts, + validFrom: certDetails.validFrom, + validTo: certDetails.validTo, + serialNumber: certDetails.serialNumber + }, transaction) + + return { + name: certData.name, + subject: certData.subject, + hosts: certData.hosts, + valid_from: certDetails.validFrom, + valid_to: certDetails.validTo, + ca_name: caRecord ? caRecord.name : null + } +} + +async function getCertificateEndpoint (name, transaction) { + const certRecord = await CertificateManager.findCertificateByName(name, transaction) + + if (!certRecord) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CERTIFICATE_NOT_FOUND, name)) + } + + // Get the actual cert data from the secret + const secret = await SecretService.getSecretEndpoint(name) + + if (!secret || secret.type !== 'tls') { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CERTIFICATE_NOT_FOUND, name)) + } + + // Get the certificate chain if available + const certChain = await CertificateManager.getCertificateChain(certRecord.id, transaction) + const chainInfo = certChain.length > 1 + ? certChain.slice(1).map(c => ({ name: c.name, subject: c.subject })) + : [] + + return { + name: certRecord.name, + subject: certRecord.subject, + hosts: certRecord.hosts, + is_ca: certRecord.isCA, + valid_from: certRecord.validFrom, + valid_to: certRecord.validTo, + serial_number: certRecord.serialNumber, + ca_name: certRecord.signingCA ? 
certRecord.signingCA.name : null, + certificate_chain: chainInfo, + days_remaining: certRecord.getDaysUntilExpiration(), + is_expired: certRecord.isExpired(), + data: { + certificate: Buffer.from(secret.data['tls.crt'], 'base64').toString(), + private_key: Buffer.from(secret.data['tls.key'], 'base64').toString() + } + } +} + +async function listCertificatesEndpoint (transaction) { + const certRecords = await CertificateManager.findAllCertificates(transaction) + + return { + certificates: certRecords.map(cert => ({ + name: cert.name, + subject: cert.subject, + hosts: cert.hosts, + is_ca: cert.isCA, + valid_from: cert.validFrom, + valid_to: cert.validTo, + days_remaining: cert.getDaysUntilExpiration(), + is_expired: cert.isExpired(), + ca_name: cert.signingCA ? cert.signingCA.name : null + })) + } +} + +async function deleteCertificateEndpoint (name, transaction) { + const certRecord = await CertificateManager.findCertificateByName(name, transaction) + + if (!certRecord) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CERTIFICATE_NOT_FOUND, name)) + } + + // Check if this is a CA with signed certificates + if (certRecord.isCA) { + const signedCerts = await CertificateManager.findCertificatesByCA(certRecord.id, transaction) + if (signedCerts.length > 0) { + throw new Errors.ValidationError(`Cannot delete CA that has signed certificates. 
Please delete the following certificates first: ${signedCerts.map(cert => cert.name).join(', ')}`) + } + } + + // Delete certificate record and the secret + await CertificateManager.deleteCertificate(name, transaction) + await SecretService.deleteSecretEndpoint(name) + + return {} +} + +// Phase 3: Renewal Implementation +async function renewCertificateEndpoint (name, transaction) { + try { + // First check if certificate exists in database + let certRecord = await CertificateManager.findCertificateByName(name, transaction) + let isNewRecord = false + + // If no certificate record but secret exists, we'll create a new record + if (!certRecord) { + try { + const secret = await SecretManager.findOne({ name, type: 'tls' }, transaction) + if (secret) { + isNewRecord = true + console.log(`Certificate record not found for ${name}, but secret exists. Will create new record.`) + } else { + throw new Errors.NotFoundError(`Certificate with name ${name} not found`) + } + } catch (error) { + if (error instanceof Errors.NotFoundError) { + throw error + } + throw new Errors.NotFoundError(`Certificate with name ${name} not found: ${error.message}`) + } + } + + // Delete existing secret (if any) - we'll create a new one + try { + await SecretService.deleteSecretEndpoint(name) + } catch (error) { + // Ignore NotFoundError + if (!(error instanceof Errors.NotFoundError)) { + throw error + } + } + + // Prepare renewal data + const renewalData = { + name: name, + subject: certRecord ? certRecord.subject : name, + hosts: certRecord ? 
certRecord.hosts : null, + isRenewal: true + } + + // Handle signing CA if this certificate was signed by a CA + if (certRecord && certRecord.signedById) { + const signingCA = await CertificateManager.findOne({ id: certRecord.signedById }, transaction) + + if (!signingCA || !signingCA.isCA) { + throw new Errors.NotFoundError(`Signing CA for certificate ${name} not found or is not a valid CA`) + } + + if (signingCA.isExpired()) { + throw new Errors.ValidationError(`CA ${signingCA.name} is expired and cannot be used to renew certificates. Please renew the CA first.`) + } + + renewalData.ca = { + type: 'direct', + secretName: signingCA.name + } + } else { + // Self-signed renewal + renewalData.ca = { + type: 'self-signed' + } + } + + // Generate new certificate + await generateCertificate(renewalData) + + // Get the newly created secret + const secretModel = await SecretManager.findOne({ name }, transaction) + + if (!secretModel) { + throw new Errors.NotFoundError(`Failed to find renewed certificate secret: ${name}`) + } + + // Current date and expiration date + const nowDate = new Date() + const expiryDate = new Date() + expiryDate.setMonth(expiryDate.getMonth() + (certRecord && certRecord.isCA ? 
36 : 12)) + + // Use Sequelize transaction for both operations + if (isNewRecord) { + // Create new certificate record + await CertificateManager.create({ + name: name, + subject: renewalData.subject, + hosts: renewalData.hosts, + isCA: renewalData.ca.type === 'self-signed', + validFrom: nowDate, + validTo: expiryDate, + serialNumber: `renewed-${Date.now()}`, + secretId: secretModel.id + }, transaction) + } else { + // Update the existing certificate record + await CertificateManager.update( + { id: certRecord.id }, + { + validFrom: nowDate, + validTo: expiryDate, + secretId: secretModel.id + }, + transaction + ) + } + + // Get the updated certificate record + const updatedCert = await CertificateManager.findCertificateByName(name, transaction) + + if (!updatedCert) { + // If certificate record still doesn't exist, try to create it again with all fields + await CertificateManager.create({ + name: name, + subject: renewalData.subject, + hosts: renewalData.hosts, + isCA: renewalData.ca.type === 'self-signed', + validFrom: nowDate, + validTo: expiryDate, + serialNumber: `renewed-${Date.now()}`, + secretId: secretModel.id + }, transaction) + + // Try to get it again + const newCert = await CertificateManager.findCertificateByName(name, transaction) + if (!newCert) { + throw new Error(`Failed to retrieve or create certificate record for ${name}`) + } + + return { + name: newCert.name, + subject: newCert.subject, + hosts: newCert.hosts, + valid_from: newCert.validFrom, + valid_to: newCert.validTo, + renewed: true + } + } + + return { + name: updatedCert.name, + subject: updatedCert.subject, + hosts: updatedCert.hosts, + valid_from: updatedCert.validFrom, + valid_to: updatedCert.validTo, + renewed: true + } + } catch (error) { + console.error(`Certificate renewal error: ${error.message}`) + throw error + } +} + +// Get certificates expiring soon +async function listExpiringCertificatesEndpoint (days = 30, transaction) { + const expiringCerts = await 
CertificateManager.findCertificatesForRenewal(days, transaction) + + // Ensure we return an empty array, not null, if no certificates are expiring + return { + certificates: expiringCerts ? expiringCerts.map(cert => ({ + name: cert.name, + subject: cert.subject, + hosts: cert.hosts, + is_ca: cert.isCA, + valid_from: cert.validFrom, + valid_to: cert.validTo, + days_remaining: cert.getDaysUntilExpiration(), + ca_name: cert.signingCA ? cert.signingCA.name : null + })) : [] + } +} + +module.exports = { + createCAEndpoint: TransactionDecorator.generateTransaction(createCAEndpoint), + getCAEndpoint: TransactionDecorator.generateTransaction(getCAEndpoint), + listCAEndpoint: TransactionDecorator.generateTransaction(listCAEndpoint), + deleteCAEndpoint: TransactionDecorator.generateTransaction(deleteCAEndpoint), + createCertificateEndpoint: TransactionDecorator.generateTransaction(createCertificateEndpoint), + getCertificateEndpoint: TransactionDecorator.generateTransaction(getCertificateEndpoint), + listCertificatesEndpoint: TransactionDecorator.generateTransaction(listCertificatesEndpoint), + deleteCertificateEndpoint: TransactionDecorator.generateTransaction(deleteCertificateEndpoint), + renewCertificateEndpoint: TransactionDecorator.generateTransaction(renewCertificateEndpoint), + listExpiringCertificatesEndpoint: TransactionDecorator.generateTransaction(listExpiringCertificatesEndpoint) +} diff --git a/src/services/iofog-service.js b/src/services/iofog-service.js index 1cc27db5..7172c1ab 100644 --- a/src/services/iofog-service.js +++ b/src/services/iofog-service.js @@ -11,6 +11,8 @@ * */ +const config = require('../config') +const fs = require('fs') const TransactionDecorator = require('../decorators/transaction-decorator') const AppHelper = require('../helpers/app-helper') const FogManager = require('../data/managers/iofog-manager') @@ -36,6 +38,107 @@ const RouterService = require('./router-service') const Constants = require('../helpers/constants') const Op = 
require('sequelize').Op const lget = require('lodash/get') +const CertificateService = require('./certificate-service') +const logger = require('../logger') + +const SITE_CA_CERT = 'pot-site-ca' +const DEFAULT_ROUTER_LOCAL_CA = 'default-router-local-ca' + +async function _handleRouterCertificates (fogData, transaction) { + // Helper to check CA existence + async function ensureCA (name, subject) { + try { + await CertificateService.getCAEndpoint(name, transaction) + // CA exists + } catch (err) { + if (err.name === 'NotFoundError') { + await CertificateService.createCAEndpoint({ + name, + subject: `${subject}`, + expiration: 60, // months + type: 'self-signed' + }, transaction) + } else if (err.name === 'ConflictError') { + // Already exists, ignore + } else { + throw err + } + } + } + + // Helper to check cert existence + async function ensureCert (name, subject, hosts, ca) { + try { + await CertificateService.getCertificateEndpoint(name, transaction) + // Cert exists + } catch (err) { + if (err.name === 'NotFoundError') { + await CertificateService.createCertificateEndpoint({ + name, + subject: `${subject}`, + hosts, + ca + }, transaction) + } else if (err.name === 'ConflictError') { + // Already exists, ignore + } else { + throw err + } + } + } + + // Build hosts string from available fields + const hosts = [ + fogData.host, + fogData.ipAddress, + fogData.ipAddressExternal + ].filter(Boolean).join(',') || 'localhost' + + try { + // Always ensure SITE_CA_CERT exists + await ensureCA(SITE_CA_CERT, SITE_CA_CERT) + + // Always ensure site-server cert exists + await ensureCert( + `${fogData.uuid}-site-server`, + `${fogData.uuid}-site-server`, + hosts, + { type: 'direct', secretName: SITE_CA_CERT } + ) + + // Always ensure local-ca exists + await ensureCA(`${fogData.uuid}-local-ca`, `${fogData.uuid}-local-ca`) + + // Always ensure local-server cert exists + await ensureCert( + `${fogData.uuid}-local-server`, + `${fogData.uuid}-local-server`, + hosts, + { type: 
'direct', secretName: `${fogData.uuid}-local-ca` } + ) + + // Always ensure local-agent cert exists + await ensureCert( + `${fogData.uuid}-local-agent`, + `${fogData.uuid}-local-agent`, + hosts, + { type: 'direct', secretName: `${fogData.uuid}-local-ca` } + ) + + // If routerMode is 'none', also ensure DEFAULT_ROUTER_LOCAL_CA and local-agent signed by it + if (fogData.routerMode === 'none') { + await ensureCA(DEFAULT_ROUTER_LOCAL_CA, DEFAULT_ROUTER_LOCAL_CA) + await ensureCert( + `${fogData.uuid}-local-agent`, + `${fogData.uuid}-local-agent`, + hosts, + { type: 'direct', secretName: DEFAULT_ROUTER_LOCAL_CA } + ) + } + } catch (error) { + logger.error('Certificate operation failed:', error) + } +} async function createFogEndPoint (fogData, isCLI, transaction) { await Validator.validate(fogData, Validator.schemas.iofogCreate) @@ -72,6 +175,10 @@ async function createFogEndPoint (fogData, isCLI, transaction) { routerId: null, timeZone: fogData.timeZone } + + // Add certificate handling + await _handleRouterCertificates(fogData, transaction) + createFogData = AppHelper.deleteUndefinedFields(createFogData) // Default router is edge @@ -190,6 +297,9 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) } + // Add certificate handling + await _handleRouterCertificates(fogData, transaction) + // Update tags await _setTags(oldFog, fogData.tags, transaction) @@ -213,13 +323,6 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { const messagingPort = fogData.messagingPort || (router ? router.messagingPort : null) const interRouterPort = fogData.interRouterPort || (router ? router.interRouterPort : null) const edgeRouterPort = fogData.edgeRouterPort || (router ? router.edgeRouterPort : null) - const requireSsl = fogData.requireSsl || (router ? router.requireSsl : null) - const sslProfile = fogData.sslProfile || (router ? 
router.sslProfile : null) - const saslMechanisms = fogData.saslMechanisms || (router ? router.saslMechanisms : null) - const authenticatePeer = fogData.authenticatePeer || (router ? router.authenticatePeer : null) - const caCert = fogData.caCert || (router ? router.caCert : null) - const tlsCert = fogData.tlsCert || (router ? router.tlsCert : null) - const tlsKey = fogData.tlsKey || (router ? router.tlsKey : null) let networkRouter // const isSystem = updateFogData.isSystem === undefined ? oldFog.isSystem : updateFogData.isSystem @@ -246,7 +349,7 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { } else { // Update existing router networkRouter = await RouterService.updateRouter(router, { - messagingPort, interRouterPort, edgeRouterPort, isEdge: routerMode === 'edge', host, requireSsl, sslProfile, saslMechanisms, authenticatePeer, caCert, tlsCert, tlsKey + messagingPort, interRouterPort, edgeRouterPort, isEdge: routerMode === 'edge', host }, upstreamRouters) await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) } @@ -500,9 +603,41 @@ async function generateProvisioningKeyEndPoint (fogData, isCLI, transaction) { const provisioningKeyData = await FogProvisionKeyManager.updateOrCreate({ iofogUuid: fogData.uuid }, newProvision, transaction) + const devMode = process.env.DEV_MODE || config.get('server.devMode') + const sslCert = process.env.SSL_CERT || config.get('server.ssl.path.cert') + const intermedKey = process.env.INTERMEDIATE_CERT || config.get('server.ssl.path.intermediateCert') + const sslCertBase64 = config.get('server.ssl.base64.cert') + const intermedKeyBase64 = config.get('server.ssl.base64.intermediateCert') + const hasFileBasedSSL = !devMode && sslCert + const hasBase64SSL = !devMode && sslCertBase64 + let caCert = '' + + if (!devMode) { + if (hasFileBasedSSL) { + try { + if (intermedKey) { + const certData = fs.readFileSync(intermedKey) + caCert = 
Buffer.from(certData).toString('base64') + } else { + const certData = fs.readFileSync(sslCert) + caCert = Buffer.from(certData).toString('base64') + } + } catch (error) { + throw new Errors.ValidationError('Failed to read SSL certificate file') + } + } + if (hasBase64SSL) { + if (intermedKeyBase64) { + caCert = intermedKeyBase64 + } else if (sslCertBase64) { + caCert = sslCertBase64 + } + } + } return { key: provisioningKeyData.provisionKey, - expirationTime: provisioningKeyData.expirationTime + expirationTime: provisioningKeyData.expirationTime, + caCert: caCert } } diff --git a/src/services/router-service.js b/src/services/router-service.js index 124089e9..e1d4821c 100644 --- a/src/services/router-service.js +++ b/src/services/router-service.js @@ -28,6 +28,11 @@ const TransactionDecorator = require('../decorators/transaction-decorator') const Validator = require('../schemas') const ldifferenceWith = require('lodash/differenceWith') const constants = require('../helpers/constants') +const MicroserviceEnvManager = require('../data/managers/microservice-env-manager') +const SecretManager = require('../data/managers/secret-manager') + +const SITE_CONFIG_VERSION = 'pot' +const SITE_CONFIG_NAMESPACE = 'datasance' async function validateAndReturnUpstreamRouters (upstreamRouterIds, isSystemFog, defaultRouter, transaction) { if (!upstreamRouterIds) { @@ -56,7 +61,7 @@ async function validateAndReturnUpstreamRouters (upstreamRouterIds, isSystemFog, async function createRouterForFog (fogData, uuid, upstreamRouters, transaction) { const isEdge = fogData.routerMode === 'edge' - const messagingPort = fogData.messagingPort || 5672 + const messagingPort = fogData.messagingPort || 5671 // Is default router if we are on a system fog and no other default router already exists const isDefault = (fogData.isSystem) ? 
!(await RouterManager.findOne({ isDefault: true }, transaction)) : false const routerData = { @@ -66,23 +71,17 @@ async function createRouterForFog (fogData, uuid, upstreamRouters, transaction) edgeRouterPort: !isEdge ? fogData.edgeRouterPort : null, interRouterPort: !isEdge ? fogData.interRouterPort : null, isDefault: isDefault, - requireSsl: fogData.requireSsl, - sslProfile: fogData.sslProfile, - saslMechanisms: fogData.saslMechanisms, - authenticatePeer: fogData.authenticatePeer, - caCert: fogData.caCert, - tlsCert: fogData.tlsCert, - tlsKey: fogData.tlsKey, iofogUuid: uuid } const router = await RouterManager.create(routerData, transaction) - const microserviceConfig = _getRouterMicroserviceConfig(isEdge, uuid, messagingPort, router.interRouterPort, router.edgeRouterPort, router.saslMechanisms, router.authenticatePeer, router.sslProfile, router.requireSsl, router.caCert, router.tlsCert, router.tlsKey) + const microserviceConfig = await _getRouterMicroserviceConfig(isEdge, uuid, messagingPort, router.interRouterPort, router.edgeRouterPort, transaction) for (const upstreamRouter of upstreamRouters) { await RouterConnectionManager.create({ sourceRouter: router.id, destRouter: upstreamRouter.id }, transaction) - microserviceConfig.connectors = (microserviceConfig.connectors || []).concat(_getRouterConnectorConfig(isEdge, upstreamRouter, router.sslProfile, router.saslMechanisms)) + const connectorConfig = _getRouterConnectorConfig(isEdge, upstreamRouter, uuid) + microserviceConfig.connectors[connectorConfig.name] = connectorConfig } const routerMicroservice = await _createRouterMicroservice(isEdge, uuid, microserviceConfig, transaction) @@ -120,7 +119,7 @@ async function updateRouter (oldRouter, newRouterData, upstreamRouters, transact await _createRouterPorts(routerMicroservice.uuid, newRouterData.edgeRouterPort, transaction) await _createRouterPorts(routerMicroservice.uuid, newRouterData.interRouterPort, transaction) } - newRouterData.messagingPort = 
newRouterData.messagingPort || 5672 + newRouterData.messagingPort = newRouterData.messagingPort || 5671 await RouterManager.update({ id: oldRouter.id }, newRouterData, transaction) // Update upstream routers @@ -163,42 +162,91 @@ async function _deleteRouterPorts (routerMicroserviceUuid, port, transaction) { await MicroservicePortManager.delete({ microserviceUuid: routerMicroserviceUuid, portInternal: port }, transaction) } +async function _updateRouterPorts (routerMicroserviceUuid, router, transaction) { + await MicroservicePortManager.delete({ microserviceUuid: routerMicroserviceUuid }, transaction) + await _createRouterPorts(routerMicroserviceUuid, router.messagingPort, transaction) + if (!router.isEdge) { + await _createRouterPorts(routerMicroserviceUuid, router.edgeRouterPort, transaction) + await _createRouterPorts(routerMicroserviceUuid, router.interRouterPort, transaction) + } +} + async function updateConfig (routerID, transaction) { const router = await RouterManager.findOne({ id: routerID }, transaction) if (!router) { throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, routerID)) } - const microserviceConfig = _getRouterMicroserviceConfig(router.isEdge, router.iofogUuid, router.messagingPort, router.interRouterPort, router.edgeRouterPort, router.saslMechanisms, router.authenticatePeer, router.sslProfile, router.requireSsl, router.caCert, router.tlsCert, router.tlsKey) - const upstreamRoutersConnections = await RouterConnectionManager.findAllWithRouters({ sourceRouter: router.id }, transaction) - - for (const upstreamRouterConnection of upstreamRoutersConnections) { - microserviceConfig.connectors = (microserviceConfig.connectors || []).concat(_getRouterConnectorConfig(router.isEdge, upstreamRouterConnection.dest, router.sslProfile, router.saslMechanisms)) - } + // Get current configuration const routerCatalog = await CatalogService.getRouterCatalogItem(transaction) const routerMicroservice = await 
MicroserviceManager.findOne({ catalogItemId: routerCatalog.id, iofogUuid: router.iofogUuid }, transaction) + if (!routerMicroservice) { throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, router.id)) } - if (routerMicroservice.config !== JSON.stringify(microserviceConfig)) { - await MicroserviceManager.update({ uuid: routerMicroservice.uuid }, { config: JSON.stringify(microserviceConfig) }, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') - if (_listenersChanged(JSON.parse(routerMicroservice.config || '{}').listeners, microserviceConfig.listeners)) { - MicroservicePortManager.delete({ microserviceUuid: routerMicroservice.uuid }, transaction) - await _createRouterPorts(routerMicroservice.uuid, router.messagingPort, transaction) - if (!router.isEdge) { - await _createRouterPorts(routerMicroservice.uuid, router.edgeRouterPort, transaction) - await _createRouterPorts(routerMicroservice.uuid, router.interRouterPort, transaction) - } - await MicroserviceManager.update({ uuid: routerMicroservice.uuid }, { rebuild: true }, transaction) - await ChangeTrackingService.update(router.iofogUuid, ChangeTrackingService.events.microserviceList, transaction) + // Generate new configuration + const newConfig = await _getRouterMicroserviceConfig( + router.isEdge, + router.iofogUuid, + router.messagingPort, + router.interRouterPort, + router.edgeRouterPort, + transaction + ) + + // Add connectors for upstream routers + const upstreamRoutersConnections = await RouterConnectionManager.findAllWithRouters( + { sourceRouter: router.id }, + transaction + ) + + for (const upstreamRouterConnection of upstreamRoutersConnections) { + const connectorConfig = _getRouterConnectorConfig( + router.isEdge, + upstreamRouterConnection.dest, + router.iofogUuid + ) + newConfig.connectors[connectorConfig.name] = connectorConfig + } + + // Check if configuration needs update + if (JSON.stringify(currentConfig) !== JSON.stringify(newConfig)) 
{ + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(newConfig) }, + transaction + ) + + // Check if listeners changed + if (_listenersChanged(currentConfig.listeners, newConfig.listeners)) { + await _updateRouterPorts(routerMicroservice.uuid, router, transaction) + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { rebuild: true }, + transaction + ) + await ChangeTrackingService.update( + router.iofogUuid, + ChangeTrackingService.events.microserviceList, + transaction + ) } else { - await MicroserviceManager.update({ uuid: routerMicroservice.uuid }, { rebuild: true }, transaction) - await ChangeTrackingService.update(router.iofogUuid, ChangeTrackingService.events.microserviceConfig, transaction) + // await MicroserviceManager.update( + // { uuid: routerMicroservice.uuid }, + // { rebuild: true }, + // transaction + // ) + await ChangeTrackingService.update( + router.iofogUuid, + ChangeTrackingService.events.microserviceConfig, + transaction + ) } } } @@ -218,6 +266,11 @@ function _listenersChanged (currentListeners, newListeners) { } function _createRouterPorts (routerMicroserviceUuid, port, transaction) { + // Skip port mapping for default AMQP listener (5672) + if (port === 5672) { + return Promise.resolve() + } + const mappingData = { isPublic: false, portInternal: port, @@ -244,7 +297,21 @@ async function _createRouterMicroservice (isEdge, uuid, microserviceConfig, tran iofogUuid: uuid, rootHostAccess: false, logSize: constants.MICROSERVICE_DEFAULT_LOG_SIZE, - configLastUpdated: Date.now() + configLastUpdated: Date.now(), + env: [ + { + key: 'QDROUTERD_CONF', + value: '/home/runner/skupper-router-certs/skrouterd.json' + }, + { + key: 'QDROUTERD_CONF_TYPE', + value: 'json' + }, + { + key: 'SKUPPER_SITE_ID', + value: uuid + } + ] } const capAddValues = [ @@ -262,77 +329,145 @@ async function _createRouterMicroservice (isEdge, uuid, microserviceConfig, tran capAdd: capAdd.capAdd }, 
transaction) } + + // Create environment variables + for (const env of routerMicroserviceData.env) { + await MicroserviceEnvManager.create({ + microserviceUuid: routerMicroserviceData.uuid, + key: env.key, + value: env.value + }, transaction) + } + return routerMicroservice } -function _getRouterConnectorConfig (isEdge, dest, sslProfile, saslMechanisms) { +function _getRouterConnectorConfig (isEdge, dest, uuid) { const config = { name: dest.iofogUuid || Constants.DEFAULT_ROUTER_NAME, role: isEdge ? 'edge' : 'inter-router', host: dest.host, - port: isEdge ? dest.edgeRouterPort : dest.interRouterPort + port: (isEdge ? dest.edgeRouterPort : dest.interRouterPort).toString(), + sslProfile: `${uuid}-site-server` } - if (sslProfile) { - config.sslProfile = sslProfile + return config +} + +async function _getRouterMicroserviceConfig (isEdge, uuid, messagingPort, interRouterPort, edgeRouterPort, transaction) { + const config = { + addresses: { + mc: { + prefix: 'mc', + distribution: 'multicast' + } + }, + bridges: { + tcpConnectors: {}, + tcpListeners: {} + }, + connectors: {}, + listeners: {}, + logConfig: { + ROUTER_CORE: { + enable: 'error+', + module: 'ROUTER_CORE' + } + }, + metadata: { + helloMaxAgeSeconds: '3', + id: uuid, + mode: isEdge ? 
'edge' : 'interior' + }, + siteConfig: { + name: uuid, + namespace: SITE_CONFIG_NAMESPACE, + platform: 'docker', + version: SITE_CONFIG_VERSION + }, + sslProfiles: {} } - if (saslMechanisms) { - config.saslMechanisms = saslMechanisms + // Get SSL secrets for all profiles + const siteServerSecret = await SecretManager.getSecret(`${uuid}-site-server`, transaction) + const localServerSecret = await SecretManager.getSecret(`${uuid}-local-server`, transaction) + const localAgentSecret = await SecretManager.getSecret(`${uuid}-local-agent`, transaction) + + // Add SSL profiles + if (siteServerSecret) { + config.sslProfiles[`${uuid}-site-server`] = { + CaCert: siteServerSecret.data['ca.crt'], + TlsCert: siteServerSecret.data['tls.crt'], + TlsKey: siteServerSecret.data['tls.key'], + name: `${uuid}-site-server` + } } - return config -} + if (localServerSecret) { + config.sslProfiles[`${uuid}-local-server`] = { + CaCert: localServerSecret.data['ca.crt'], + TlsCert: localServerSecret.data['tls.crt'], + TlsKey: localServerSecret.data['tls.key'], + name: `${uuid}-local-server` + } + } -function _getRouterMicroserviceConfig (isEdge, uuid, messagingPort, interRouterPort, edgeRouterPort, saslMechanisms, authenticatePeer, sslProfile, requireSsl, caCert, tlsCert, tlsKey) { - const microserviceConfig = { - mode: isEdge ? 
'edge' : 'interior', - id: uuid, - listeners: [ - { - role: 'normal', - host: '0.0.0.0', - port: messagingPort - } - ] + if (localAgentSecret) { + config.sslProfiles[`${uuid}-local-agent`] = { + CaCert: localAgentSecret.data['ca.crt'], + TlsCert: localAgentSecret.data['tls.crt'], + TlsKey: localAgentSecret.data['tls.key'], + name: `${uuid}-local-agent` + } } - // Conditionally add sslProfiles - if (sslProfile && tlsCert && tlsKey) { - microserviceConfig.sslProfiles = [ - { - name: sslProfile, - tlsCert: tlsCert, - tlsKey: tlsKey, - ...(caCert && { caCert }) // Add caCert if provided - } - ] + // Add default AMQP listener (internal) + config.listeners[`${uuid}-amqp`] = { + host: '0.0.0.0', + name: `${uuid}-amqp`, + port: 5672, + role: 'normal' } + // Add AMQPS listener + const amqpsListener = { + host: '0.0.0.0', + name: `${uuid}-amqps`, + port: messagingPort, + role: 'normal', + authenticatePeer: true, + saslMechanisms: 'EXTERNAL', + sslProfile: `${uuid}-local-server` + } + config.listeners[`${uuid}-amqps`] = amqpsListener + if (!isEdge) { - microserviceConfig.listeners.push( - { - role: 'inter-router', - host: '0.0.0.0', - port: interRouterPort, - ...(saslMechanisms && { saslMechanisms }), // Add saslMechanisms if provided - ...(authenticatePeer && { authenticatePeer }), // Add authenticatePeer if provided - ...(sslProfile && { sslProfile }), // Add sslProfile if provided - ...(requireSsl && { requireSsl }) // Add requireSsl if provided - }, - { - role: 'edge', - host: '0.0.0.0', - port: edgeRouterPort, - ...(saslMechanisms && { saslMechanisms }), // Add saslMechanisms if provided - ...(authenticatePeer && { authenticatePeer }), // Add authenticatePeer if provided - ...(sslProfile && { sslProfile }), // Add sslProfile if provided - ...(requireSsl && { requireSsl }) // Add requireSsl if provided - } - ) + // Add inter-router listener + const interRouterListener = { + host: '0.0.0.0', + name: `${uuid}-inter-router`, + port: interRouterPort, + role: 'inter-router', + 
authenticatePeer: true, + saslMechanisms: 'EXTERNAL', + sslProfile: `${uuid}-site-server` + } + config.listeners[`${uuid}-inter-router`] = interRouterListener + + // Add edge listener + const edgeListener = { + host: '0.0.0.0', + name: `${uuid}-edge`, + port: edgeRouterPort, + role: 'edge', + authenticatePeer: true, + saslMechanisms: 'EXTERNAL', + sslProfile: `${uuid}-site-server` + } + config.listeners[`${uuid}-edge`] = edgeListener } - return microserviceConfig + return config } async function getNetworkRouter (networkRouterId, transaction) { @@ -364,10 +499,10 @@ async function upsertDefaultRouter (routerData, transaction) { const createRouterData = { isEdge: false, - messagingPort: routerData.messagingPort || 5672, + messagingPort: routerData.messagingPort || 5671, host: routerData.host, - edgeRouterPort: routerData.edgeRouterPort || 56722, - interRouterPort: routerData.interRouterPort || 56721, + edgeRouterPort: routerData.edgeRouterPort || 45671, + interRouterPort: routerData.interRouterPort || 55671, isDefault: true } diff --git a/src/services/secret-service.js b/src/services/secret-service.js new file mode 100644 index 00000000..16084702 --- /dev/null +++ b/src/services/secret-service.js @@ -0,0 +1,136 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const TransactionDecorator = require('../decorators/transaction-decorator') +const SecretManager = require('../data/managers/secret-manager') +const AppHelper = require('../helpers/app-helper') +const Errors = require('../helpers/errors') +const ErrorMessages = require('../helpers/error-messages') +const Validator = require('../schemas/index') + +function validateBase64 (value) { + try { + const decoded = Buffer.from(value, 'base64').toString('utf-8') + const reencoded = Buffer.from(decoded).toString('base64') + return reencoded === value + } catch (error) { + return false + } +} + +function validateSecretData (type, data) { + if (type === 'tls') { + const invalidKeys = Object.entries(data) + .filter(([_, value]) => !validateBase64(value)) + .map(([key]) => key) + + if (invalidKeys.length > 0) { + throw new Errors.ValidationError( + `Invalid base64 encoding for keys: ${invalidKeys.join(', ')}` + ) + } + } +} + +async function createSecretEndpoint (secretData, transaction) { + const validation = await Validator.validate(secretData, Validator.schemas.secretCreate) + if (!validation.valid) { + throw new Errors.ValidationError(validation.error) + } + + validateSecretData(secretData.type, secretData.data) + + const existingSecret = await SecretManager.findOne({ name: secretData.name }, transaction) + if (existingSecret) { + throw new Errors.ConflictError(AppHelper.formatMessage(ErrorMessages.SECRET_ALREADY_EXISTS, secretData.name)) + } + + const secret = await SecretManager.createSecret(secretData.name, secretData.type, secretData.data, transaction) + return { + id: secret.id, + name: secret.name, + type: secret.type, + created_at: secret.created_at, + updated_at: secret.updated_at + } +} + +async function updateSecretEndpoint (secretName, secretData, transaction) 
{ + const validation = await Validator.validate(secretData, Validator.schemas.secretUpdate) + if (!validation.valid) { + throw new Errors.ValidationError(validation.error) + } + + const existingSecret = await SecretManager.findOne({ name: secretName }, transaction) + if (!existingSecret) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.SECRET_NOT_FOUND, secretName)) + } + + validateSecretData(existingSecret.type, secretData.data) + + const secret = await SecretManager.updateSecret(secretName, secretData.data, transaction) + return { + id: secret.id, + name: secret.name, + type: secret.type, + created_at: secret.created_at, + updated_at: secret.updated_at + } +} + +async function getSecretEndpoint (secretName, transaction) { + const secret = await SecretManager.getSecret(secretName, transaction) + if (!secret) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.SECRET_NOT_FOUND, secretName)) + } + + return { + id: secret.id, + name: secret.name, + type: secret.type, + data: secret.data, + created_at: secret.created_at, + updated_at: secret.updated_at + } +} + +async function listSecretsEndpoint (transaction) { + const secrets = await SecretManager.listSecrets(transaction) + return { + secrets: secrets.map(secret => ({ + id: secret.id, + name: secret.name, + type: secret.type, + created_at: secret.created_at, + updated_at: secret.updated_at + })) + } +} + +async function deleteSecretEndpoint (secretName, transaction) { + const existingSecret = await SecretManager.findOne({ name: secretName }, transaction) + if (!existingSecret) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.SECRET_NOT_FOUND, secretName)) + } + + await SecretManager.deleteSecret(secretName, transaction) + return {} +} + +module.exports = { + createSecretEndpoint: TransactionDecorator.generateTransaction(createSecretEndpoint), + updateSecretEndpoint: TransactionDecorator.generateTransaction(updateSecretEndpoint), + 
getSecretEndpoint: TransactionDecorator.generateTransaction(getSecretEndpoint), + listSecretsEndpoint: TransactionDecorator.generateTransaction(listSecretsEndpoint), + deleteSecretEndpoint: TransactionDecorator.generateTransaction(deleteSecretEndpoint) +} diff --git a/src/services/yaml-parser-service.js b/src/services/yaml-parser-service.js index bdbf1ea5..aa33472c 100644 --- a/src/services/yaml-parser-service.js +++ b/src/services/yaml-parser-service.js @@ -44,6 +44,45 @@ async function parseAppTemplateFile (fileContent) { return appTemplate } +async function parseSecretFile (fileContent, options = {}) { + try { + const doc = yaml.load(fileContent) + if (!doc || !doc.kind) { + throw new Errors.ValidationError(`Invalid YAML format: missing kind field`) + } + if (doc.kind !== 'Secret') { + throw new Errors.ValidationError(`Invalid kind ${doc.kind}`) + } + if (doc.metadata == null || doc.spec == null) { + throw new Errors.ValidationError('Invalid YAML format: missing metadata or spec') + } + + // If this is an update, validate that the name matches + if (options.isUpdate && options.secretName) { + if (doc.metadata.name !== options.secretName) { + throw new Errors.ValidationError(`Secret name in YAML (${doc.metadata.name}) doesn't match endpoint path (${options.secretName})`) + } + + // For updates, we only need the data + return { + data: doc.spec.data + } + } + + // For creates, return full object + return { + name: lget(doc, 'metadata.name', undefined), + type: doc.spec.type, + data: doc.spec.data + } + } catch (error) { + if (error instanceof Errors.ValidationError) { + throw error + } + throw new Errors.ValidationError(`Error parsing YAML: ${error.message}`) + } +} + const mapImages = (images) => { const imgs = [] if (images.x86 != null) { @@ -80,14 +119,15 @@ const parseMicroserviceImages = async (fileImages) => { const parseMicroserviceYAML = async (microservice) => { const { registryId, catalogItemId, images } = await 
parseMicroserviceImages(microservice.images) + const container = microservice.container || {} const microserviceData = { config: microservice.config != null ? JSON.stringify(microservice.config) : undefined, name: microservice.name, catalogItemId, agentName: lget(microservice, 'agent.name'), registryId, - ...microservice.container, - annotations: microservice.container.annotations != null ? JSON.stringify(microservice.container.annotations) : undefined, + ...container, + annotations: container.annotations != null ? JSON.stringify(container.annotations) : undefined, capAdd: lget(microservice, 'container.capAdd', []), capDrop: lget(microservice, 'container.capDrop', []), ports: (lget(microservice, 'container.ports', [])), @@ -140,8 +180,41 @@ async function parseMicroserviceFile (fileContent) { const _deleteUndefinedFields = (obj) => Object.keys(obj).forEach(key => obj[key] === undefined && delete obj[key]) +async function parseCertificateFile (fileContent) { + try { + const doc = yaml.load(fileContent) + if (!doc || !doc.kind) { + throw new Errors.ValidationError(`Invalid YAML format: missing kind field`) + } + if (doc.kind !== 'Certificate' && doc.kind !== 'CertificateAuthority') { + throw new Errors.ValidationError(`Invalid kind ${doc.kind}`) + } + if (doc.metadata == null || doc.spec == null) { + throw new Errors.ValidationError('Invalid YAML format: missing metadata or spec') + } + + const result = { + name: lget(doc, 'metadata.name', undefined), + ...doc.spec + } + + if (doc.kind === 'CertificateAuthority') { + result.isCA = true + } + + return result + } catch (error) { + if (error instanceof Errors.ValidationError) { + throw error + } + throw new Errors.ValidationError(`Error parsing YAML: ${error.message}`) + } +} + module.exports = { parseAppTemplateFile: parseAppTemplateFile, parseAppFile: parseAppFile, - parseMicroserviceFile: parseMicroserviceFile + parseMicroserviceFile: parseMicroserviceFile, + parseSecretFile: parseSecretFile, + parseCertificateFile: 
parseCertificateFile } diff --git a/src/utils/cert.js b/src/utils/cert.js new file mode 100644 index 00000000..91398b5f --- /dev/null +++ b/src/utils/cert.js @@ -0,0 +1,518 @@ +const forge = require('node-forge') +const k8sClient = require('./k8s-client') + +// Types for CA input +const CA_TYPES = { + K8S_SECRET: 'k8s-secret', + DIRECT: 'direct', + SELF_SIGNED: 'self-signed' +} + +/** + * Certificate Authority class + * Holds certificate, private key, and certificate data + */ +class CertificateAuthority { + constructor (certificate, key, crtData) { + this.certificate = certificate + this.key = key + this.crtData = crtData + } + + // Get certificate in PEM format + get certPem () { + return this.certificate + } +} + +/** + * CA Storage Format + * @typedef {Object} CAStorage + * @property {string} cert - PEM encoded certificate + * @property {string} key - PEM encoded private key + */ + +/** + * Validates a CA certificate and key pair + * @param {string} cert - PEM encoded certificate + * @param {string} key - PEM encoded private key + * @returns {boolean} - True if valid + * @throws {Error} - If validation fails + */ +async function validateCA (cert, key) { + try { + // Convert PEM to forge objects + const forgeCert = forge.pki.certificateFromPem(cert) + const forgeKey = forge.pki.privateKeyFromPem(key) + + // Extract public key from the certificate + const certPublicKey = forgeCert.publicKey + + // Create a message to test the keys + const md = forge.md.sha256.create() + md.update('test', 'utf8') + + // Sign with private key + const signature = forge.util.encode64( + forgeKey.sign(md) + ) + + // Verify with the certificate's public key + const verified = certPublicKey.verify( + md.digest().getBytes(), + forge.util.decode64(signature) + ) + + if (!verified) { + throw new Error('Private key does not match certificate') + } + + return true + } catch (error) { + throw new Error(`CA validation failed: ${error.message}`) + } +} + +/** + * Stores CA certificate and key 
to internal secret storage + * @param {CAStorage} ca - CA data to store + * @param {string} name - Name of the secret + * @returns {Promise} + */ +async function storeCA (ca, name) { + try { + // Ensure data is in base64 format for TLS secrets + const secretData = { + 'tls.crt': Buffer.from(ca.cert).toString('base64'), + 'tls.key': Buffer.from(ca.key).toString('base64'), + 'ca.crt': Buffer.from(ca.cert).toString('base64') + } + + const secret = { + name: name, + type: 'tls', + data: secretData + } + + // Use the secret service to store the CA + const SecretService = require('../services/secret-service') + await SecretService.createSecretEndpoint(secret) + } catch (error) { + throw new Error(`Failed to store CA: ${error.message}`) + } +} + +/** + * Loads CA certificate and key from internal secret storage + * @param {string} name - Name of the secret + * @returns {Promise} + */ +async function loadCA (name) { + try { + // Use SecretManager to get the secret with decryption handling + const SecretManager = require('../data/managers/secret-manager') + const fakeTransaction = { fakeTransaction: true } + + const secret = await SecretManager.getSecret(name, fakeTransaction) + if (!secret) { + throw new Error(`TLS secret with name ${name} not found`) + } + + if (secret.type !== 'tls') { + throw new Error(`Secret ${name} is not a TLS secret`) + } + + if (!secret.data || !secret.data['tls.crt'] || !secret.data['tls.key']) { + throw new Error(`Invalid TLS secret data for ${name}`) + } + + // Convert base64 data back to PEM format + return { + cert: Buffer.from(secret.data['tls.crt'], 'base64').toString(), + key: Buffer.from(secret.data['tls.key'], 'base64').toString() + } + } catch (error) { + throw new Error(`Failed to load CA: ${error.message}`) + } +} + +/** + * Generates a self-signed CA certificate + * @param {string} subject - CA subject name + * @param {number} expiration - Expiration time in milliseconds + * @returns {Promise} + */ +async function 
generateSelfSignedCA (subject, expiration = 5 * 365 * 24 * 60 * 60 * 1000) { + try { + // Generate RSA key pair + const keys = forge.pki.rsa.generateKeyPair(2048) + + // Create a certificate + const cert = forge.pki.createCertificate() + + // Set certificate fields + cert.publicKey = keys.publicKey + cert.serialNumber = forge.util.bytesToHex(forge.random.getBytesSync(16)) + + // Set validity period + const now = new Date() + cert.validity.notBefore = now + cert.validity.notAfter = new Date(now.getTime() + expiration) + + // Parse the subject string (format: /CN=Subject Name) + const subjectAttrs = [] + const issuerAttrs = [] + + // Extract CN from subject string + let commonName = subject + if (subject.startsWith('/CN=')) { + commonName = subject.substring(4) + } + + subjectAttrs.push({ name: 'commonName', value: commonName }) + issuerAttrs.push({ name: 'commonName', value: commonName }) + + cert.setSubject(subjectAttrs) + cert.setIssuer(issuerAttrs) // Self-signed, so issuer = subject + + // Add extensions for a CA certificate + cert.setExtensions([ + { + name: 'basicConstraints', + cA: true, + critical: true + }, + { + name: 'keyUsage', + keyCertSign: true, + cRLSign: true, + critical: true + }, + { + name: 'subjectKeyIdentifier' + } + ]) + + // Self-sign the certificate with SHA-256 + cert.sign(keys.privateKey, forge.md.sha256.create()) + + // Convert to PEM + const certPem = forge.pki.certificateToPem(cert) + const keyPem = forge.pki.privateKeyToPem(keys.privateKey) + + return { + cert: certPem, + key: keyPem + } + } catch (error) { + throw new Error(`Failed to generate certificate: ${error.message}`) + } +} + +// CA handling functions +async function getCAFromK8sSecret (secretName) { + try { + // Check that k8sClient is properly required and available + if (!k8sClient) { + throw new Error('Kubernetes client not available') + } + const secret = await k8sClient.getSecret(secretName) + if (!secret) { + return null + } + if (!secret.data) { + return null + } + if 
(!secret.data['tls.crt'] || !secret.data['tls.key']) { + return null + } + + const cert = Buffer.from(secret.data['tls.crt'], 'base64').toString() + const key = Buffer.from(secret.data['tls.key'], 'base64').toString() + + // Check if we need to register this CA in our local database + try { + // Use SecretManager to check if there's a local secret + const SecretManager = require('../data/managers/secret-manager') + const localSecret = await SecretManager.findOne({ name: secretName }, { fakeTransaction: true }) + + // If no local secret, we need to create one + if (!localSecret) { + // Store the CA in local secret storage + await storeCA({ cert, key }, secretName) + // Also create a certificate record + const CertificateManager = require('../data/managers/certificate-manager') + const forge = require('node-forge') + const forgeCert = forge.pki.certificateFromPem(cert) + // Extract subject + const subject = forgeCert.subject.getField('CN') ? forgeCert.subject.getField('CN').value : secretName + + // Create CA record + await CertificateManager.createCertificateRecord({ + name: secretName, + subject: subject, + isCA: true, + validFrom: forgeCert.validity.notBefore, + validTo: forgeCert.validity.notAfter, + serialNumber: forgeCert.serialNumber + }, { fakeTransaction: true }) + } + } catch (dbError) { + // Continue anyway - we at least have the cert/key + } + + return new CertificateAuthority( + cert, + key, + cert + ) + } catch (error) { + throw new Error(`Failed to get CA from Kubernetes secret: ${error.message}`) + } +} + +async function getCAFromDirect (ca) { + if (!ca.cert || !ca.key) { + throw new Error('CA must provide both certificate and private key in PEM format') + } + + try { + // Validate the CA + await validateCA(ca.cert, ca.key) + + return new CertificateAuthority(ca.cert, ca.key, ca.cert) + } catch (error) { + throw new Error(`failed to get CA from direct input: ${error.message}`) + } +} + +async function getCAFromInput (ca) { + if (!ca) { + return null + 
} + + // Normalize CA type to lowercase for case-insensitive matching + const caType = ca.type ? ca.type.toLowerCase() : '' + + switch (caType) { + case CA_TYPES.K8S_SECRET.toLowerCase(): + return getCAFromK8sSecret(ca.secretName) + case CA_TYPES.DIRECT.toLowerCase(): + if (ca.secretName) { + // If secretName is provided, load from internal secret storage + const caData = await loadCA(ca.secretName) + return getCAFromDirect(caData) + } + return getCAFromDirect(ca) + case CA_TYPES.SELF_SIGNED.toLowerCase(): + return null + default: + throw new Error(`unknown CA type: ${caType}. Expected one of: ${Object.values(CA_TYPES).join(', ')}`) + } +} + +/** + * Main certificate generation function + * @param {Object} params - Certificate parameters + * @returns {Promise} - Certificate data + */ +async function generateCertificate ({ + name, + subject, + hosts, + expiration = 5 * 365 * 24 * 60 * 60 * 1000, + ca, + isRenewal = false +}) { + try { + const caCert = await getCAFromInput(ca) + + // Generate RSA key pair + const keys = forge.pki.rsa.generateKeyPair(2048) + + // Create a certificate + const cert = forge.pki.createCertificate() + + // Set certificate fields + cert.publicKey = keys.publicKey + cert.serialNumber = forge.util.bytesToHex(forge.random.getBytesSync(16)) + + // Set validity period + const now = new Date() + cert.validity.notBefore = now + cert.validity.notAfter = new Date(now.getTime() + expiration) + + // Parse the subject string (format: /CN=Subject Name) + const subjectAttrs = [] + + // Extract CN from subject string + let commonName = subject + if (subject.startsWith('/CN=')) { + commonName = subject.substring(4) + } + + subjectAttrs.push({ name: 'commonName', value: commonName }) + cert.setSubject(subjectAttrs) + + // Process hosts for Subject Alternative Names + const hostsList = hosts ? 
hosts.split(',').map(h => h.trim()) : [] + const altNames = [] + + for (const host of hostsList) { + if (host.match(/^(\d{1,3}\.){3}\d{1,3}$/)) { + // IP address + altNames.push({ type: 7, ip: host }) + } else { + // DNS name + altNames.push({ type: 2, value: host }) + } + } + + // Set up the certificate based on whether we have a CA or not + if (caCert) { + // If we have a CA, use it to sign the certificate + const caForgeCert = forge.pki.certificateFromPem(caCert.certPem || caCert.crtData) + const caForgeKey = forge.pki.privateKeyFromPem(caCert.key) + + // Set the issuer from the CA + cert.setIssuer(caForgeCert.subject.attributes) + + // Add extensions for a server certificate + cert.setExtensions([ + { + name: 'basicConstraints', + cA: false, + critical: true + }, + { + name: 'keyUsage', + digitalSignature: true, + keyEncipherment: true, + critical: true + }, + { + name: 'extKeyUsage', + serverAuth: true, + clientAuth: true + }, + { + name: 'subjectAltName', + altNames: altNames + }, + { + name: 'authorityKeyIdentifier', + authorityCertIssuer: true, + serialNumber: caForgeCert.serialNumber + } + ]) + + // Sign the certificate with the CA's private key + cert.sign(caForgeKey, forge.md.sha256.create()) + } else { + // Self-signed certificate + cert.setIssuer(subjectAttrs) + + // Add extensions for a self-signed server certificate + cert.setExtensions([ + { + name: 'basicConstraints', + cA: false, + critical: true + }, + { + name: 'keyUsage', + digitalSignature: true, + keyEncipherment: true, + critical: true + }, + { + name: 'extKeyUsage', + serverAuth: true, + clientAuth: true + }, + { + name: 'subjectAltName', + altNames: altNames + }, + { + name: 'subjectKeyIdentifier' + } + ]) + + // Self-sign the certificate + cert.sign(keys.privateKey, forge.md.sha256.create()) + } + + // Convert to PEM + const certPem = forge.pki.certificateToPem(cert) + const keyPem = forge.pki.privateKeyToPem(keys.privateKey) + + // Store the certificate as a TLS secret + const secretData 
= { + 'tls.crt': Buffer.from(certPem).toString('base64'), + 'tls.key': Buffer.from(keyPem).toString('base64'), + 'ca.crt': Buffer.from(caCert ? caCert.certPem || caCert.crtData : certPem).toString('base64') + } + + const secret = { + name: name, + type: 'tls', + data: secretData + } + + // Use the secret service to store the certificate + const SecretService = require('../services/secret-service') + + if (isRenewal) { + // For renewals, delete the existing secret first + try { + await SecretService.deleteSecretEndpoint(name) + } catch (error) { + // If the secret doesn't exist, that's okay, just continue + if (error.name !== 'NotFoundError') { + throw error + } + } + } + + // Create new secret with certificate data + await SecretService.createSecretEndpoint(secret) + + return { + cert: certPem, + key: keyPem, + ca: caCert ? caCert.crtData : certPem + } + } catch (error) { + throw error + } +} + +function decodeCertificate (data) { + try { + const cert = forge.pki.certificateFromPem(data) + return { + subject: cert.subject.getField('CN').value, + issuer: cert.issuer.getField('CN').value, + validFrom: cert.validity.notBefore, + validTo: cert.validity.notAfter, + serialNumber: cert.serialNumber, + extensions: cert.extensions + } + } catch (error) { + throw new Error(`Failed to decode certificate: ${error.message}`) + } +} + +module.exports = { + CA_TYPES, + CertificateAuthority, + generateCertificate, + decodeCertificate, + generateSelfSignedCA, + storeCA, + loadCA, + validateCA, + getCAFromDirect, + getCAFromK8sSecret +} diff --git a/src/utils/k8s-client.js b/src/utils/k8s-client.js new file mode 100644 index 00000000..e84afda1 --- /dev/null +++ b/src/utils/k8s-client.js @@ -0,0 +1,153 @@ +const logger = require('../logger') +let k8sApi = null + +async function initializeK8sClient () { + if (!k8sApi) { + logger.debug('Initializing Kubernetes client') + const k8s = require('@kubernetes/client-node') + const kubeConfig = new k8s.KubeConfig() + + // Use the in-cluster 
configuration + kubeConfig.loadFromCluster() + k8sApi = kubeConfig.makeApiClient(k8s.CoreV1Api) + logger.info('Kubernetes client initialized successfully') + } + return k8sApi +} + +async function getSecret (secretName, namespace) { + logger.debug(`Getting secret: ${secretName} in namespace: ${namespace}`) + try { + const api = await initializeK8sClient() + const response = await api.readNamespacedSecret(secretName, namespace) + logger.info(`Successfully retrieved secret: ${secretName}`) + return response.body + } catch (error) { + logger.error(`Failed to get secret ${secretName}: ${error.message}`) + throw error + } +} + +// ConfigMap methods +async function getConfigMap (configMapName, namespace) { + logger.debug(`Getting ConfigMap: ${configMapName} in namespace: ${namespace}`) + try { + const api = await initializeK8sClient() + const response = await api.readNamespacedConfigMap(configMapName, namespace) + logger.info(`Successfully retrieved ConfigMap: ${configMapName}`) + return response.body + } catch (error) { + logger.error(`Failed to get ConfigMap ${configMapName}: ${error.message}`) + throw error + } +} + +async function patchConfigMap (configMapName, namespace, patchData) { + logger.debug(`Patching ConfigMap: ${configMapName} in namespace: ${namespace}`) + try { + const api = await initializeK8sClient() + // Pass all options in one object - much cleaner than multiple undefined parameters + const response = await api.patchNamespacedConfigMap( + configMapName, + namespace, + patchData, + { + headers: { 'Content-Type': 'application/strategic-merge-patch+json' } + } + ) + logger.info(`Successfully patched ConfigMap: ${configMapName}`) + return response.body + } catch (error) { + logger.error(`Failed to patch ConfigMap ${configMapName}: ${error.message}`) + throw error + } +} + +// Service methods +async function getNamespacedServices (namespace) { + logger.debug(`Listing services in namespace: ${namespace}`) + try { + const api = await initializeK8sClient() + 
const response = await api.listNamespacedService(namespace) + logger.info(`Successfully retrieved ${response.body.items.length} services in namespace: ${namespace}`) + return response.body + } catch (error) { + logger.error(`Failed to list services in namespace ${namespace}: ${error.message}`) + throw error + } +} + +async function createService (namespace, serviceSpec) { + logger.debug(`Creating service in namespace: ${namespace}`) + try { + const api = await initializeK8sClient() + const response = await api.createNamespacedService(namespace, serviceSpec) + logger.info(`Successfully created service: ${response.body.metadata.name} in namespace: ${namespace}`) + return response.body + } catch (error) { + logger.error(`Failed to create service in namespace ${namespace}: ${error.message}`) + throw error + } +} + +async function deleteService (serviceName, namespace) { + logger.debug(`Deleting service: ${serviceName} in namespace: ${namespace}`) + try { + const api = await initializeK8sClient() + const response = await api.deleteNamespacedService(serviceName, namespace) + logger.info(`Successfully deleted service: ${serviceName} from namespace: ${namespace}`) + return response.body + } catch (error) { + logger.error(`Failed to delete service ${serviceName}: ${error.message}`) + throw error + } +} + +/** + * Gets the LoadBalancer IP for a service if it exists + * @param {string} serviceName - The name of the service + * @param {string} namespace - The namespace of the service + * @returns {Promise} The LoadBalancer IP or null if not available + */ +async function watchLoadBalancerIP (serviceName, namespace) { + logger.debug(`Checking LoadBalancer IP for service: ${serviceName} in namespace: ${namespace}`) + const api = await initializeK8sClient() + try { + const response = await api.readNamespacedService(serviceName, namespace) + const service = response.body + + // Check if the service type is LoadBalancer + if (service.spec && service.spec.type === 'LoadBalancer') { 
+ // Check if the LoadBalancer IP exists + if (service.status && + service.status.loadBalancer && + service.status.loadBalancer.ingress && + service.status.loadBalancer.ingress.length > 0) { + const ip = service.status.loadBalancer.ingress[0].ip + if (ip) { + logger.info(`Found LoadBalancer IP: ${ip} for service: ${serviceName}`) + return ip + } + } + logger.info(`Service ${serviceName} is LoadBalancer type but IP not yet assigned`) + } else { + const serviceType = service.spec && service.spec.type ? service.spec.type : 'unknown' + logger.info(`Service ${serviceName} is not of type LoadBalancer (type: ${serviceType})`) + } + // Return null if the service is not a LoadBalancer or IP is not yet assigned + return null + } catch (error) { + logger.error(`Error getting LoadBalancer IP for service ${serviceName}: ${error.message}`) + return null + } +} + +module.exports = { + getSecret, + getConfigMap, + patchConfigMap, + getNamespacedServices, + createService, + deleteService, + watchLoadBalancerIP +} From 2ccb64ed0925d6d6a74c8203e0a810c451d8d85c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 4 Jun 2025 01:29:11 +0300 Subject: [PATCH 08/25] secret, certificate, volumemount, configmap endpoints and logic added, microservice execenabled added, k8s handler fixed --- docs/swagger.yaml | 1222 ++++++++++++++-- package-lock.json | 66 +- package.json | 4 +- src/config/controller.yaml | 21 +- src/config/env-mapping.js | 14 +- src/config/index.js | 14 +- src/controllers/agent-controller.js | 5 + src/controllers/config-map-controller.js | 66 + src/controllers/iofog-controller.js | 5 +- src/controllers/microservices-controller.js | 27 +- src/controllers/service-controller.js | 66 + src/controllers/volume-mount-controller.js | 71 + src/data/constants.js | 3 +- src/data/managers/config-map-manager.js | 54 + .../managers/iofog-access-token-manager.js | 36 - .../managers/microservice-port-manager.js | 16 - .../microservice-public-port-manager.js | 25 - 
src/data/managers/service-manager.js | 90 ++ src/data/managers/volume-mounting-manager.js | 62 + .../mysql/db_migration_mysql_v1.0.2.sql | 103 +- .../postgres/db_migration_pg_v1.0.2.sql | 93 +- .../sqlite/db_migration_sqlite_v1.0.2.sql | 78 +- src/data/models/changetracking.js | 10 + src/data/models/configMap.js | 77 + src/data/models/fog.js | 61 +- src/data/models/fogVolumeMounts.js | 9 + src/data/models/fogaccesstoken.js | 41 - src/data/models/index.js | 1 - src/data/models/microservice.js | 20 + src/data/models/microserviceExtraHost.js | 3 - src/data/models/microservicePublicPort.js | 88 -- src/data/models/microserviceenv.js | 8 + src/data/models/microserviceport.js | 13 - src/data/models/microservicestatus.js | 5 + src/data/models/service.js | 93 ++ src/data/models/serviceTags.js | 9 + src/data/models/tags.js | 1 + src/data/models/volumeMount.js | 43 + src/data/providers/database-provider.js | 48 +- src/data/providers/mysql.js | 14 +- .../seeders/mysql/db_seeder_mysql_v1.0.2.sql | 7 +- .../seeders/postgres/db_seeder_pg_v1.0.2.sql | 7 +- .../sqlite/db_seeder_sqlite_v1.0.2.sql | 7 +- src/enums/fog-state.js | 8 +- src/helpers/app-helper.js | 6 + src/helpers/error-messages.js | 19 +- src/helpers/template-helper.js | 4 +- src/jobs/fog-status-job.js | 5 +- src/routes/agent.js | 25 + src/routes/configMap.js | 246 ++++ src/routes/microservices.js | 184 ++- src/routes/secret.js | 4 +- src/routes/service.js | 274 ++++ src/routes/volumeMount.js | 312 ++++ src/schemas/agent.js | 13 +- src/schemas/certificate.js | 9 +- src/schemas/config-map.js | 66 + src/schemas/iofog.js | 4 + src/schemas/microservice.js | 49 +- src/schemas/service.js | 108 ++ src/schemas/utils/utils.js | 1 + src/schemas/volume-mount.js | 91 ++ src/server.js | 12 +- src/services/agent-service.js | 101 +- src/services/certificate-service.js | 94 +- src/services/change-tracking-service.js | 10 +- src/services/config-map-service.js | 127 ++ src/services/iofog-access-token-service.js | 53 - 
src/services/iofog-service.js | 662 +++++++-- .../microservice-ports/microservice-port.js | 211 +++ src/services/microservices-service.js | 336 ++++- src/services/router-service.js | 64 +- src/services/secret-service.js | 16 + src/services/services-service.js | 1209 ++++++++++++++++ src/services/volume-mount-service.js | 196 +++ src/services/yaml-parser-service.js | 152 +- src/utils/cert.js | 62 +- src/utils/k8s-client.js | 211 ++- src/utils/ssl-utils.js | 7 +- test/backup/iofog-service.js | 1250 ++++++++++++++++ test/backup/services-service.js | 1261 +++++++++++++++++ 81 files changed, 9198 insertions(+), 970 deletions(-) create mode 100644 src/controllers/config-map-controller.js create mode 100644 src/controllers/service-controller.js create mode 100644 src/controllers/volume-mount-controller.js create mode 100644 src/data/managers/config-map-manager.js delete mode 100644 src/data/managers/iofog-access-token-manager.js delete mode 100644 src/data/managers/microservice-public-port-manager.js create mode 100644 src/data/managers/service-manager.js create mode 100644 src/data/managers/volume-mounting-manager.js create mode 100644 src/data/models/configMap.js create mode 100644 src/data/models/fogVolumeMounts.js delete mode 100644 src/data/models/fogaccesstoken.js delete mode 100644 src/data/models/microservicePublicPort.js create mode 100644 src/data/models/service.js create mode 100644 src/data/models/serviceTags.js create mode 100644 src/data/models/volumeMount.js create mode 100644 src/routes/configMap.js create mode 100644 src/routes/service.js create mode 100644 src/routes/volumeMount.js create mode 100644 src/schemas/config-map.js create mode 100644 src/schemas/service.js create mode 100644 src/schemas/volume-mount.js create mode 100644 src/services/config-map-service.js delete mode 100644 src/services/iofog-access-token-service.js create mode 100644 src/services/microservice-ports/microservice-port.js create mode 100644 src/services/services-service.js 
create mode 100644 src/services/volume-mount-service.js create mode 100644 test/backup/iofog-service.js create mode 100644 test/backup/services-service.js diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 28aaad1a..622c9514 100755 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -2271,6 +2271,120 @@ paths: description: Not Found "500": description: Internal Server Error + "/microservices/{uuid}/exec": + post: + tags: + - Microservices + summary: Enables a exec for microservice + operationId: enableMicroserviceExec + parameters: + - in: path + name: uuid + description: Microservice UUID + required: true + schema: + type: string + security: + - userToken: [] + responses: + "201": + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "401": + description: Not Authorized + "404": + description: Invalid Microservice UUID + "500": + description: Internal Server Error + delete: + tags: + - Microservices + summary: Disables a exec for microservice + operationId: disableMicroserviceExec + parameters: + - in: path + name: uuid + description: Microservice UUID + required: true + schema: + type: string + security: + - userToken: [] + responses: + "204": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "401": + description: Not Authorized + "404": + description: Invalid Microservice UUID + "500": + description: Internal Server Error + "/microservices/system/{uuid}/exec": + post: + tags: + - Microservices + summary: Enables a exec for system microservice + operationId: enableSystemMicroserviceExec + parameters: + - in: path + name: uuid + description: Microservice UUID + required: true + schema: + type: string + security: + - userToken: [] + responses: + "201": + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "401": + description: Not Authorized + "404": + 
description: Invalid Microservice UUID + "500": + description: Internal Server Error + delete: + tags: + - Microservices + summary: Disables a exec for system microservice + operationId: disableSystemMicroserviceExec + parameters: + - in: path + name: uuid + description: Microservice UUID + required: true + schema: + type: string + security: + - userToken: [] + responses: + "204": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "401": + description: Not Authorized + "404": + description: Invalid Microservice UUID + "500": + description: Internal Server Error "/microservices/{uuid}/image-snapshot": post: tags: @@ -3258,7 +3372,7 @@ paths: description: Secret Not Found "500": description: Internal Server Error - put: + patch: tags: - Secrets summary: Updates an existing secret @@ -3349,7 +3463,7 @@ paths: "500": description: Internal Server Error "/secrets/yaml/{name}": - put: + patch: tags: - Secrets summary: Updates an existing secret using YAML @@ -3550,86 +3664,819 @@ paths: parameters: - in: path name: name - description: Certificate name + description: Certificate name + required: true + schema: + type: string + security: + - userToken: [] + responses: + '200': + description: Success + '401': + description: Unauthorized + '404': + description: Certificate not found + '500': + description: Internal Server Error + + /certificates/{name}/renew: + post: + tags: + - Certificates + summary: Renew a certificate + operationId: renewCertificate + parameters: + - in: path + name: name + description: Certificate name + required: true + schema: + type: string + security: + - userToken: [] + responses: + '200': + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/CertificateRenewResponse" + '400': + description: Bad Request + '401': + description: Unauthorized + '404': + description: Certificate not found + '500': + description: Internal Server Error + 
/certificates/yaml: + post: + tags: + - Certificates + summary: Create a certificate or CA from YAML file + operationId: createCertificateFromYAML + security: + - userToken: [] + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + certificate: + type: string + format: binary + responses: + '201': + description: Created + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CAResponse" + - $ref: "#/components/schemas/CertificateResponse" + '400': + description: Bad Request + '401': + description: Unauthorized + '404': + description: Not Found - Referenced CA not found + '409': + description: Conflict - Certificate or CA already exists + /services: + get: + tags: + - Services + summary: Gets list of services + operationId: getServicesList + security: + - userToken: [] + responses: + "200": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Service" + "401": + description: Not Authorized + "500": + description: Internal Server Error + post: + tags: + - Services + summary: Creates a new service + operationId: createService + security: + - userToken: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/Service" + responses: + "201": + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + $ref: "#/components/schemas/Service" + "400": + description: Bad Request + "401": + description: Not Authorized + "409": + description: Duplicate Name + "500": + description: Internal Server Error + /services/{name}: + get: + tags: + - Services + summary: Gets a service info + operationId: getServiceInfo + parameters: + - in: path + name: name + description: Service name + required: true + schema: + type: 
string + security: + - userToken: [] + responses: + "200": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + $ref: "#/components/schemas/Service" + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error + delete: + tags: + - Services + summary: Deletes a service + operationId: deleteService + parameters: + - in: path + name: name + description: Service name + required: true + schema: + type: string + security: + - userToken: [] + responses: + "204": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error + patch: + tags: + - Services + summary: Patches a service + operationId: patchService + parameters: + - in: path + name: name + description: Service name + required: true + schema: + type: string + security: + - userToken: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/Service" + responses: + "200": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + $ref: "#/components/schemas/Service" + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error + /services/yaml: + post: + tags: + - Services + summary: Creates a new service from YAML + operationId: createServiceYAML + security: + - userToken: [] + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + service: + type: string + format: binary + responses: + "201": + description: Created + headers: + X-Timestamp: + description: FogController server 
timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + id: + type: number + name: + type: string + type: + type: string + resource: + type: string + targetPort: + type: number + defaultBridge: + type: string + bridgePort: + type: number + updatedAt: + type: string + format: date-time + createdAt: + type: string + format: date-time + "400": + description: Bad Request + "401": + description: Not Authorized + "409": + description: Duplicate Name + "500": + description: Internal Server Error + "/services/yaml/{name}": + patch: + tags: + - Services + summary: Updates a service using YAML + operationId: updateServiceYAML + parameters: + - in: path + name: name + description: Service name + required: true + schema: + type: string + security: + - userToken: [] + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + service: + type: string + format: binary + responses: + "200": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + $ref: "#/components/schemas/Service" + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error + /configmaps: + post: + tags: + - ConfigMap + summary: Creates a new ConfigMap + operationId: createConfigMap + security: + - userToken: [] + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapCreate" + responses: + "201": + description: Created + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapResponse" + "400": + description: Bad Request + "401": + description: Not Authorized + "409": + description: ConfigMap Already Exists + "500": + description: Internal Server Error + get: + tags: + - ConfigMap + summary: Lists all ConfigMaps + operationId: listConfigMaps + security: + - 
userToken: [] + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapListResponse" + "401": + description: Not Authorized + "500": + description: Internal Server Error + /configmaps/yaml: + post: + tags: + - ConfigMap + summary: Creates a new ConfigMap from YAML + operationId: createConfigMapFromYaml + security: + - userToken: [] + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + configMap: + type: string + format: binary + responses: + "201": + description: Created + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapResponse" + "400": + description: Bad Request + "401": + description: Not Authorized + "409": + description: ConfigMap Already Exists + "500": + description: Internal Server Error + /configmaps/{name}: + get: + tags: + - ConfigMap + summary: Gets a ConfigMap by name + operationId: getConfigMap + security: + - userToken: [] + parameters: + - in: path + name: name + required: true + schema: + type: string + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapResponse" + "401": + description: Not Authorized + "404": + description: ConfigMap Not Found + "500": + description: Internal Server Error + patch: + tags: + - ConfigMap + summary: Updates a ConfigMap + operationId: updateConfigMap + security: + - userToken: [] + parameters: + - in: path + name: name + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapUpdate" + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapResponse" + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: ConfigMap Not Found + "500": + description: Internal Server Error + delete: + tags: + - ConfigMap + summary: Deletes a 
ConfigMap + operationId: deleteConfigMap + security: + - userToken: [] + parameters: + - in: path + name: name + required: true + schema: + type: string + responses: + "200": + description: Success + "401": + description: Not Authorized + "404": + description: ConfigMap Not Found + "500": + description: Internal Server Error + /configmaps/yaml/{name}: + patch: + tags: + - ConfigMap + summary: Updates a ConfigMap from YAML + operationId: updateConfigMapFromYaml + security: + - userToken: [] + parameters: + - in: path + name: name + required: true + schema: + type: string + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + configMap: + type: string + format: binary + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/ConfigMapResponse" + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: ConfigMap Not Found + "500": + description: Internal Server Error + /volumeMounts: + get: + tags: + - VolumeMounts + summary: Returns list of volume mounts + operationId: listVolumeMounts + security: + - userToken: [] + responses: + "200": + description: List of volume mounts + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/VolumeMount" + "401": + description: Not Authorized + "500": + description: Internal Server Error + post: + tags: + - VolumeMounts + summary: Creates a new volume mount + operationId: createVolumeMount + security: + - userToken: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMountCreate" + responses: + "200": + description: Created + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMount" + "400": + description: Bad Request + "401": + description: Not Authorized + "500": + description: Internal Server Error + + /volumeMounts/yaml: + post: + tags: + - VolumeMounts + summary: Creates a 
new volume mount from YAML + operationId: createVolumeMountYaml + security: + - userToken: [] + requestBody: + required: true + content: + application/x-yaml: + schema: + type: string + responses: + "200": + description: Created + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMount" + "400": + description: Bad Request + "401": + description: Not Authorized + "500": + description: Internal Server Error + + "/volumeMounts/{name}": + get: + tags: + - VolumeMounts + summary: Gets volume mount info + operationId: getVolumeMount + parameters: + - in: path + name: name + description: Volume mount name + required: true + schema: + type: string + security: + - userToken: [] + responses: + "200": + description: Success + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMount" + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error + patch: + tags: + - VolumeMounts + summary: Updates existing volume mount + operationId: updateVolumeMount + parameters: + - in: path + name: name + description: Volume mount name + required: true + schema: + type: string + security: + - userToken: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMountUpdate" + responses: + "200": + description: Updated + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMount" + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error + delete: + tags: + - VolumeMounts + summary: Deletes a volume mount + operationId: deleteVolumeMount + parameters: + - in: path + name: name + description: Volume mount name + required: true + schema: + type: string + security: + - userToken: [] + responses: + "202": + description: Accepted + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: 
Internal Server Error + + "/volumeMounts/yaml/{name}": + patch: + tags: + - VolumeMounts + summary: Updates existing volume mount from YAML + operationId: updateVolumeMountYaml + parameters: + - in: path + name: name + description: Volume mount name required: true schema: type: string security: - userToken: [] + requestBody: + required: true + content: + application/x-yaml: + schema: + type: string responses: - '200': - description: Success - '401': - description: Unauthorized - '404': - description: Certificate not found - '500': + "200": + description: Updated + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMount" + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Not Found + "500": description: Internal Server Error - - /certificates/{name}/renew: + + "/volumeMounts/{name}/link": post: tags: - - Certificates - summary: Renew a certificate - operationId: renewCertificate + - VolumeMounts + summary: Links volume mount to fog nodes + operationId: linkVolumeMount parameters: - in: path name: name - description: Certificate name + description: Volume mount name required: true schema: type: string security: - userToken: [] + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/VolumeMountLink" responses: - '200': + "200": description: Success content: application/json: schema: - $ref: "#/components/schemas/CertificateRenewResponse" - '400': + $ref: "#/components/schemas/VolumeMount" + "400": description: Bad Request - '401': - description: Unauthorized - '404': - description: Certificate not found - '500': + "401": + description: Not Authorized + "404": + description: Not Found + "500": description: Internal Server Error - /certificates/yaml: - post: + delete: tags: - - Certificates - summary: Create a certificate or CA from YAML file - operationId: createCertificateFromYAML + - VolumeMounts + summary: Unlinks volume mount from fog nodes + 
operationId: unlinkVolumeMount + parameters: + - in: path + name: name + description: Volume mount name + required: true + schema: + type: string security: - userToken: [] requestBody: + required: true content: - multipart/form-data: + application/json: schema: - type: object - properties: - certificate: - type: string - format: binary + $ref: "#/components/schemas/VolumeMountUnlink" responses: - '201': - description: Created + "200": + description: Success content: application/json: schema: - oneOf: - - $ref: "#/components/schemas/CAResponse" - - $ref: "#/components/schemas/CertificateResponse" - '400': + $ref: "#/components/schemas/VolumeMount" + "400": description: Bad Request - '401': - description: Unauthorized - '404': - description: Not Found - Referenced CA not found - '409': - description: Conflict - Certificate or CA already exists + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error tags: - name: Controller description: Manage your controller @@ -3661,6 +4508,12 @@ tags: description: Manage your secrets - name: Certificates description: Manage your certificates + - name: Services + description: Manage your services + - name: VolumeMounts + description: Manage your volume mounts + - name: ConfigMap + description: Manage your config maps servers: - url: http://localhost:51121/api/v3 components: @@ -4231,6 +5084,10 @@ components: type: string dockerUrl: type: string + containerEngine: + type: string + deploymentType: + type: string diskLimit: type: number diskDirectory: @@ -4307,6 +5164,12 @@ components: dockerUrl: type: string default: unix:///var/run/docker.sock + containerEngine: + type: string + default: docker + deploymentType: + type: string + default: native diskLimit: type: number default: 50 @@ -4470,6 +5333,10 @@ components: type: string dockerUrl: type: string + containerEngine: + type: string + deploymentType: + type: string diskLimit: type: number diskDirectory: @@ -4513,6 
+5380,10 @@ components: type: string dockerUrl: type: string + containerEngine: + type: string + deploymentType: + type: string diskLimit: type: number diskDirectory: @@ -4787,31 +5658,6 @@ components: enum: - tcp - udp - public: - type: object - properties: - enabled: - type: boolean - schemes: - type: array - items: - type: string - protocol: - type: string - enum: - - tcp - - http - router: - type: object - properties: - host: - type: string - port: - type: number - required: [] - required: - - schemes - - protocol required: - internal - external @@ -4828,31 +5674,6 @@ components: enum: - tcp - udp - public: - type: object - properties: - enabled: - type: boolean - schemes: - type: array - items: - type: string - protocol: - type: string - enum: - - tcp - - http - router: - type: object - properties: - host: - type: string - port: - type: number - required: [] - required: - - schemes - - protocol required: - internal - external @@ -4868,31 +5689,6 @@ components: enum: - tcp - udp - public: - type: object - properties: - enabled: - type: boolean - schemes: - type: array - items: - type: string - protocol: - type: string - enum: - - tcp - - http - router: - type: object - properties: - host: - type: string - port: - type: number - required: [] - required: - - schemes - - protocol required: - internal - external @@ -4916,31 +5712,6 @@ components: enum: - tcp - udp - public: - type: object - properties: - enabled: - type: boolean - schemes: - type: array - items: - type: string - protocol: - type: string - enum: - - tcp - - http - router: - type: object - properties: - host: - type: string - port: - type: number - required: [] - required: - - schemes - - protocol required: - internal - external @@ -5820,4 +6591,159 @@ components: description: New validity end date renewed: type: boolean - description: True if certificate was successfully renewed \ No newline at end of file + description: True if certificate was successfully renewed + Service: + type: object + 
properties: + name: + type: string + type: + type: string + resource: + type: string + defaultBridge: + type: string + bridgePort: + type: number + targetPort: + type: number + tags: + type: array + items: + type: string + ConfigMapCreate: + type: object + required: + - name + - data + properties: + name: + type: string + minLength: 1 + maxLength: 255 + data: + type: object + ConfigMapUpdate: + type: object + required: + - data + properties: + data: + type: object + ConfigMapResponse: + type: object + required: + - id + - name + - data + - created_at + - updated_at + properties: + id: + type: integer + name: + type: string + data: + type: object + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + ConfigMapListResponse: + type: object + required: + - configMaps + properties: + configMaps: + type: array + items: + type: object + required: + - id + - name + - created_at + - updated_at + properties: + id: + type: integer + name: + type: string + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + VolumeMount: + type: object + properties: + uuid: + type: string + name: + type: string + secretName: + type: string + configMapName: + type: string + version: + type: integer + required: + - uuid + - name + - version + + VolumeMountCreate: + type: object + properties: + name: + type: string + secretName: + type: string + configMapName: + type: string + required: + - name + oneOf: + - required: + - secretName + - required: + - configMapName + + VolumeMountUpdate: + type: object + properties: + name: + type: string + secretName: + type: string + configMapName: + type: string + oneOf: + - required: + - secretName + - required: + - configMapName + + VolumeMountLink: + type: object + properties: + fogUuids: + type: array + items: + type: string + minItems: 1 + required: + - fogUuids + + VolumeMountUnlink: + type: object + properties: + fogUuids: + type: array + items: + type: string + 
minItems: 1 + required: + - fogUuids \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index db97afda..f6878851 100644 --- a/package-lock.json +++ b/package-lock.json @@ -19,6 +19,7 @@ "@opentelemetry/resources": "^1.8.0", "@opentelemetry/sdk-node": "^0.200.0", "axios": "1.8.4", + "bignumber.js": "^9.3.0", "body-parser": "^1.20.3", "child_process": "1.0.2", "command-line-args": "5.2.1", @@ -64,6 +65,7 @@ "string-format": "2.0.0", "umzug": "^3.7.0", "underscore": "1.13.6", + "uuid": "11.1.0", "xss-clean": "0.1.1" }, "bin": { @@ -3555,6 +3557,14 @@ } } }, + "node_modules/bignumber.js": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.0.tgz", + "integrity": "sha512-EM7aMFTXbptt/wZdMlBv2t8IViwQL+h6SLHosp8Yf0dqJMTnY6iL32opnAB6kAdL0SZPuvcAzFr31o0c/R3/RA==", + "engines": { + "node": "*" + } + }, "node_modules/binary-extensions": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", @@ -8070,6 +8080,15 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/istanbul-lib-processinfo/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "bin": { + "uuid": "dist/bin/uuid" + } + }, "node_modules/istanbul-lib-report": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", @@ -10847,6 +10866,15 @@ "node": ">=0.10.0" } }, + "node_modules/postman-collection/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "bin": { + "uuid": "dist/bin/uuid" + } + }, "node_modules/postman-request": { "version": 
"2.88.1-postman.39", "resolved": "https://registry.npmjs.org/postman-request/-/postman-request-2.88.1-postman.39.tgz", @@ -10889,6 +10917,15 @@ "node": ">=0.6" } }, + "node_modules/postman-request/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "bin": { + "uuid": "dist/bin/uuid" + } + }, "node_modules/postman-runtime": { "version": "7.41.2", "resolved": "https://registry.npmjs.org/postman-runtime/-/postman-runtime-7.41.2.tgz", @@ -10928,6 +10965,15 @@ "url": "https://github.com/sponsors/panva" } }, + "node_modules/postman-runtime/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "dev": true, + "bin": { + "uuid": "dist/bin/uuid" + } + }, "node_modules/postman-sandbox": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/postman-sandbox/-/postman-sandbox-5.1.1.tgz", @@ -12099,6 +12145,14 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, + "node_modules/sequelize/node_modules/uuid": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", + "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, "node_modules/serialised-error": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/serialised-error/-/serialised-error-1.1.3.tgz", @@ -14646,11 +14700,15 @@ } }, "node_modules/uuid": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", - "integrity": 
"sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], "bin": { - "uuid": "dist/bin/uuid" + "uuid": "dist/esm/bin/uuid" } }, "node_modules/uvm": { diff --git a/package.json b/package.json index d7473c81..9660f935 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@datasance/iofogcontroller", "version": "3.5.0", - "description": "ioFog Controller project for Eclipse IoFog @ iofog.org \\nCopyright (c) 2023 Datasance Teknoloji A.S.", + "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", "contributors": [ @@ -59,6 +59,7 @@ "@kubernetes/client-node": "^0.22.3", "axios": "1.8.4", "body-parser": "^1.20.3", + "bignumber.js": "^9.3.0", "child_process": "1.0.2", "command-line-args": "5.2.1", "command-line-usage": "7.0.3", @@ -102,6 +103,7 @@ "sqlite3": "^5.1.7", "string-format": "2.0.0", "umzug": "^3.7.0", + "uuid": "11.1.0", "underscore": "1.13.6", "xss-clean": "0.1.1", "@opentelemetry/api": "^1.9.0", diff --git a/src/config/controller.yaml b/src/config/controller.yaml index 1ff9e67f..236bccf5 100644 --- a/src/config/controller.yaml +++ b/src/config/controller.yaml @@ -12,10 +12,10 @@ server: # key: "" # SSL key file path # cert: "" # SSL certificate file path # intermediateCert: "" # Intermediate certificate file path - # # base64: - # # key: # SSL key in base64 format - # # cert: # SSL certificate in base64 format - # # intermediateCert: # Intermediate certificate in base64 format + # base64: + # key: # SSL key in base64 format + # cert: # SSL certificate in base64 format + # 
intermediateCert: # Intermediate certificate in base64 format # Viewer Configuration viewer: @@ -43,13 +43,13 @@ database: # port: 3306 # MySQL port # username: "" # MySQL username # password: "" # MySQL password - # database: "" # MySQL database name + # databaseName: "" # MySQL database name # postgres: # host: "" # PostgreSQL host # port: 5432 # PostgreSQL port # username: "" # PostgreSQL username # password: "" # PostgreSQL password - # database: "" # PostgreSQL database name + # databaseName: "" # PostgreSQL database name sqlite: databaseName: dev_database.sqlite # SQLite database file name logging: false # Enable SQLite query logging @@ -71,18 +71,15 @@ database: # secret: # ControllerClient Client secret # viewerClient: # Viewer client ID -# Public Ports Configuration -publicPorts: - range: "6001-7999" # Public ports range +# Bridge Ports Configuration for Services +bridgePorts: + range: "10024-65535" # Bridge ports range # System Images Configuration systemImages: router: "1": "ghcr.io/datasance/router:latest" "2": "ghcr.io/datasance/router:latest" - proxy: - "1": "ghcr.io/datasance/proxy:latest" - "2": "ghcr.io/datasance/proxy:latest" # Diagnostics Configuration diagnostics: diff --git a/src/config/env-mapping.js b/src/config/env-mapping.js index 5398bdf0..8418a043 100644 --- a/src/config/env-mapping.js +++ b/src/config/env-mapping.js @@ -7,6 +7,14 @@ module.exports = { 'SERVER_PORT': 'server.port', 'SERVER_DEV_MODE': 'server.devMode', + // SSL Configuration + 'SSL_PATH_KEY': 'server.ssl.path.key', + 'SSL_PATH_CERT': 'server.ssl.path.cert', + 'SSL_PATH_INTERMEDIATE_CERT': 'server.ssl.path.intermediateCert', + 'SSL_BASE64_KEY': 'server.ssl.base64.key', + 'SSL_BASE64_CERT': 'server.ssl.base64.cert', + 'SSL_BASE64_INTERMEDIATE_CERT': 'server.ssl.base64.intermediateCert', + // Viewer Configuration 'VIEWER_PORT': 'viewer.port', 'VIEWER_URL': 'viewer.url', @@ -49,14 +57,12 @@ module.exports = { 'KC_CLIENT_SECRET': 'auth.client.secret', 'KC_VIEWER_CLIENT': 
'auth.viewerClient', - // Public Ports Configuration - 'PUBLIC_PORTS_RANGE': 'publicPorts.range', + // Bridge Ports Configuration + 'BRIDGE_PORTS_RANGE': 'bridgePorts.range', // System Images Configuration 'ROUTER_IMAGE_1': 'systemImages.router.1', 'ROUTER_IMAGE_2': 'systemImages.router.2', - 'PROXY_IMAGE_1': 'systemImages.proxy.1', - 'PROXY_IMAGE_2': 'systemImages.proxy.2', // Diagnostics Configuration 'DIAGNOSTICS_DIRECTORY': 'diagnostics.directory', diff --git a/src/config/index.js b/src/config/index.js index 34cfadec..c6e7da05 100644 --- a/src/config/index.js +++ b/src/config/index.js @@ -41,10 +41,7 @@ class Config { // Clear any existing configuration nconf.reset() - // Set the entire config as defaults - nconf.defaults(this.config) - - // Set environment variables + // First set environment variables nconf.env({ separator: '_', parseValues: true, @@ -75,7 +72,7 @@ class Config { } }) - // Get all environment overrides + // Get environment overrides first const envOverrides = nconf.get() // Create a deep copy of the base config @@ -98,13 +95,6 @@ class Config { } }) - // Remove any nconf internal keys and the type field - Object.keys(finalConfig).forEach(key => { - if (key.includes(':') || key === 'type') { - delete finalConfig[key] - } - }) - // Reset nconf and set the final merged config nconf.reset() nconf.defaults(finalConfig) diff --git a/src/controllers/agent-controller.js b/src/controllers/agent-controller.js index 223ef9aa..c77ca4ca 100644 --- a/src/controllers/agent-controller.js +++ b/src/controllers/agent-controller.js @@ -58,6 +58,10 @@ const getAgentLinkedEdgeResourcesEndpoint = async function (req, fog) { return { edgeResources: await AgentService.getAgentLinkedEdgeResources(fog) } } +const getAgentLinkedVolumeMountsEndpoint = async function (req, fog) { + return { volumeMounts: await AgentService.getAgentLinkedVolumeMounts(fog) } +} + const getAgentMicroserviceEndPoint = async function (req, fog) { const microserviceUuid = 
req.params.microserviceUuid @@ -135,5 +139,6 @@ module.exports = { putImageSnapshotEndPoint: AuthDecorator.checkFogToken(putImageSnapshotEndPoint), resetAgentConfigChangesEndPoint: AuthDecorator.checkFogToken(resetAgentConfigChangesEndPoint), getAgentLinkedEdgeResourcesEndpoint: AuthDecorator.checkFogToken(getAgentLinkedEdgeResourcesEndpoint), + getAgentLinkedVolumeMountsEndpoint: AuthDecorator.checkFogToken(getAgentLinkedVolumeMountsEndpoint), getControllerCAEndPoint: AuthDecorator.checkFogToken(getControllerCAEndPoint) } diff --git a/src/controllers/config-map-controller.js b/src/controllers/config-map-controller.js new file mode 100644 index 00000000..ec79358e --- /dev/null +++ b/src/controllers/config-map-controller.js @@ -0,0 +1,66 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const ConfigMapService = require('../services/config-map-service') +const YamlParserService = require('../services/yaml-parser-service') + +const createConfigMapEndpoint = async function (req) { + const configMap = req.body + return ConfigMapService.createConfigMapEndpoint(configMap) +} + +const updateConfigMapEndpoint = async function (req) { + const configMap = req.body + const configMapName = req.params.name + return ConfigMapService.updateConfigMapEndpoint(configMapName, configMap) +} + +const getConfigMapEndpoint = async function (req) { + const configMapName = req.params.name + return ConfigMapService.getConfigMapEndpoint(configMapName) +} + +const listConfigMapsEndpoint = async function (req) { + return ConfigMapService.listConfigMapsEndpoint() +} + +const deleteConfigMapEndpoint = async function (req) { + const configMapName = req.params.name + return ConfigMapService.deleteConfigMapEndpoint(configMapName) +} + +const createConfigMapFromYamlEndpoint = async function (req) { + const fileContent = req.file.buffer.toString() + const configMapData = await YamlParserService.parseConfigMapFile(fileContent) + return ConfigMapService.createConfigMapEndpoint(configMapData) +} + +const updateConfigMapFromYamlEndpoint = async function (req) { + const fileContent = req.file.buffer.toString() + const configMapName = req.params.name + const configMapData = await YamlParserService.parseConfigMapFile(fileContent, { + isUpdate: true, + configMapName: configMapName + }) + return ConfigMapService.updateConfigMapEndpoint(configMapName, configMapData) +} + +module.exports = { + createConfigMapEndpoint, + updateConfigMapEndpoint, + getConfigMapEndpoint, + listConfigMapsEndpoint, + deleteConfigMapEndpoint, + createConfigMapFromYamlEndpoint, + updateConfigMapFromYamlEndpoint 
+} diff --git a/src/controllers/iofog-controller.js b/src/controllers/iofog-controller.js index 3d2f86cc..b8f876ac 100644 --- a/src/controllers/iofog-controller.js +++ b/src/controllers/iofog-controller.js @@ -41,9 +41,10 @@ async function getFogEndPoint (req) { } async function getFogListEndPoint (req) { - const isSystem = req.query && req.query.system ? req.query.system === 'true' : false + // const isSystem = req.query && req.query.system ? req.query.system === 'true' : false const query = qs.parse(req.query) - return FogService.getFogListEndPoint(query.filters, false, isSystem) + // return FogService.getFogListEndPoint(query.filters, false, isSystem) + return FogService.getFogListEndPoint(query.filters, false) } async function generateProvisionKeyEndPoint (req) { diff --git a/src/controllers/microservices-controller.js b/src/controllers/microservices-controller.js index 55249c4a..fc8418c2 100644 --- a/src/controllers/microservices-controller.js +++ b/src/controllers/microservices-controller.js @@ -168,8 +168,24 @@ const deleteSystemMicroserviceVolumeMappingEndPoint = async function (req) { return MicroservicesService.deleteSystemVolumeMappingEndPoint(uuid, id, false) } -const listAllPublicPortsEndPoint = async function (req) { - return MicroservicesService.listAllPublicPortsEndPoint() +const createMicroserviceExecEndPoint = async function (req) { + const uuid = req.params.uuid + return MicroservicesService.createExecEndPoint(uuid, false) +} + +const deleteMicroserviceExecEndPoint = async function (req) { + const uuid = req.params.uuid + return MicroservicesService.deleteExecEndPoint(uuid, false) +} + +const createSystemMicroserviceExecEndPoint = async function (req) { + const uuid = req.params.uuid + return MicroservicesService.createSystemExecEndPoint(uuid, false) +} + +const deleteSystemMicroserviceExecEndPoint = async function (req) { + const uuid = req.params.uuid + return MicroservicesService.deleteSystemExecEndPoint(uuid, false) } module.exports = { @@ 
-195,7 +211,10 @@ module.exports = { listMicroserviceVolumeMappingsEndPoint: (listMicroserviceVolumeMappingsEndPoint), deleteMicroserviceVolumeMappingEndPoint: (deleteMicroserviceVolumeMappingEndPoint), deleteSystemMicroserviceVolumeMappingEndPoint: (deleteSystemMicroserviceVolumeMappingEndPoint), - listAllPublicPortsEndPoint: (listAllPublicPortsEndPoint), createMicroserviceYAMLEndPoint: (createMicroserviceYAMLEndPoint), - updateMicroserviceYAMLEndPoint: (updateMicroserviceYAMLEndPoint) + updateMicroserviceYAMLEndPoint: (updateMicroserviceYAMLEndPoint), + createMicroserviceExecEndPoint: (createMicroserviceExecEndPoint), + deleteMicroserviceExecEndPoint: (deleteMicroserviceExecEndPoint), + createSystemMicroserviceExecEndPoint: (createSystemMicroserviceExecEndPoint), + deleteSystemMicroserviceExecEndPoint: (deleteSystemMicroserviceExecEndPoint) } diff --git a/src/controllers/service-controller.js b/src/controllers/service-controller.js new file mode 100644 index 00000000..b07df17a --- /dev/null +++ b/src/controllers/service-controller.js @@ -0,0 +1,66 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const ServiceService = require('../services/services-service') +const YamlParserService = require('../services/yaml-parser-service') + +const createServiceEndpoint = async function (req) { + const serviceData = req.body + return ServiceService.createServiceEndpoint(serviceData) +} + +const updateServiceEndpoint = async function (req) { + const serviceName = req.params.name + const serviceData = req.body + return ServiceService.updateServiceEndpoint(serviceName, serviceData) +} + +const deleteServiceEndpoint = async function (req) { + const serviceName = req.params.name + return ServiceService.deleteServiceEndpoint(serviceName) +} + +const getServiceEndpoint = async function (req) { + const serviceName = req.params.name + return ServiceService.getServiceEndpoint(serviceName) +} + +const listServicesEndpoint = async function (req) { + return ServiceService.getServicesListEndpoint() +} + +const createServiceYAMLEndpoint = async function (req) { + const fileContent = req.file.buffer.toString() + const serviceData = await YamlParserService.parseServiceFile(fileContent) + return ServiceService.createServiceEndpoint(serviceData) +} + +const updateServiceYAMLEndpoint = async function (req) { + const serviceName = req.params.name + const fileContent = req.file.buffer.toString() + const serviceData = await YamlParserService.parseServiceFile(fileContent, { + isUpdate: true, + serviceName: serviceName + }) + return ServiceService.updateServiceEndpoint(serviceName, serviceData) +} + +module.exports = { + createServiceEndpoint, + updateServiceEndpoint, + deleteServiceEndpoint, + getServiceEndpoint, + listServicesEndpoint, + createServiceYAMLEndpoint, + updateServiceYAMLEndpoint +} diff --git a/src/controllers/volume-mount-controller.js 
b/src/controllers/volume-mount-controller.js new file mode 100644 index 00000000..5a3ce6be --- /dev/null +++ b/src/controllers/volume-mount-controller.js @@ -0,0 +1,71 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const VolumeMountService = require('../services/volume-mount-service') +const YAMLParserService = require('../services/yaml-parser-service') + +const listVolumeMountsEndpoint = async (req) => { + return VolumeMountService.listVolumeMountsEndpoint() +} + +const getVolumeMountEndpoint = async (req) => { + return VolumeMountService.getVolumeMountEndpoint(req.params.name) +} + +const createVolumeMountEndpoint = async (req) => { + return VolumeMountService.createVolumeMountEndpoint(req.body) +} + +const updateVolumeMountEndpoint = async (req) => { + return VolumeMountService.updateVolumeMountEndpoint(req.params.name, req.body) +} + +const deleteVolumeMountEndpoint = async (req) => { + return VolumeMountService.deleteVolumeMountEndpoint(req.params.name) +} + +const createVolumeMountYamlEndpoint = async (req) => { + const fileContent = req.file.buffer.toString() + const volumeMountData = await YAMLParserService.parseVolumeMountFile(fileContent) + return VolumeMountService.createVolumeMountEndpoint(volumeMountData) +} + +const updateVolumeMountYamlEndpoint = async (req) => { + const fileContent = req.file.buffer.toString() + const name = req.params.name + const volumeMountData = await YAMLParserService.parseVolumeMountFile(fileContent, { + isUpdate: true, + volumeMountName: name + }) + return 
VolumeMountService.updateVolumeMountEndpoint(name, volumeMountData) +} + +const linkVolumeMountEndpoint = async (req) => { + return VolumeMountService.linkVolumeMountEndpoint(req.params.name, req.body.fogUuids) +} + +const unlinkVolumeMountEndpoint = async (req) => { + return VolumeMountService.unlinkVolumeMountEndpoint(req.params.name, req.body.fogUuids) +} + +module.exports = { + listVolumeMountsEndpoint, + getVolumeMountEndpoint, + createVolumeMountEndpoint, + updateVolumeMountEndpoint, + deleteVolumeMountEndpoint, + createVolumeMountYamlEndpoint, + updateVolumeMountYamlEndpoint, + linkVolumeMountEndpoint, + unlinkVolumeMountEndpoint +} diff --git a/src/data/constants.js b/src/data/constants.js index 3ba4ae4f..1d17f71e 100644 --- a/src/data/constants.js +++ b/src/data/constants.js @@ -1,4 +1,3 @@ module.exports = { - ROUTER_CATALOG_NAME: 'Router', - PROXY_CATALOG_NAME: 'Proxy' + ROUTER_CATALOG_NAME: 'Router' } diff --git a/src/data/managers/config-map-manager.js b/src/data/managers/config-map-manager.js new file mode 100644 index 00000000..ee7b6d30 --- /dev/null +++ b/src/data/managers/config-map-manager.js @@ -0,0 +1,54 @@ +const BaseManager = require('./base-manager') +const SecretHelper = require('../../helpers/secret-helper') +const models = require('../models') +const ConfigMap = models.ConfigMap + +class ConfigMapManager extends BaseManager { + getEntity () { + return ConfigMap + } + + async createConfigMap (name, immutable, data, transaction) { + return this.create({ + name, + immutable: immutable, + data: data + }, transaction) + } + + async updateConfigMap (name, immutable, data, transaction) { + const encryptedData = await SecretHelper.encryptSecret(data, name) + return this.update( + { name }, + { immutable: immutable, data: encryptedData }, + transaction + ) + } + + async getConfigMap (name, transaction) { + const configMap = await this.findOne({ name }, transaction) + if (!configMap) { + return null + } + return { + ...configMap.toJSON(), + data: 
configMap.data + } + } + + async listConfigMaps (transaction) { + const configMaps = await this.findAll({}, transaction) + return configMaps.map(configMap => ({ + id: configMap.id, + name: configMap.name, + created_at: configMap.created_at, + updated_at: configMap.updated_at + })) + } + + async deleteConfigMap (name, transaction) { + return this.delete({ name }, transaction) + } +} + +module.exports = new ConfigMapManager() diff --git a/src/data/managers/iofog-access-token-manager.js b/src/data/managers/iofog-access-token-manager.js deleted file mode 100644 index 0e0bd837..00000000 --- a/src/data/managers/iofog-access-token-manager.js +++ /dev/null @@ -1,36 +0,0 @@ -/* - * ******************************************************************************* - * * Copyright (c) 2023 Datasance Teknoloji A.S. - * * - * * This program and the accompanying materials are made available under the - * * terms of the Eclipse Public License v. 2.0 which is available at - * * http://www.eclipse.org/legal/epl-2.0 - * * - * * SPDX-License-Identifier: EPL-2.0 - * ******************************************************************************* - * - */ - -const BaseManager = require('./base-manager') -const models = require('../models') -const FogAccessToken = models.FogAccessToken - -class FogAccessTokenManager extends BaseManager { - getEntity () { - return FogAccessToken - } - - // no transaction required here, used by auth decorator - updateExpirationTime (id, newTime) { - return FogAccessToken.update({ - expirationTime: newTime - }, { - where: { - id: id - } - }) - } -} - -const instance = new FogAccessTokenManager() -module.exports = instance diff --git a/src/data/managers/microservice-port-manager.js b/src/data/managers/microservice-port-manager.js index deedb758..84c7c22f 100644 --- a/src/data/managers/microservice-port-manager.js +++ b/src/data/managers/microservice-port-manager.js @@ -14,27 +14,11 @@ const BaseManager = require('./base-manager') const models = 
require('../models') const MicroservicePort = models.MicroservicePort -const MicroservicePublicPort = models.MicroservicePublicPort class MicroservicePortManager extends BaseManager { getEntity () { return MicroservicePort } - - findAllPublicPorts (transaction) { - return MicroservicePort.findAll({ - include: [ - { - model: MicroservicePublicPort, - as: 'publicPort', - required: true, - attributes: ['queueName', 'publicPort', 'protocol', 'isTcp', 'hostId'] - } - ], - where: { isPublic: true }, - attributes: ['microserviceUuid'] - }, { transaction: transaction }) - } } const instance = new MicroservicePortManager() diff --git a/src/data/managers/microservice-public-port-manager.js b/src/data/managers/microservice-public-port-manager.js deleted file mode 100644 index e5e43d97..00000000 --- a/src/data/managers/microservice-public-port-manager.js +++ /dev/null @@ -1,25 +0,0 @@ -/* - * ******************************************************************************* - * * Copyright (c) 2023 Datasance Teknoloji A.S. - * * - * * This program and the accompanying materials are made available under the - * * terms of the Eclipse Public License v. 
2.0 which is available at - * * http://www.eclipse.org/legal/epl-2.0 - * * - * * SPDX-License-Identifier: EPL-2.0 - * ******************************************************************************* - * - */ - -const BaseManager = require('./base-manager') -const models = require('../models') -const MicroservicePublicPort = models.MicroservicePublicPort - -class MicroservicePublicPortManager extends BaseManager { - getEntity () { - return MicroservicePublicPort - } -} - -const instance = new MicroservicePublicPortManager() -module.exports = instance diff --git a/src/data/managers/service-manager.js b/src/data/managers/service-manager.js new file mode 100644 index 00000000..9942b36a --- /dev/null +++ b/src/data/managers/service-manager.js @@ -0,0 +1,90 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const BaseManager = require('./base-manager') +const models = require('../models') +const Service = models.Service +const Tags = models.Tags +const ServiceTag = models.ServiceTag + +class ServiceManager extends BaseManager { + getEntity () { + return Service + } + + async findAllWithTags (where, transaction) { + return Service.findAll({ + where: where, + order: [ [ 'name', 'ASC' ] ], + include: [ + { model: Tags, + as: 'tags', + through: { + attributes: [] + } + } + ] + }, { + transaction: transaction + }) + } + + async findOneWithTags (where, transaction) { + return Service.findOne({ + where, + include: [ + { model: Tags, + as: 'tags', + through: { + attributes: [] + } + } + ] + }, { transaction }) + } + + async setTags (serviceId, tagIds, transaction) { + // First remove all existing tags + await ServiceTag.destroy({ + where: { service_id: serviceId } + }, { transaction }) + + // Then add new tags + if (tagIds && tagIds.length > 0) { + const serviceTags = tagIds.map(tagId => ({ + service_id: serviceId, + tag_id: tagId + })) + await ServiceTag.bulkCreate(serviceTags, { transaction }) + } + } + + async addTag (serviceId, tagId, transaction) { + await ServiceTag.create({ + service_id: serviceId, + tag_id: tagId + }, { transaction }) + } + + async removeTag (serviceId, tagId, transaction) { + await ServiceTag.destroy({ + where: { + service_id: serviceId, + tag_id: tagId + } + }, { transaction }) + } +} + +const instance = new ServiceManager() +module.exports = instance diff --git a/src/data/managers/volume-mounting-manager.js b/src/data/managers/volume-mounting-manager.js new file mode 100644 index 00000000..b4f44384 --- /dev/null +++ b/src/data/managers/volume-mounting-manager.js @@ -0,0 +1,62 @@ +/* + * 
******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const BaseManager = require('./base-manager') +const models = require('../models') +const VolumeMount = models.VolumeMount + +const volumeMountExcludedFields = [ + 'created_at', + 'updated_at' +] + +class VolumeMountingManager extends BaseManager { + getEntity () { + return VolumeMount + } + + getAllExcludeFields (where, transaction) { + return this.findAllWithAttributes(where, { exclude: volumeMountExcludedFields }, transaction) + } + + getAll (where, transaction) { + return VolumeMount.findAll({ + where: where, + attributes: ['uuid', 'name', 'configMapName', 'secretName'] + }, { transaction: transaction }) + } + + getOne (where, transaction) { + return VolumeMount.findOne({ + where: where, + attributes: ['uuid', 'name', 'configMapName', 'secretName', 'version'] + }, { transaction: transaction }) + } + + findOne (where, transaction) { + return VolumeMount.findOne({ + where: where, + attributes: ['uuid', 'name', 'configMapName', 'secretName', 'version'] + }, { transaction: transaction }) + } + + findAll (where, transaction) { + return VolumeMount.findAll({ + where: where, + attributes: ['uuid', 'name', 'configMapName', 'secretName', 'version'] + }, { transaction: transaction }) + } +} + +const instance = new VolumeMountingManager() +module.exports = instance diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql index 5cb25c7d..f3fed896 100644 --- a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql +++ 
b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql @@ -68,7 +68,7 @@ CREATE TABLE IF NOT EXISTS Fogs ( longitude FLOAT, description TEXT, last_active BIGINT, - daemon_status VARCHAR(32) DEFAULT 'UNKNOWN', + daemon_status VARCHAR(32) DEFAULT 'NOT_PROVISIONED', daemon_operating_duration BIGINT DEFAULT 0, daemon_last_start BIGINT, memory_usage FLOAT DEFAULT 0.000, @@ -77,9 +77,9 @@ CREATE TABLE IF NOT EXISTS Fogs ( memory_violation TEXT, disk_violation TEXT, cpu_violation TEXT, - `system-available-disk` BIGINT, - `system-available-memory` BIGINT, - `system-total-cpu` FLOAT, + system_available_disk BIGINT, + system_available_memory BIGINT, + system_total_cpu FLOAT, security_status VARCHAR(32) DEFAULT 'OK', security_violation_info VARCHAR(32) DEFAULT 'No violation', catalog_item_status TEXT, @@ -112,8 +112,8 @@ CREATE TABLE IF NOT EXISTS Fogs ( change_frequency INT DEFAULT 20, device_scan_frequency INT DEFAULT 20, tunnel VARCHAR(255) DEFAULT '', - isolated_docker_container BOOLEAN DEFAULT TRUE, - docker_pruning_freq INT DEFAULT 1, + isolated_docker_container BOOLEAN DEFAULT FALSE, + docker_pruning_freq INT DEFAULT 0, available_disk_threshold FLOAT DEFAULT 20, log_level VARCHAR(10) DEFAULT 'INFO', is_system BOOLEAN DEFAULT FALSE, @@ -568,18 +568,6 @@ ADD COLUMN run_as_user TEXT DEFAULT NULL, ADD COLUMN platform TEXT DEFAULT NULL, ADD COLUMN runtime TEXT DEFAULT NULL; -ALTER TABLE Fogs -RENAME COLUMN `system-available-disk` TO system_available_disk, -RENAME COLUMN `system-available-memory` TO system_available_memory, -RENAME COLUMN `system-total-cpu` TO system_total_cpu; - -ALTER TABLE Routers DROP COLUMN IF EXISTS require_ssl; -ALTER TABLE Routers DROP COLUMN IF EXISTS ssl_profile; -ALTER TABLE Routers DROP COLUMN IF EXISTS sasl_mechanisms; -ALTER TABLE Routers DROP COLUMN IF EXISTS authenticate_peer; -ALTER TABLE Routers DROP COLUMN IF EXISTS ca_cert; -ALTER TABLE Routers DROP COLUMN IF EXISTS tls_cert; -ALTER TABLE Routers DROP COLUMN IF EXISTS tls_key; CREATE 
TABLE IF NOT EXISTS MicroservicePubTags ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, @@ -678,8 +666,8 @@ CREATE TABLE IF NOT EXISTS Certificates ( FOREIGN KEY (secret_id) REFERENCES Secrets (id) ON DELETE CASCADE ); -CREATE UNIQUE INDEX idx_certificates_name_unique ON Certificates ((name(255))); -CREATE INDEX idx_certificates_valid_to ON Certificates ((valid_to)); +CREATE UNIQUE INDEX idx_certificates_name_unique ON Certificates (name(255)); +CREATE INDEX idx_certificates_valid_to ON Certificates (valid_to); CREATE INDEX idx_certificates_is_ca ON Certificates (is_ca); CREATE INDEX idx_certificates_signed_by_id ON Certificates (signed_by_id); CREATE INDEX idx_certificates_secret_id ON Certificates (secret_id); @@ -691,10 +679,12 @@ CREATE TABLE IF NOT EXISTS Services ( resource TEXT NOT NULL, target_port INT NOT NULL, service_port INT, + k8s_type TEXT, bridge_port INT, + default_bridge TEXT, service_endpoint TEXT, created_at DATETIME, - updated_at DATETIME, + updated_at DATETIME ); CREATE INDEX idx_services_name ON Services (name); @@ -713,4 +703,75 @@ CREATE TABLE IF NOT EXISTS ServiceTags ( CREATE INDEX idx_service_tags_service_id ON ServiceTags (service_id); CREATE INDEX idx_service_tags_tag_id ON ServiceTags (tag_id); +ALTER TABLE Fogs ADD COLUMN container_engine VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN deployment_type VARCHAR(32); + +ALTER TABLE MicroserviceExtraHost DROP COLUMN public_port; +ALTER TABLE MicroservicePorts DROP COLUMN is_public; +ALTER TABLE MicroservicePorts DROP COLUMN is_proxy; + +DROP TABLE IF EXISTS MicroservicePublicPorts; + +ALTER TABLE MicroserviceEnvs ADD COLUMN value_from_secret TEXT; +ALTER TABLE MicroserviceEnvs ADD COLUMN value_from_config_map TEXT; + +CREATE TABLE IF NOT EXISTS ConfigMaps ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + name VARCHAR(255) UNIQUE NOT NULL, + immutable BOOLEAN DEFAULT false, + data TEXT NOT NULL, + created_at DATETIME, + updated_at DATETIME +); + +CREATE INDEX idx_config_maps_name ON ConfigMaps 
(name); + +CREATE TABLE IF NOT EXISTS VolumeMounts ( + uuid VARCHAR(32) PRIMARY KEY NOT NULL, + name VARCHAR(255) NOT NULL, + config_map_name VARCHAR(255), + secret_name VARCHAR(255), + version INT DEFAULT 1, + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (config_map_name) REFERENCES ConfigMaps (name) ON DELETE CASCADE, + FOREIGN KEY (secret_name) REFERENCES Secrets (name) ON DELETE CASCADE +); + +CREATE INDEX idx_volume_mounts_uuid ON VolumeMounts (uuid); +CREATE INDEX idx_volume_mounts_config_map_name ON VolumeMounts (config_map_name); +CREATE INDEX idx_volume_mounts_secret_name ON VolumeMounts (secret_name); + +CREATE TABLE IF NOT EXISTS FogVolumeMounts ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + fog_uuid VARCHAR(32), + volume_mount_uuid VARCHAR(32), + FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, + FOREIGN KEY (volume_mount_uuid) REFERENCES VolumeMounts (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_volume_mounts_fog_uuid ON FogVolumeMounts (fog_uuid); +CREATE INDEX idx_fog_volume_mounts_volume_mount_uuid ON FogVolumeMounts (volume_mount_uuid); + +ALTER TABLE Fogs ADD COLUMN active_volume_mounts BIGINT DEFAULT 0; +ALTER TABLE Fogs ADD COLUMN volume_mount_last_update BIGINT DEFAULT 0; + +ALTER TABLE ChangeTrackings ADD COLUMN volume_mounts BOOLEAN DEFAULT false; +ALTER TABLE ChangeTrackings ADD COLUMN exec_sessions BOOLEAN DEFAULT false; + +ALTER TABLE Services ADD COLUMN provisioning_status VARCHAR(32) DEFAULT 'pending'; +ALTER TABLE Services ADD COLUMN provisioning_error TEXT; + +ALTER TABLE Fogs ADD COLUMN warning_message TEXT; +ALTER TABLE Fogs ADD COLUMN gps_device VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN gps_scan_frequency INT DEFAULT 60; +ALTER TABLE Fogs ADD COLUMN edge_guard_frequency INT DEFAULT 0; + +ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(32); +ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(32); +ALTER TABLE Microservices ADD COLUMN exec_enabled BOOLEAN DEFAULT false; + +ALTER TABLE 
MicroserviceStatuses ADD COLUMN exec_session_id TEXT; + + COMMIT; \ No newline at end of file diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql index 29faea83..2a0c682a 100644 --- a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql +++ b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql @@ -66,7 +66,7 @@ CREATE TABLE IF NOT EXISTS "Fogs" ( longitude DOUBLE PRECISION, description TEXT, last_active BIGINT, - daemon_status VARCHAR(32) DEFAULT 'UNKNOWN', + daemon_status VARCHAR(32) DEFAULT 'NOT_PROVISIONED', daemon_operating_duration BIGINT DEFAULT 0, daemon_last_start BIGINT, memory_usage DOUBLE PRECISION DEFAULT 0.000, @@ -110,8 +110,8 @@ CREATE TABLE IF NOT EXISTS "Fogs" ( change_frequency INT DEFAULT 20, device_scan_frequency INT DEFAULT 20, tunnel VARCHAR(255) DEFAULT '', - isolated_docker_container BOOLEAN DEFAULT TRUE, - docker_pruning_freq INT DEFAULT 1, + isolated_docker_container BOOLEAN DEFAULT FALSE, + docker_pruning_freq INT DEFAULT 0, available_disk_threshold DOUBLE PRECISION DEFAULT 20, log_level VARCHAR(10) DEFAULT 'INFO', is_system BOOLEAN DEFAULT FALSE, @@ -566,14 +566,6 @@ ADD COLUMN run_as_user TEXT DEFAULT NULL, ADD COLUMN platform TEXT DEFAULT NULL, ADD COLUMN runtime TEXT DEFAULT NULL; -ALTER TABLE "Routers" -ADD COLUMN require_ssl TEXT, -ADD COLUMN ssl_profile TEXT, -ADD COLUMN sasl_mechanisms TEXT, -ADD COLUMN authenticate_peer TEXT, -ADD COLUMN ca_cert TEXT, -ADD COLUMN tls_cert TEXT, -ADD COLUMN tls_key TEXT; CREATE TABLE IF NOT EXISTS "MicroservicePubTags" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, @@ -686,7 +678,9 @@ CREATE TABLE IF NOT EXISTS "Services" ( resource TEXT NOT NULL, target_port INTEGER NOT NULL, service_port INTEGER, + k8s_type TEXT, bridge_port INTEGER, + default_bridge TEXT, service_endpoint TEXT, created_at TIMESTAMP(0), updated_at TIMESTAMP(0) @@ -708,10 +702,73 @@ CREATE TABLE IF NOT EXISTS "ServiceTags" ( CREATE 
INDEX idx_service_tags_service_id ON "ServiceTags" (service_id); CREATE INDEX idx_service_tags_tag_id ON "ServiceTags" (tag_id); -ALTER TABLE "Routers" DROP COLUMN IF EXISTS require_ssl; -ALTER TABLE "Routers" DROP COLUMN IF EXISTS ssl_profile; -ALTER TABLE "Routers" DROP COLUMN IF EXISTS sasl_mechanisms; -ALTER TABLE "Routers" DROP COLUMN IF EXISTS authenticate_peer; -ALTER TABLE "Routers" DROP COLUMN IF EXISTS ca_cert; -ALTER TABLE "Routers" DROP COLUMN IF EXISTS tls_cert; -ALTER TABLE "Routers" DROP COLUMN IF EXISTS tls_key; \ No newline at end of file + +ALTER TABLE "Fogs" ADD COLUMN container_engine VARCHAR(32); +ALTER TABLE "Fogs" ADD COLUMN deployment_type VARCHAR(32); + +ALTER TABLE "MicroserviceExtraHost" DROP COLUMN IF EXISTS public_port; +ALTER TABLE "MicroservicePorts" DROP COLUMN IF EXISTS is_public; +ALTER TABLE "MicroservicePorts" DROP COLUMN IF EXISTS is_proxy; + +DROP TABLE IF EXISTS "MicroservicePublicPorts"; + +ALTER TABLE "MicroserviceEnvs" ADD COLUMN value_from_secret TEXT; +ALTER TABLE "MicroserviceEnvs" ADD COLUMN value_from_config_map TEXT; + +CREATE TABLE IF NOT EXISTS "ConfigMaps" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + name VARCHAR(255) UNIQUE NOT NULL, + immutable BOOLEAN DEFAULT false, + data TEXT NOT NULL, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0) +); + +CREATE INDEX idx_config_maps_name ON "ConfigMaps" (name); + +CREATE TABLE IF NOT EXISTS "VolumeMounts" ( + uuid VARCHAR(32) PRIMARY KEY NOT NULL, + name VARCHAR(255) NOT NULL, + config_map_name VARCHAR(255), + secret_name VARCHAR(255), + version INT DEFAULT 1, + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (config_map_name) REFERENCES "ConfigMaps" (name) ON DELETE CASCADE, + FOREIGN KEY (secret_name) REFERENCES "Secrets" (name) ON DELETE CASCADE +); + +CREATE INDEX idx_volume_mounts_uuid ON "VolumeMounts" (uuid); +CREATE INDEX idx_volume_mounts_config_map_name ON "VolumeMounts" (config_map_name); +CREATE INDEX 
idx_volume_mounts_secret_name ON "VolumeMounts" (secret_name); + +CREATE TABLE IF NOT EXISTS "FogVolumeMounts" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + fog_uuid VARCHAR(32), + volume_mount_uuid VARCHAR(32), + FOREIGN KEY (fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE, + FOREIGN KEY (volume_mount_uuid) REFERENCES "VolumeMounts" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_volume_mounts_fog_uuid ON "FogVolumeMounts" (fog_uuid); +CREATE INDEX idx_fog_volume_mounts_volume_mount_uuid ON "FogVolumeMounts" (volume_mount_uuid); + +ALTER TABLE "Fogs" ADD COLUMN active_volume_mounts BIGINT DEFAULT 0; +ALTER TABLE "Fogs" ADD COLUMN volume_mount_last_update BIGINT DEFAULT 0; + +ALTER TABLE "ChangeTrackings" ADD COLUMN volume_mounts BOOLEAN DEFAULT false; +ALTER TABLE "ChangeTrackings" ADD COLUMN exec_sessions BOOLEAN DEFAULT false; + +ALTER TABLE "Services" ADD COLUMN provisioning_status VARCHAR(32) DEFAULT 'pending'; +ALTER TABLE "Services" ADD COLUMN provisioning_error TEXT; + +ALTER TABLE "Fogs" ADD COLUMN warning_message TEXT DEFAULT 'HEALTHY'; +ALTER TABLE "Fogs" ADD COLUMN gps_device VARCHAR(32); +ALTER TABLE "Fogs" ADD COLUMN gps_scan_frequency INT DEFAULT 60; +ALTER TABLE "Fogs" ADD COLUMN edge_guard_frequency INT DEFAULT 0; + +ALTER TABLE "Microservices" ADD COLUMN pid_mode VARCHAR(32); +ALTER TABLE "Microservices" ADD COLUMN ipc_mode VARCHAR(32); +ALTER TABLE "Microservices" ADD COLUMN exec_enabled BOOLEAN DEFAULT false; + +ALTER TABLE "MicroserviceStatuses" ADD COLUMN exec_session_id TEXT; diff --git a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql index 7a8a825d..42e98b3b 100644 --- a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql @@ -66,7 +66,7 @@ CREATE TABLE IF NOT EXISTS Fogs ( longitude FLOAT, description TEXT, last_active BIGINT, - daemon_status VARCHAR(32) DEFAULT 'UNKNOWN', + 
daemon_status VARCHAR(32) DEFAULT 'NOT_PROVISIONED', daemon_operating_duration BIGINT DEFAULT 0, daemon_last_start BIGINT, memory_usage FLOAT DEFAULT 0.000, @@ -110,8 +110,8 @@ CREATE TABLE IF NOT EXISTS Fogs ( change_frequency INT DEFAULT 20, device_scan_frequency INT DEFAULT 20, tunnel VARCHAR(255) DEFAULT '', - isolated_docker_container BOOLEAN DEFAULT TRUE, - docker_pruning_freq INT DEFAULT 1, + isolated_docker_container BOOLEAN DEFAULT FALSE, + docker_pruning_freq INT DEFAULT 0, available_disk_threshold FLOAT DEFAULT 20, log_level VARCHAR(10) DEFAULT 'INFO', is_system BOOLEAN DEFAULT FALSE, @@ -268,7 +268,6 @@ CREATE TABLE IF NOT EXISTS MicroserviceExtraHost ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, template_type TEXT, name TEXT, - public_port INT, template TEXT, `value` TEXT, microservice_uuid VARCHAR(32), @@ -288,8 +287,6 @@ CREATE TABLE IF NOT EXISTS MicroservicePorts ( port_internal INT, port_external INT, is_udp BOOLEAN, - is_public BOOLEAN, - is_proxy BOOLEAN, created_at DATETIME, updated_at DATETIME, microservice_uuid VARCHAR(32), @@ -673,10 +670,12 @@ CREATE TABLE IF NOT EXISTS Services ( resource TEXT NOT NULL, target_port INTEGER NOT NULL, service_port INTEGER, + k8s_type TEXT, bridge_port INTEGER, + default_bridge TEXT, service_endpoint TEXT, created_at DATETIME, - updated_at DATETIME, + updated_at DATETIME ); CREATE INDEX idx_services_id ON Services (id); @@ -695,3 +694,68 @@ CREATE TABLE IF NOT EXISTS ServiceTags ( CREATE INDEX idx_service_tags_service_id ON ServiceTags (service_id); CREATE INDEX idx_service_tags_tag_id ON ServiceTags (tag_id); +ALTER TABLE Fogs ADD COLUMN container_engine VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN deployment_type VARCHAR(32); + +DROP TABLE IF EXISTS MicroservicePublicPorts; + +ALTER TABLE MicroserviceEnvs ADD COLUMN value_from_secret TEXT; +ALTER TABLE MicroserviceEnvs ADD COLUMN value_from_config_map TEXT; + +CREATE TABLE IF NOT EXISTS ConfigMaps ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name 
VARCHAR(255) UNIQUE NOT NULL, + immutable BOOLEAN DEFAULT false, + data TEXT NOT NULL, + created_at DATETIME, + updated_at DATETIME +); + +CREATE INDEX idx_config_maps_name ON ConfigMaps (name); + +CREATE TABLE IF NOT EXISTS VolumeMounts ( + uuid VARCHAR(32) PRIMARY KEY NOT NULL, + name VARCHAR(255) NOT NULL, + config_map_name VARCHAR(255), + secret_name VARCHAR(255), + version INTEGER DEFAULT 1, + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (config_map_name) REFERENCES ConfigMaps (name) ON DELETE CASCADE, + FOREIGN KEY (secret_name) REFERENCES Secrets (name) ON DELETE CASCADE +); + +CREATE INDEX idx_volume_mounts_uuid ON VolumeMounts (uuid); +CREATE INDEX idx_volume_mounts_config_map_name ON VolumeMounts (config_map_name); +CREATE INDEX idx_volume_mounts_secret_name ON VolumeMounts (secret_name); + +CREATE TABLE IF NOT EXISTS FogVolumeMounts ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + fog_uuid VARCHAR(32), + volume_mount_uuid VARCHAR(32), + FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, + FOREIGN KEY (volume_mount_uuid) REFERENCES VolumeMounts (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_fog_volume_mounts_fog_uuid ON FogVolumeMounts (fog_uuid); +CREATE INDEX idx_fog_volume_mounts_volume_mount_uuid ON FogVolumeMounts (volume_mount_uuid); + +ALTER TABLE Fogs ADD COLUMN active_volume_mounts BIGINT DEFAULT 0; +ALTER TABLE Fogs ADD COLUMN volume_mount_last_update BIGINT DEFAULT 0; + +ALTER TABLE ChangeTrackings ADD COLUMN volume_mounts BOOLEAN DEFAULT false; +ALTER TABLE ChangeTrackings ADD COLUMN exec_sessions BOOLEAN DEFAULT false; + +ALTER TABLE Services ADD COLUMN provisioning_status VARCHAR(32) DEFAULT 'pending'; +ALTER TABLE Services ADD COLUMN provisioning_error TEXT; + +ALTER TABLE Fogs ADD COLUMN warning_message TEXT DEFAULT 'HEALTHY'; +ALTER TABLE Fogs ADD COLUMN gps_device VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN gps_scan_frequency INT DEFAULT 60; +ALTER TABLE Fogs ADD COLUMN edge_guard_frequency INT DEFAULT 0; + 
+ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(32); +ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(32); +ALTER TABLE Microservices ADD COLUMN exec_enabled BOOLEAN DEFAULT false; + +ALTER TABLE MicroserviceStatuses ADD COLUMN exec_session_id TEXT; \ No newline at end of file diff --git a/src/data/models/changetracking.js b/src/data/models/changetracking.js index ee4d34d9..a3932eb9 100644 --- a/src/data/models/changetracking.js +++ b/src/data/models/changetracking.js @@ -91,6 +91,16 @@ module.exports = (sequelize, DataTypes) => { field: 'linked_edge_resources', defaultValue: false }, + volumeMounts: { + type: DataTypes.BOOLEAN, + field: 'volume_mounts', + defaultValue: false + }, + execSessions: { + type: DataTypes.BOOLEAN, + field: 'exec_sessions', + defaultValue: false + }, lastUpdated: { type: DataTypes.STRING, field: 'last_updated', diff --git a/src/data/models/configMap.js b/src/data/models/configMap.js new file mode 100644 index 00000000..6f0c6541 --- /dev/null +++ b/src/data/models/configMap.js @@ -0,0 +1,77 @@ +'use strict' + +const SecretHelper = require('../../helpers/secret-helper') + +module.exports = (sequelize, DataTypes) => { + const ConfigMap = sequelize.define('ConfigMap', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + name: { + type: DataTypes.TEXT, + allowNull: false, + field: 'name', + unique: true + }, + immutable: { + type: DataTypes.BOOLEAN, + allowNull: false, + field: 'immutable', + defaultValue: false + }, + data: { + type: DataTypes.TEXT, + allowNull: false, + field: 'data', + defaultValue: '{}', + get () { + const rawValue = this.getDataValue('data') + return rawValue ? 
JSON.parse(rawValue) : {} + }, + set (value) { + this.setDataValue('data', JSON.stringify(value)) + } + } + }, { + tableName: 'ConfigMaps', + timestamps: true, + underscored: true, + indexes: [ + { + unique: true, + fields: ['name'] + } + ], + hooks: { + beforeSave: async (configMap) => { + if (configMap.changed('data')) { + const encryptedData = await SecretHelper.encryptSecret( + configMap.data, + configMap.name + ) + configMap.data = encryptedData + } + }, + afterFind: async (configMap) => { + if (configMap && configMap.data) { + try { + const decryptedData = await SecretHelper.decryptSecret( + configMap.data, + configMap.name + ) + configMap.data = decryptedData + } catch (error) { + console.error('Error decrypting ConfigMap data:', error) + configMap.data = {} + } + } + } + } + }) + + return ConfigMap +} diff --git a/src/data/models/fog.js b/src/data/models/fog.js index 3b8a02c8..72d956e2 100644 --- a/src/data/models/fog.js +++ b/src/data/models/fog.js @@ -23,6 +23,15 @@ module.exports = (sequelize, DataTypes) => { type: DataTypes.TEXT, field: 'gps_mode' }, + gpsDevice: { + type: DataTypes.TEXT, + field: 'gps_device' + }, + gpsScanFrequency: { + type: DataTypes.INTEGER, + field: 'gps_scan_frequency', + defaultValue: 60 + }, latitude: { type: DataTypes.FLOAT, field: 'latitude' @@ -44,7 +53,7 @@ module.exports = (sequelize, DataTypes) => { }, daemonStatus: { type: DataTypes.TEXT, - defaultValue: 'UNKNOWN', + defaultValue: 'NOT_PROVISIONED', field: 'daemon_status' }, daemonOperatingDuration: { @@ -193,6 +202,18 @@ module.exports = (sequelize, DataTypes) => { defaultValue: 'unix:///var/run/docker.sock', field: 'docker_url' }, + containerEngine: { + type: DataTypes.ENUM('docker', 'podman'), + allowNull: false, + field: 'container_engine', + defaultValue: 'docker' + }, + deploymentType: { + type: DataTypes.ENUM('native', 'container'), + allowNull: false, + field: 'deployment_type', + defaultValue: 'native' + }, diskLimit: { type: DataTypes.FLOAT, defaultValue: 50, 
@@ -277,12 +298,17 @@ module.exports = (sequelize, DataTypes) => { }, watchdogEnabled: { type: DataTypes.BOOLEAN, - defaultValue: true, + defaultValue: false, field: 'isolated_docker_container' }, + edgeGuardFrequency: { + type: DataTypes.INTEGER, + defaultValue: 0, + field: 'edge_guard_frequency' + }, dockerPruningFrequency: { type: DataTypes.INTEGER, - defaultValue: 1, + defaultValue: 0, field: 'docker_pruning_freq' }, availableDiskThreshold: { @@ -308,6 +334,26 @@ module.exports = (sequelize, DataTypes) => { timeZone: { type: DataTypes.TEXT, field: 'time_zone' + }, + activeVolumeMounts: { + type: DataTypes.BIGINT, + defaultValue: 0, + get () { + return convertToInt(this.getDataValue('activeVolumeMounts'), 0) + }, + field: 'active_volume_mounts' + }, + volumeMountLastUpdate: { + type: DataTypes.BIGINT, + get () { + return convertToInt(this.getDataValue('volumeMountLastUpdate'), 0) + }, + field: 'volume_mount_last_update' + }, + warningMessage: { + type: DataTypes.TEXT, + field: 'warning_message', + defaultValue: 'HEALTHY' } }, { tableName: 'Fogs', @@ -324,10 +370,10 @@ module.exports = (sequelize, DataTypes) => { defaultValue: 0 }) - Fog.hasOne(models.FogAccessToken, { - foreignKey: 'iofog_uuid', - as: 'accessToken' - }) + // Fog.hasOne(models.FogAccessToken, { + // foreignKey: 'iofog_uuid', + // as: 'accessToken' + // }) Fog.hasOne(models.FogPublicKey, { foreignKey: 'iofog_uuid', @@ -351,6 +397,7 @@ module.exports = (sequelize, DataTypes) => { Fog.belongsToMany(models.Tags, { through: 'IofogTags', as: 'tags' }) Fog.belongsToMany(models.EdgeResource, { through: 'AgentEdgeResources', as: 'edgeResources' }) + Fog.belongsToMany(models.VolumeMount, { through: 'FogVolumeMounts', as: 'volumeMounts' }) } return Fog diff --git a/src/data/models/fogVolumeMounts.js b/src/data/models/fogVolumeMounts.js new file mode 100644 index 00000000..25566a44 --- /dev/null +++ b/src/data/models/fogVolumeMounts.js @@ -0,0 +1,9 @@ +'use strict' +module.exports = (sequelize, DataTypes) => 
{ + const FogVolumeMounts = sequelize.define('FogVolumeMounts', {}, { + tableName: 'FogVolumeMounts', + timestamps: false, + underscored: true + }) + return FogVolumeMounts +} diff --git a/src/data/models/fogaccesstoken.js b/src/data/models/fogaccesstoken.js deleted file mode 100644 index 9eeb2943..00000000 --- a/src/data/models/fogaccesstoken.js +++ /dev/null @@ -1,41 +0,0 @@ -'use strict' - -const { convertToInt } = require('../../helpers/app-helper') - -module.exports = (sequelize, DataTypes) => { - const FogAccessToken = sequelize.define('FogAccessToken', { - id: { - type: DataTypes.INTEGER, - primaryKey: true, - autoIncrement: true, - allowNull: false, - field: 'id' - }, - expirationTime: { - type: DataTypes.BIGINT, - get () { - return convertToInt(this.getDataValue('expirationTime')) - }, - field: 'expiration_time' - }, - token: { - type: DataTypes.TEXT, - field: 'token' - } - }, { - tableName: 'FogAccessTokens', - timestamps: false, - underscored: true - }) - FogAccessToken.associate = function (models) { - FogAccessToken.belongsTo(models.Fog, { - foreignKey: { - name: 'iofogUuid', - field: 'iofog_uuid' - }, - as: 'iofog', - onDelete: 'cascade' - }) - } - return FogAccessToken -} diff --git a/src/data/models/index.js b/src/data/models/index.js index 30b833b7..483a3c8a 100644 --- a/src/data/models/index.js +++ b/src/data/models/index.js @@ -71,7 +71,6 @@ db.initDB = async (isStart) => { // Configure system images const fogTypes = await db.FogType.findAll({}) await configureImage(db, constants.ROUTER_CATALOG_NAME, fogTypes, config.get('systemImages.router', {})) - await configureImage(db, constants.PROXY_CATALOG_NAME, fogTypes, config.get('systemImages.proxy', {})) } } diff --git a/src/data/models/microservice.js b/src/data/models/microservice.js index bc6141cc..f414fa89 100644 --- a/src/data/models/microservice.js +++ b/src/data/models/microservice.js @@ -65,11 +65,26 @@ module.exports = (sequelize, DataTypes) => { field: 'log_size', defaultValue: 0 }, + 
pidMode: { + type: DataTypes.TEXT, + field: 'pid_mode', + defaultValue: '' + }, + ipcMode: { + type: DataTypes.TEXT, + field: 'ipc_mode', + defaultValue: '' + }, imageSnapshot: { type: DataTypes.TEXT, field: 'image_snapshot', defaultValue: '' }, + execEnabled: { + type: DataTypes.BOOLEAN, + field: 'exec_enabled', + defaultValue: false + }, delete: { type: DataTypes.BOOLEAN, field: 'delete', @@ -158,6 +173,11 @@ module.exports = (sequelize, DataTypes) => { as: 'env' }) + Microservice.hasMany(models.VolumeMount, { + foreignKey: 'microservice_uuid', + as: 'volumeMounts' + }) + Microservice.hasMany(models.MicroserviceArg, { foreignKey: 'microservice_uuid', as: 'cmd' diff --git a/src/data/models/microserviceExtraHost.js b/src/data/models/microserviceExtraHost.js index 676f62ac..2f747623 100644 --- a/src/data/models/microserviceExtraHost.js +++ b/src/data/models/microserviceExtraHost.js @@ -14,9 +14,6 @@ module.exports = (sequelize, DataTypes) => { name: { type: DataTypes.TEXT }, - publicPort: { - type: DataTypes.INTEGER - }, // Only if type is Apps template: { type: DataTypes.TEXT }, // Contains the template string diff --git a/src/data/models/microservicePublicPort.js b/src/data/models/microservicePublicPort.js deleted file mode 100644 index d1f9bc75..00000000 --- a/src/data/models/microservicePublicPort.js +++ /dev/null @@ -1,88 +0,0 @@ -'use strict' - -module.exports = (sequelize, DataTypes) => { - const MicroservicePublicPort = sequelize.define('MicroservicePublicPort', { - id: { - type: DataTypes.INTEGER, - primaryKey: true, - autoIncrement: true, - allowNull: false, - field: 'id' - }, - portId: { - type: DataTypes.INTEGER, - field: 'port_id' - }, - hostId: { - type: DataTypes.TEXT, - field: 'host_id' - }, - localProxyId: { - type: DataTypes.TEXT, - field: 'local_proxy_id' - }, - remoteProxyId: { - type: DataTypes.TEXT, - field: 'remote_proxy_id' - }, - publicPort: { - type: DataTypes.INTEGER, - field: 'public_port' - }, - queueName: { - type: DataTypes.TEXT, - 
field: 'queue_name' - }, - schemes: { - type: DataTypes.TEXT, // JSON stringified array of strings - field: 'schemes', - defaultValue: JSON.stringify(['https']) - }, - isTcp: { - type: DataTypes.BOOLEAN, - field: 'is_tcp', - defaultValue: false - }, - protocol: { - type: DataTypes.VIRTUAL, - get () { - return this.getDataValue('isTcp') ? 'tcp' : 'http' - } - } - }, { - tableName: 'MicroservicePublicPorts', - timestamps: true, - underscored: true - }) - MicroservicePublicPort.associate = function (models) { - MicroservicePublicPort.belongsTo(models.MicroservicePort, { - foreignKey: { - name: 'portId', - field: 'port_id' - }, - as: 'port', - onDelete: 'cascade' - }) - - MicroservicePublicPort.belongsTo(models.Fog, { - foreignKey: { - name: 'hostId', - field: 'host_id' - }, - as: 'host', - onDelete: 'cascade' - }) - - MicroservicePublicPort.hasOne(models.Microservice, { - foreignKey: 'uuid', - as: 'localProxy' - }) - - MicroservicePublicPort.hasOne(models.Microservice, { - foreignKey: 'uuid', - as: 'remoteProxy' - }) - } - - return MicroservicePublicPort -} diff --git a/src/data/models/microserviceenv.js b/src/data/models/microserviceenv.js index 837275f5..977aa554 100644 --- a/src/data/models/microserviceenv.js +++ b/src/data/models/microserviceenv.js @@ -15,6 +15,14 @@ module.exports = (sequelize, DataTypes) => { value: { type: DataTypes.TEXT, field: 'value' + }, + valueFromSecret: { + type: DataTypes.TEXT, + field: 'value_from_secret' + }, + valueFromConfigMap: { + type: DataTypes.TEXT, + field: 'value_from_config_map' } }, { tableName: 'MicroserviceEnvs', diff --git a/src/data/models/microserviceport.js b/src/data/models/microserviceport.js index a6cee0b2..6fbb67f8 100644 --- a/src/data/models/microserviceport.js +++ b/src/data/models/microserviceport.js @@ -19,14 +19,6 @@ module.exports = (sequelize, DataTypes) => { isUdp: { type: DataTypes.BOOLEAN, field: 'is_udp' - }, - isPublic: { - type: DataTypes.BOOLEAN, - field: 'is_public' - }, - isProxy: { - type: 
DataTypes.BOOLEAN, - field: 'is_proxy' } }, { tableName: 'MicroservicePorts', @@ -42,11 +34,6 @@ module.exports = (sequelize, DataTypes) => { as: 'microservice', onDelete: 'cascade' }) - - MicroservicePort.hasOne(models.MicroservicePublicPort, { - foreignKey: 'port_id', - as: 'publicPort' - }) } return MicroservicePort } diff --git a/src/data/models/microservicestatus.js b/src/data/models/microservicestatus.js index 136fda26..fde87048 100644 --- a/src/data/models/microservicestatus.js +++ b/src/data/models/microservicestatus.js @@ -65,6 +65,11 @@ module.exports = (sequelize, DataTypes) => { type: DataTypes.TEXT, defaultValue: '', field: 'ip_address' + }, + execSessionId: { + type: DataTypes.TEXT, + defaultValue: '', + field: 'exec_session_id' } }, { tableName: 'MicroserviceStatuses', diff --git a/src/data/models/service.js b/src/data/models/service.js new file mode 100644 index 00000000..86570b49 --- /dev/null +++ b/src/data/models/service.js @@ -0,0 +1,93 @@ +'use strict' +module.exports = (sequelize, DataTypes) => { + const Service = sequelize.define('Service', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + name: { + type: DataTypes.TEXT, + allowNull: false, + field: 'name', + unique: true, + index: true + }, + type: { + type: DataTypes.ENUM('microservice', 'k8s', 'agent', 'external'), + allowNull: false, + field: 'type' + }, + resource: { + type: DataTypes.TEXT, + allowNull: false + }, + targetPort: { + type: DataTypes.INTEGER, + allowNull: false, + field: 'target_port' + }, + // protocol: { + // type: DataTypes.ENUM('tcp', 'http'), + // defaultValue: 'tcp', + // allowNull: false + // }, + servicePort: { + type: DataTypes.INTEGER, + allowNull: true, + field: 'service_port' + }, + k8sType: { + type: DataTypes.ENUM('LoadBalancer', 'ClusterIP', 'NodePort'), + allowNull: true, + field: 'k8s_type' + }, + bridgePort: { + type: DataTypes.INTEGER, + allowNull: true, + field: 'bridge_port' + }, + 
defaultBridge: { + type: DataTypes.TEXT, + allowNull: false, + field: 'default_bridge', + defaultValue: 'default-router' + }, + serviceEndpoint: { + type: DataTypes.TEXT, + field: 'service_endpoint' + }, + provisioningStatus: { + type: DataTypes.ENUM('pending', 'ready', 'failed'), + allowNull: false, + field: 'provisioning_status', + defaultValue: 'pending' + }, + provisioningError: { + type: DataTypes.TEXT, + field: 'provisioning_error' + } + }, { + tableName: 'Services', + timestamps: true, + underscored: true + }) + + Service.associate = function (models) { + Service.belongsTo(models.Microservice, { + foreignKey: { + name: 'resource', + field: 'resource' + }, + as: 'microservice', + // We don't want to enforce this constraint since resource could be various types + constraints: false + }) + // Relationship with tags + Service.belongsToMany(models.Tags, { as: 'tags', through: 'ServiceTags' }) + } + + return Service +} diff --git a/src/data/models/serviceTags.js b/src/data/models/serviceTags.js new file mode 100644 index 00000000..6019d6ea --- /dev/null +++ b/src/data/models/serviceTags.js @@ -0,0 +1,9 @@ +'use strict' +module.exports = (sequelize, DataTypes) => { + const ServiceTags = sequelize.define('ServiceTags', {}, { + tableName: 'ServiceTags', + timestamps: false, + underscored: true + }) + return ServiceTags +} diff --git a/src/data/models/tags.js b/src/data/models/tags.js index 356cf462..9c795772 100644 --- a/src/data/models/tags.js +++ b/src/data/models/tags.js @@ -24,6 +24,7 @@ module.exports = (sequelize, DataTypes) => { Tags.belongsToMany(models.EdgeResource, { through: 'EdgeResourceOrchestrationTags', as: 'edgeResources' }) Tags.belongsToMany(models.Microservice, { through: 'MicroservicePubTags', as: 'pubMicroservices' }) Tags.belongsToMany(models.Microservice, { through: 'MicroserviceSubTags', as: 'subMicroservices' }) + Tags.belongsToMany(models.Service, { through: 'ServiceTags', as: 'services' }) } return Tags } diff --git 
a/src/data/models/volumeMount.js b/src/data/models/volumeMount.js new file mode 100644 index 00000000..0bdf2f2f --- /dev/null +++ b/src/data/models/volumeMount.js @@ -0,0 +1,43 @@ +'use strict' + +module.exports = (sequelize, DataTypes) => { + const VolumeMount = sequelize.define('VolumeMount', { + uuid: { + type: DataTypes.STRING(32), + primaryKey: true, + allowNull: false, + field: 'uuid' + }, + name: { + type: DataTypes.STRING, + allowNull: false, + field: 'name' + }, + configMapName: { + type: DataTypes.STRING, + allowNull: true, + field: 'config_map_name' + }, + secretName: { + type: DataTypes.STRING, + allowNull: true, + field: 'secret_name' + }, + version: { + type: DataTypes.INTEGER, + allowNull: false, + defaultValue: 1, + field: 'version' + } + }, { + tableName: 'VolumeMounts', + timestamps: true, + underscored: true + }) + + VolumeMount.associate = function (models) { + VolumeMount.belongsToMany(models.Fog, { through: 'FogVolumeMounts', as: 'fogs' }) + } + + return VolumeMount +} diff --git a/src/data/providers/database-provider.js b/src/data/providers/database-provider.js index df1ef454..2035eb1d 100644 --- a/src/data/providers/database-provider.js +++ b/src/data/providers/database-provider.js @@ -352,10 +352,19 @@ class DatabaseProvider { try { await db.query(query) } catch (err) { - if (err.code === 'ER_TABLE_EXISTS_ERROR' || - err.code === 'ER_DUP_FIELDNAME' || - err.code === 'ER_DUP_KEYNAME') { - logger.warn(`Ignored MySQL error: ${err.message}`) + // Check both the error and its parent (for Sequelize errors) + const errorToCheck = err.parent || err + if (errorToCheck.code === 'ER_TABLE_EXISTS_ERROR' || + errorToCheck.code === 'ER_DUP_FIELDNAME' || + errorToCheck.code === 'ER_DUP_KEYNAME' || + errorToCheck.code === 'ER_BLOB_KEY_WITHOUT_LENGTH' || + errorToCheck.code === 'ER_CANT_DROP_FIELD_OR_KEY' || + errorToCheck.code === 'duplicate_key' || + errorToCheck.code === 'already_exists' || + errorToCheck.errno === 1091 || + errorToCheck.errno === 1061 
|| + errorToCheck.errno === 1170) { + logger.warn(`Ignored MySQL error: ${errorToCheck.message}`) } else { await db.query('ROLLBACK') throw err @@ -404,10 +413,33 @@ class DatabaseProvider { try { await db.query(query) } catch (err) { - if (err.code === '42P07' || // duplicate_table - err.code === '42701' || // duplicate_column - err.code === '42P06') { // duplicate_schema - logger.warn(`Ignored PostgreSQL error: ${err.message}`) + // Check both the error and its parent (for Sequelize errors) + const errorToCheck = err.parent || err + + // If transaction is aborted, rollback and start new transaction + if (errorToCheck.code === '25P02') { + logger.warn('Transaction aborted, rolling back and starting new transaction...') + await db.query('ROLLBACK') + await db.query('BEGIN') + continue + } + + if (errorToCheck.code === '42P07' || // duplicate_table + errorToCheck.code === '42701' || // duplicate_column + errorToCheck.code === '42P06' || // duplicate_schema + errorToCheck.code === '23505' || // unique_violation + errorToCheck.code === '23503' || // foreign_key_violation + errorToCheck.code === '42P01' || // undefined_table + errorToCheck.code === '42703' || // undefined_column + errorToCheck.code === '42P16' || // invalid_table_definition + errorToCheck.code === '42P17' || // invalid_table_definition + errorToCheck.code === '42P18' || // invalid_table_definition + (errorToCheck.message && ( + errorToCheck.message.includes('already exists') || + errorToCheck.message.includes('duplicate key') || + errorToCheck.message.includes('relation') + ))) { + logger.warn(`Ignored PostgreSQL error: ${errorToCheck.message}`) } else { await db.query('ROLLBACK') throw err diff --git a/src/data/providers/mysql.js b/src/data/providers/mysql.js index 4225a204..8cf6e9cc 100644 --- a/src/data/providers/mysql.js +++ b/src/data/providers/mysql.js @@ -45,7 +45,10 @@ class MySqlDatabaseProvider extends DatabaseProvider { const connection = await mysql.createConnection(this.connectionOptions) 
await connection.end() } catch (err) { - if (err.code === 'ER_BAD_DB_ERROR') { + // Check both the error and its parent (for Sequelize errors) + const errorToCheck = err.parent || err + + if (errorToCheck.code === 'ER_BAD_DB_ERROR') { // Database doesn't exist, try to create it logger.info('Database does not exist, attempting to create it...') const { database, ...connectionConfig } = this.connectionOptions @@ -56,6 +59,15 @@ class MySqlDatabaseProvider extends DatabaseProvider { } finally { await tempConnection.end() } + } else if (errorToCheck.code === 'ER_DUP_KEYNAME' || + errorToCheck.errno === 1061 || + (errorToCheck.message && ( + errorToCheck.message.includes('Error 1050') || // Table already exists + errorToCheck.message.includes('Error 1060') || // Duplicate column name + errorToCheck.message.includes('Error 1054') || // Unknown column + errorToCheck.message.includes('Error 1061') // Duplicate key name + ))) { + logger.info(`Ignoring known MySQL error: ${errorToCheck.message}`) } else { throw err } diff --git a/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql index bfa21802..c9e84aa9 100644 --- a/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql +++ b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql @@ -11,8 +11,7 @@ VALUES ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), - ('Proxy', 'The built-in proxy for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', 
NULL, false, 1); INSERT INTO `FogTypes` (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -35,9 +34,7 @@ VALUES (4, 1, 'ghcr.io/datasance/edge-guard:latest'), (4, 2, 'ghcr.io/datasance/edge-guard:latest'), (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'), - (6, 1, 'ghcr.io/datasance/proxy:latest'), - (6, 2, 'ghcr.io/datasance/proxy:latest'); + (5, 2, 'ghcr.io/datasance/router:latest'); COMMIT; \ No newline at end of file diff --git a/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql index aaa987d9..629a028c 100644 --- a/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql +++ b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql @@ -11,8 +11,7 @@ VALUES ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), - ('Proxy', 'The built-in proxy for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); INSERT INTO "FogTypes" (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -35,8 +34,6 @@ VALUES (4, 1, 'ghcr.io/datasance/edge-guard:latest'), (4, 2, 'ghcr.io/datasance/edge-guard:latest'), (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'), - (6, 1, 'ghcr.io/datasance/proxy:latest'), - (6, 2, 'ghcr.io/datasance/proxy:latest'); + (5, 2, 'ghcr.io/datasance/router:latest'); 
COMMIT; \ No newline at end of file diff --git a/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql b/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql index 9a2cdccd..3835917c 100644 --- a/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql +++ b/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql @@ -9,8 +9,7 @@ VALUES ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), - ('Proxy', 'The built-in proxy for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); INSERT INTO `FogTypes` (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -33,6 +32,4 @@ VALUES (4, 1, 'ghcr.io/datasance/edge-guard:latest'), (4, 2, 'ghcr.io/datasance/edge-guard:latest'), (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'), - (6, 1, 'ghcr.io/datasance/proxy:latest'), - (6, 2, 'ghcr.io/datasance/proxy:latest'); + (5, 2, 'ghcr.io/datasance/router:latest'); diff --git a/src/enums/fog-state.js b/src/enums/fog-state.js index f33d5da4..8c3a4d09 100644 --- a/src/enums/fog-state.js +++ b/src/enums/fog-state.js @@ -13,7 +13,13 @@ const fogState = { UNKNOWN: 'UNKNOWN', - RUNNING: 'RUNNING' + RUNNING: 'RUNNING', + STOPPED: 'STOPPED', + WAITING: 'WAITING', + WARNING: 'WARNING', + DEPROVISIONED: 'DEPROVISIONED', + ERROR: 'ERROR', + NOT_PROVISIONED: 'NOT_PROVISIONED' } module.exports = fogState diff --git a/src/helpers/app-helper.js 
b/src/helpers/app-helper.js index 8a9a26bf..070b97e0 100644 --- a/src/helpers/app-helper.js +++ b/src/helpers/app-helper.js @@ -13,6 +13,7 @@ const crypto = require('crypto') const Errors = require('./errors') +const { v4: uuidv4 } = require('uuid') const logger = require('../logger') const fs = require('fs') @@ -60,6 +61,10 @@ function generateRandomString (size) { return randString } +function generateUUID () { + return uuidv4() +} + // Checks the status of a single port // returns 'closed' if port is available // returns 'open' if port is not available @@ -194,6 +199,7 @@ module.exports = { encryptText, decryptText, generateRandomString, + generateUUID, isFileExists, isValidPort, isValidDomain, diff --git a/src/helpers/error-messages.js b/src/helpers/error-messages.js index af41a2c5..1e373ab6 100644 --- a/src/helpers/error-messages.js +++ b/src/helpers/error-messages.js @@ -25,6 +25,7 @@ module.exports = { INVALID_IOFOG_UUID: 'Invalid ioFog UUID \'{}\'', INVALID_USER_EMAIL: 'Invalid user email', INVALID_MICROSERVICE_UUID: 'Invalid microservice UUID \'{}\'', + INVALID_MICROSERVICE_NAME: 'Invalid microservice NAME \'{}\'', INVALID_SOURCE_MICROSERVICE_UUID: 'Invalid source microservice UUID \'{}\'', INVALID_DEST_MICROSERVICE_UUID: 'Invalid destination microservice UUID \'{}\'', INVALID_SOURCE_MICROSERVICE_NAME: 'Invalid source microservice name \'{}\'', @@ -102,6 +103,7 @@ module.exports = { PORT_RESERVED: 'Port \'{}\' is reserved for internal use', INVALID_HOST_TEMPLATE: '{} is not a valid host template', NOT_FOUND_HOST_TEMPLATE: 'Could not find {} host template', + NOT_FOUND_APPS_TEMPLATE: 'The microservice you would like to add as an extra host is not in the same fog as the microservice you are adding it to', MISSING_IMAGE: 'Microservice {} does not have a valid image for its Agent type', DUPLICATE_RESOURCE_NAME_VERSION: 'Resource {} version {} already exists', NOT_FOUND_RESOURCE_NAME_VERSION: 'Could not find resource {} version {}', @@ -115,8 +117,8 @@ 
module.exports = { INVALID_MICROSERVICE_SUB_TAG: 'Invalid microservice Sub Tag \'{}\'', NOTFOUND_MICROSERVICE_PUB_TAG: 'No microservice found for Pub Tag \'{}\'', NOTFOUND_MICROSERVICE_SUB_TAG: 'No microservice found for Sub Tag \'{}\'', - SECRET_ALREADY_EXISTS: 'Secret with name "{0}" already exists', - SECRET_NOT_FOUND: 'Secret with name "{0}" not found', + SECRET_ALREADY_EXISTS: 'Secret with name "{}" already exists', + SECRET_NOT_FOUND: 'Secret with name "{}" not found', // Certificate related error messages CA_ALREADY_EXISTS: 'CA with name %s already exists', CA_NOT_FOUND: 'CA with name %s not found', @@ -125,5 +127,16 @@ module.exports = { INVALID_CERTIFICATE: 'Invalid certificate: %s', INVALID_CA: 'Invalid CA: %s', NOT_KUBERNETES_ENV: 'Controller is not running in Kubernetes environment', - K8S_SECRET_NOT_ALLOWED: 'Kubernetes secret type is not allowed in non-Kubernetes environment' + K8S_SECRET_NOT_ALLOWED: 'Kubernetes secret type is not allowed in non-Kubernetes environment', + INVALID_DEFAULT_BRIDGE: 'Invalid default bridge, If service type is not microservice, defaultBridge must be default-router\'{}\'', + INVALID_ROUTER_CONNECTION: 'Invalid router connection, router {} is not connected to {}', + NO_AVAILABLE_BRIDGE_PORT: 'No bridge port available in range for {}', + CONFIGMAP_ALREADY_EXISTS: 'ConfigMap with name {} already exists', + CONFIGMAP_NOT_FOUND: 'ConfigMap with name {} not found', + INVALID_SECRET_REFERENCE: 'Invalid secret reference: {}', + INVALID_CONFIGMAP_REFERENCE: 'Invalid configmap reference: {}', + SECRET_KEY_NOT_FOUND: 'Secret key {} not found in secret {}', + CONFIGMAP_KEY_NOT_FOUND: 'Configmap key {} not found in configmap {}', + CONFIGMAP_IMMUTABLE: 'Configmap {} is immutable and cannot be updated. 
If you want to update it, please delete it and create a new configmap.', + VOLUME_MOUNT_NOT_FOUND: 'Volume mount with name {} not found' } diff --git a/src/helpers/template-helper.js b/src/helpers/template-helper.js index 9ecdcf9d..a75ba093 100755 --- a/src/helpers/template-helper.js +++ b/src/helpers/template-helper.js @@ -138,7 +138,7 @@ const rvaluesVarSubstition = async (subjects, templateContext) => { let context = templateContext // Due to the recursive nature of this function, user will only be defined on the first iteration context = { - ...templateContext, + ...templateContext // Private context // _user: user // need by edge resource and every on demand request } @@ -169,7 +169,7 @@ const substitutionMiddleware = async (req, res, next) => { if (['POST', 'PUT', 'PATCH'].indexOf(req.method) > -1) { // let user let tmplContext = { - self: req.body, + self: req.body // Private context // _user: user // need by edge resource and every on demand request } diff --git a/src/jobs/fog-status-job.js b/src/jobs/fog-status-job.js index e0737080..8842d118 100644 --- a/src/jobs/fog-status-job.js +++ b/src/jobs/fog-status-job.js @@ -42,7 +42,10 @@ async function updateFogsConnectionStatus (transaction) { async function _updateFogStatus (transaction) { const statusUpdateTolerance = Config.get('settings.fogStatusUpdateTolerance') - const fogs = await FogManager.findAll({ daemonStatus: FogStates.RUNNING }, transaction) + const fogs = [ + ...await FogManager.findAll({ daemonStatus: FogStates.RUNNING }, transaction), + ...await FogManager.findAll({ daemonStatus: FogStates.WARNING }, transaction) + ] const unknownFogUuids = fogs .filter((fog) => { const statusUpdateToleranceMs = fog.statusFrequency * 1000 * statusUpdateTolerance diff --git a/src/routes/agent.js b/src/routes/agent.js index f43836fb..9196b4e5 100644 --- a/src/routes/agent.js +++ b/src/routes/agent.js @@ -245,6 +245,31 @@ module.exports = [ logger.apiRes({ req: req, res: res, responseObject: responseObject }) } 
}, + { + method: 'get', + path: '/api/v3/agent/volumeMounts', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + const getAgentLinkedVolumeMountsEndpoint = ResponseDecorator.handleErrors(AgentController.getAgentLinkedVolumeMountsEndpoint, + successCode, errorCodes) + const responseObject = await getAgentLinkedVolumeMountsEndpoint(req) + + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, res: res, responseObject: responseObject }) + } + }, { method: 'get', path: '/api/v3/agent/microservices', diff --git a/src/routes/configMap.js b/src/routes/configMap.js new file mode 100644 index 00000000..4d648419 --- /dev/null +++ b/src/routes/configMap.js @@ -0,0 +1,246 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const constants = require('../helpers/constants') +const ConfigMapController = require('../controllers/config-map-controller') +const ResponseDecorator = require('../decorators/response-decorator') +const logger = require('../logger') +const Errors = require('../helpers/errors') +const keycloak = require('../config/keycloak.js').initKeycloak() + +module.exports = [ + { + method: 'post', + path: '/api/v3/configmaps', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_CONFLICT, + errors: [Errors.ConflictError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createConfigMapEndpoint = ResponseDecorator.handleErrors(ConfigMapController.createConfigMapEndpoint, successCode, errorCodes) + const responseObject = await createConfigMapEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/configmaps/yaml', + fileInput: 'configMap', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_CONFLICT, + errors: [Errors.ConflictError] + } + ] + 
+ await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createConfigMapFromYamlEndpoint = ResponseDecorator.handleErrors(ConfigMapController.createConfigMapFromYamlEndpoint, successCode, errorCodes) + const responseObject = await createConfigMapFromYamlEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'patch', + path: '/api/v3/configmaps/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const updateConfigMapEndpoint = ResponseDecorator.handleErrors(ConfigMapController.updateConfigMapEndpoint, successCode, errorCodes) + const responseObject = await updateConfigMapEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'patch', + path: '/api/v3/configmaps/yaml/:name', + fileInput: 'configMap', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 
'Developer'])(req, res, async () => { + const updateConfigMapFromYamlEndpoint = ResponseDecorator.handleErrors(ConfigMapController.updateConfigMapFromYamlEndpoint, successCode, errorCodes) + const responseObject = await updateConfigMapFromYamlEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/configmaps/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const getConfigMapEndpoint = ResponseDecorator.handleErrors(ConfigMapController.getConfigMapEndpoint, successCode, errorCodes) + const responseObject = await getConfigMapEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/configmaps', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const listConfigMapsEndpoint = ResponseDecorator.handleErrors(ConfigMapController.listConfigMapsEndpoint, successCode, errorCodes) + const responseObject = await listConfigMapsEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + 
.status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/configmaps/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const deleteConfigMapEndpoint = ResponseDecorator.handleErrors(ConfigMapController.deleteConfigMapEndpoint, successCode, errorCodes) + const responseObject = await deleteConfigMapEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + } +] diff --git a/src/routes/microservices.js b/src/routes/microservices.js index 38bef8d6..7c1d1f02 100644 --- a/src/routes/microservices.js +++ b/src/routes/microservices.js @@ -18,36 +18,6 @@ const logger = require('../logger') const keycloak = require('../config/keycloak.js').initKeycloak() module.exports = [ - { - method: 'get', - path: '/api/v3/microservices/public-ports', - middleware: async (req, res) => { - logger.apiReq(req) - - const successCode = constants.HTTP_CODE_SUCCESS - const errorCodes = [ - { - code: constants.HTTP_CODE_UNAUTHORIZED, - errors: [Errors.AuthenticationError] - } - ] - - await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { - const listAllPublicPortsEndPoint = ResponseDecorator.handleErrors( - MicroservicesController.listAllPublicPortsEndPoint, - successCode, - errorCodes - ) - const responseObject = await listAllPublicPortsEndPoint(req) - const user = 
req.kauth.grant.access_token.content.preferred_username - res - .status(responseObject.code) - .send(responseObject.body) - - logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) - }) - } - }, { method: 'get', path: '/api/v3/microservices/', @@ -861,5 +831,157 @@ module.exports = [ logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } - } + }, + { + method: 'post', + path: '/api/v3/microservices/:uuid/exec', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createMicroserviceExecEndPoint = ResponseDecorator.handleErrors( + MicroservicesController.createMicroserviceExecEndPoint, + successCode, + errorCodes + ) + const responseObject = await createMicroserviceExecEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/microservices/system/:uuid/exec', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE'])(req, res, async () => { + const createSystemMicroserviceExecEndPoint = 
ResponseDecorator.handleErrors( + MicroservicesController.createSystemMicroserviceExecEndPoint, + successCode, + errorCodes + ) + const responseObject = await createSystemMicroserviceExecEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/microservices/:uuid/exec', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const deleteMicroserviceExecEndPoint = ResponseDecorator.handleErrors( + MicroservicesController.deleteMicroserviceExecEndPoint, + successCode, + errorCodes + ) + const responseObject = await deleteMicroserviceExecEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/microservices/system/:uuid/exec', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE'])(req, res, async () => { + const 
deleteSystemMicroserviceExecEndPoint = ResponseDecorator.handleErrors( + MicroservicesController.deleteSystemMicroserviceExecEndPoint, + successCode, + errorCodes + ) + const responseObject = await deleteSystemMicroserviceExecEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, ] diff --git a/src/routes/secret.js b/src/routes/secret.js index 54289adf..b6c6f55a 100644 --- a/src/routes/secret.js +++ b/src/routes/secret.js @@ -89,7 +89,7 @@ module.exports = [ } }, { - method: 'put', + method: 'patch', path: '/api/v3/secrets/:name', middleware: async (req, res) => { logger.apiReq(req) @@ -123,7 +123,7 @@ module.exports = [ } }, { - method: 'put', + method: 'patch', path: '/api/v3/secrets/yaml/:name', fileInput: 'secret', middleware: async (req, res) => { diff --git a/src/routes/service.js b/src/routes/service.js new file mode 100644 index 00000000..e3f198ba --- /dev/null +++ b/src/routes/service.js @@ -0,0 +1,274 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const constants = require('../helpers/constants') +const ServiceController = require('../controllers/service-controller') +const ResponseDecorator = require('../decorators/response-decorator') +const logger = require('../logger') +const Errors = require('../helpers/errors') +const keycloak = require('../config/keycloak.js').initKeycloak() + +module.exports = [ + { + method: 'get', + path: '/api/v3/services', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const listServicesEndpoint = ResponseDecorator.handleErrors( + ServiceController.listServicesEndpoint, + successCode, + errorCodes + ) + const responseObject = await listServicesEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/services/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const getServiceEndpoint = ResponseDecorator.handleErrors( + ServiceController.getServiceEndpoint, + successCode, + errorCodes + ) + const responseObject = await getServiceEndpoint(req) + 
const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/services', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.DuplicatePropertyError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createServiceEndpoint = ResponseDecorator.handleErrors( + ServiceController.createServiceEndpoint, + successCode, + errorCodes + ) + const responseObject = await createServiceEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'patch', + path: '/api/v3/services/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const updateServiceEndpoint = ResponseDecorator.handleErrors( + ServiceController.updateServiceEndpoint, + successCode, + errorCodes + ) + const responseObject = await updateServiceEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + 
.status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/services/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const deleteServiceEndpoint = ResponseDecorator.handleErrors( + ServiceController.deleteServiceEndpoint, + successCode, + errorCodes + ) + const responseObject = await deleteServiceEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/services/yaml', + fileInput: 'service', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_CREATED + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.DuplicatePropertyError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const createServiceYAMLEndpoint = ResponseDecorator.handleErrors( + ServiceController.createServiceYAMLEndpoint, + successCode, + errorCodes + ) + const responseObject = await createServiceYAMLEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: 
responseObject }) + }) + } + }, + { + method: 'patch', + path: '/api/v3/services/yaml/:name', + fileInput: 'service', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const updateServiceYAMLEndpoint = ResponseDecorator.handleErrors( + ServiceController.updateServiceYAMLEndpoint, + successCode, + errorCodes + ) + const responseObject = await updateServiceYAMLEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + } +] diff --git a/src/routes/volumeMount.js b/src/routes/volumeMount.js new file mode 100644 index 00000000..05c20b50 --- /dev/null +++ b/src/routes/volumeMount.js @@ -0,0 +1,312 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ +const constants = require('../helpers/constants') +const VolumeMountController = require('../controllers/volume-mount-controller') +const ResponseDecorator = require('../decorators/response-decorator') +const logger = require('../logger') +const Errors = require('../helpers/errors') +const keycloak = require('../config/keycloak.js').initKeycloak() + +module.exports = [ + { + method: 'get', + path: '/api/v3/volumeMounts', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const getVolumeMountsEndpoint = ResponseDecorator.handleErrors(VolumeMountController.listVolumeMountsEndpoint, successCode, errorCodes) + const responseObject = await getVolumeMountsEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'get', + path: '/api/v3/volumeMounts/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const 
getVolumeMountEndpoint = ResponseDecorator.handleErrors(VolumeMountController.getVolumeMountEndpoint, successCode, errorCodes) + const responseObject = await getVolumeMountEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'patch', + path: '/api/v3/volumeMounts/:name', + supportSubstitution: true, + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE'])(req, res, async () => { + const updateVolumeMountEndpoint = ResponseDecorator.handleErrors(VolumeMountController.updateVolumeMountEndpoint, successCode, errorCodes) + const responseObject = await updateVolumeMountEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/volumeMounts/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_ACCEPTED + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for 
SRE role + await keycloak.protect(['SRE'])(req, res, async () => { + const deleteVolumeMountEndpoint = ResponseDecorator.handleErrors(VolumeMountController.deleteVolumeMountEndpoint, successCode, errorCodes) + const responseObject = await deleteVolumeMountEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/volumeMounts', + supportSubstitution: true, + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE'])(req, res, async () => { + const createVolumeMountEndpoint = ResponseDecorator.handleErrors(VolumeMountController.createVolumeMountEndpoint, successCode, errorCodes) + const responseObject = await createVolumeMountEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/volumeMounts/yaml', + fileInput: 'volumeMount', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE'])(req, res, async () 
=> { + const createVolumeMountYamlEndpoint = ResponseDecorator.handleErrors(VolumeMountController.createVolumeMountYamlEndpoint, successCode, errorCodes) + const responseObject = await createVolumeMountYamlEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'patch', + path: '/api/v3/volumeMounts/yaml/:name', + fileInput: 'volumeMount', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE'])(req, res, async () => { + const updateVolumeMountYamlEndpoint = ResponseDecorator.handleErrors(VolumeMountController.updateVolumeMountYamlEndpoint, successCode, errorCodes) + const responseObject = await updateVolumeMountYamlEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'post', + path: '/api/v3/volumeMounts/:name/link', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await 
keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const linkVolumeMountEndpoint = ResponseDecorator.handleErrors(VolumeMountController.linkVolumeMountEndpoint, successCode, errorCodes) + const responseObject = await linkVolumeMountEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/volumeMounts/:name/link', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const unlinkVolumeMountEndpoint = ResponseDecorator.handleErrors(VolumeMountController.unlinkVolumeMountEndpoint, successCode, errorCodes) + const responseObject = await unlinkVolumeMountEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + } +] diff --git a/src/schemas/agent.js b/src/schemas/agent.js index d14255a2..e42787db 100644 --- a/src/schemas/agent.js +++ b/src/schemas/agent.js @@ -55,6 +55,9 @@ const updateAgentConfig = { 'latitude': { 'type': 'number', 'minimum': -90, 'maximum': 90 }, 'longitude': { 'type': 'number', 'minimum': -180, 'maximum': 180 }, 'gpsMode': { 'type': 'string' }, + 'gpsDevice': { 'type': 'string' }, + 'gpsScanFrequency': { 'type': 'integer', 'minimum': 0 }, + 'edgeGuardFrequency': { 'type': 'integer', 'minimum': 0 }, 'dockerPruningFrequency': 
{ 'type': 'integer', 'minimum': 0 }, 'availableDiskThreshold': { 'type': 'integer', 'minimum': 0 }, 'logLevel': { 'type': 'string' }, @@ -68,6 +71,7 @@ const updateAgentStatus = { 'type': 'object', 'properties': { 'daemonStatus': { 'type': 'string' }, + 'warningMessage': { 'type': 'string' }, 'daemonOperatingDuration': { 'type': 'integer', 'minimum': 0 }, 'daemonLastStart': { 'type': 'integer', 'minimum': 0 }, 'memoryUsage': { 'type': 'number', 'minimum': 0 }, @@ -92,6 +96,10 @@ const updateAgentStatus = { 'microserviceMessageCounts': { 'type': 'string' }, 'messageSpeed': { 'type': 'number', 'minimum': 0 }, 'lastCommandTime': { 'type': 'integer', 'minimum': 0 }, + 'gpsMode': { 'type': 'string' }, + 'gpsDevice': { 'type': 'string' }, + 'gpsScanFrequency': { 'type': 'integer', 'minimum': 0 }, + 'edgeGuardFrequency': { 'type': 'integer', 'minimum': 0 }, 'tunnelStatus': { 'type': 'string' }, 'version': { 'type': 'string' }, 'isReadyToUpgrade': { 'type': 'boolean' }, @@ -134,7 +142,10 @@ const microserviceStatus = { 'startTime': { 'type': 'integer' }, 'operatingDuration': { 'type': 'integer' }, 'cpuUsage': { 'type': 'number' }, - 'memoryUsage': { 'type': 'number' } + 'memoryUsage': { 'type': 'number' }, + 'ipAddress': { 'type': 'string' }, + 'ipAddressExternal': { 'type': 'string' }, + 'execSessionId': { 'type': 'string' } }, 'required': ['id'], 'additionalProperties': true diff --git a/src/schemas/certificate.js b/src/schemas/certificate.js index 28591cbd..07e53892 100644 --- a/src/schemas/certificate.js +++ b/src/schemas/certificate.js @@ -11,7 +11,7 @@ const caCreate = { }, secretName: { type: 'string' } }, - required: ['type'], + required: ['type', 'name'], additionalProperties: false, allOf: [ { @@ -43,9 +43,9 @@ const certificateCreate = { type: 'object', properties: { type: { type: 'string', enum: ['k8s-secret', 'direct', 'self-signed'] }, - secretName: { type: 'string' }, - cert: { type: 'string' }, - key: { type: 'string' } + secretName: { type: 'string' } + // 
cert: { type: 'string' }, + // key: { type: 'string' } }, required: ['type'] } @@ -141,4 +141,3 @@ module.exports = { ], innerSchemas: [] } - \ No newline at end of file diff --git a/src/schemas/config-map.js b/src/schemas/config-map.js new file mode 100644 index 00000000..bccfa961 --- /dev/null +++ b/src/schemas/config-map.js @@ -0,0 +1,66 @@ +const configMapCreate = { + id: '/configMapCreate', + type: 'object', + properties: { + name: { type: 'string', minLength: 1, maxLength: 255 }, + immutable: { type: 'boolean' }, + data: { type: 'object' } + }, + required: ['name', 'data'], + additionalProperties: false +} + +const configMapUpdate = { + id: '/configMapUpdate', + type: 'object', + properties: { + immutable: { type: 'boolean' }, + data: { type: 'object' } + }, + required: ['data'], + additionalProperties: false +} + +const configMapResponse = { + id: '/configMapResponse', + type: 'object', + properties: { + id: { type: 'integer' }, + name: { type: 'string' }, + immutable: { type: 'boolean' }, + data: { type: 'object' }, + created_at: { type: 'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['id', 'name', 'data', 'created_at', 'updated_at'], + additionalProperties: false +} + +const configMapListResponse = { + id: '/configMapListResponse', + type: 'object', + properties: { + configMaps: { + type: 'array', + items: { + type: 'object', + properties: { + id: { type: 'integer' }, + name: { type: 'string' }, + immutable: { type: 'boolean' }, + created_at: { type: 'string', format: 'date-time' }, + updated_at: { type: 'string', format: 'date-time' } + }, + required: ['id', 'name', 'created_at', 'updated_at'], + additionalProperties: false + } + } + }, + required: ['configMaps'], + additionalProperties: false +} + +module.exports = { + mainSchemas: [configMapCreate, configMapUpdate, configMapResponse, configMapListResponse], + innerSchemas: [] +} diff --git a/src/schemas/iofog.js b/src/schemas/iofog.js index 
80ad9004..ca423c12 100644 --- a/src/schemas/iofog.js +++ b/src/schemas/iofog.js @@ -22,6 +22,8 @@ const iofogCreate = { 'description': { 'type': 'string' }, 'networkInterface': { 'type': 'string' }, 'dockerUrl': { 'type': 'string' }, + 'containerEngine': { 'type': 'string', 'enum': ['docker', 'podman'] }, + 'deploymentType': { 'type': 'string', 'enum': ['native', 'container'] }, 'diskLimit': { 'type': 'integer', 'minimum': 0 }, 'diskDirectory': { 'type': 'string' }, 'memoryLimit': { 'type': 'integer', 'minimum': 0 }, @@ -92,6 +94,8 @@ const iofogUpdate = { 'description': { 'type': 'string' }, 'networkInterface': { 'type': 'string' }, 'dockerUrl': { 'type': 'string' }, + 'containerEngine': { 'type': 'string', 'enum': ['docker', 'podman'] }, + 'deploymentType': { 'type': 'string', 'enum': ['native', 'container'] }, 'diskLimit': { 'type': 'integer', 'minimum': 0 }, 'diskDirectory': { 'type': 'string' }, 'memoryLimit': { 'type': 'integer', 'minimum': 0 }, diff --git a/src/schemas/microservice.js b/src/schemas/microservice.js index 386ad3f4..e3bed274 100644 --- a/src/schemas/microservice.js +++ b/src/schemas/microservice.js @@ -153,9 +153,22 @@ const env = { 'type': 'object', 'properties': { 'key': { 'type': 'string' }, - 'value': { 'type': 'string' } + 'value': { 'type': 'string' }, + 'valueFromSecret': { 'type': 'string' }, + 'valueFromConfigMap': { 'type': 'string' } }, - 'required': ['key', 'value'], + 'required': ['key'], + 'oneOf': [ + { + 'required': ['value'] + }, + { + 'required': ['valueFromSecret'] + }, + { + 'required': ['valueFromConfigMap'] + } + ], 'additionalProperties': true } @@ -176,43 +189,19 @@ const ports = { 'properties': { 'internal': { 'type': 'integer' }, 'external': { 'type': 'integer' }, - 'protocol': { 'enum': ['tcp', 'udp'] }, - 'public': { '$ref': '/publicPort' } + 'protocol': { 'enum': ['tcp', 'udp'] } }, 'required': ['internal', 'external'], 'additionalProperties': true } -const publicPort = { - 'id': '/publicPort', - type: 'object', - 
properties: { - enabled: { type: 'boolean' }, - schemes: { type: 'array', items: { type: 'string' } }, - protocol: { 'enum': ['tcp', 'http'] }, - router: { '$ref': '/publicPortRouter' } - }, - required: ['schemes', 'protocol'] -} - -const publicPortRouter = { - 'id': '/publicPortRouter', - type: 'object', - properties: { - host: { type: 'string' }, - port: { type: 'number' } - }, - required: [] -} - const portsCreate = { 'id': '/portsCreate', 'type': 'object', 'properties': { 'internal': { 'type': 'integer' }, 'external': { 'type': 'integer' }, - 'protocol': { 'enum': ['tcp', 'udp'] }, - 'public': { '$ref': '/publicPort' } + 'protocol': { 'enum': ['tcp', 'udp'] } }, 'required': ['internal', 'external'], 'additionalProperties': true @@ -232,6 +221,6 @@ const volumeMappings = { } module.exports = { - mainSchemas: [microserviceCreate, microserviceUpdate, env, ports, publicPort, publicPortRouter, extraHosts, portsCreate, microserviceDelete, volumeMappings], - innerSchemas: [volumeMappings, ports, publicPort, publicPortRouter, env, extraHosts, microserviceCreate] + mainSchemas: [microserviceCreate, microserviceUpdate, env, ports, extraHosts, portsCreate, microserviceDelete, volumeMappings], + innerSchemas: [volumeMappings, ports, env, extraHosts, microserviceCreate] } diff --git a/src/schemas/service.js b/src/schemas/service.js new file mode 100644 index 00000000..77066149 --- /dev/null +++ b/src/schemas/service.js @@ -0,0 +1,108 @@ +const { serviceNameRegex } = require('./utils/utils') + +const serviceCreate = { + id: '/serviceCreate', + type: 'object', + required: ['name', 'type', 'resource', 'targetPort'], + properties: { + name: { + type: 'string', + pattern: serviceNameRegex + }, + type: { + type: 'string', + enum: ['microservice', 'k8s', 'agent', 'external'] + }, + resource: { + type: 'string', + required: true + }, + targetPort: { + type: 'integer' + }, + defaultBridge: { + type: 'string' + }, + servicePort: { + type: 'integer' + }, + k8sType: { + type: 'string', 
+ enum: ['LoadBalancer', 'ClusterIP', 'NodePort'] + }, + tags: { + type: 'array', + items: { '$ref': '/serviceTag' } + } + } + // allOf: [ + // { + // if: { + // properties: { type: { const: 'k8s' } } + // }, + // then: { + // required: ['servicePort', 'k8sType'] + // } + // } + // ] +} + +const serviceUpdate = { + id: '/serviceUpdate', + type: 'object', + required: ['name'], + properties: { + name: { + type: 'string', + pattern: serviceNameRegex + }, + type: { + type: 'string', + enum: ['microservice', 'k8s', 'agent', 'external'] + }, + resource: { + type: 'string' + }, + targetPort: { + type: 'integer' + }, + defaultBridge: { + type: 'string' + }, + servicePort: { + type: 'integer' + }, + k8sType: { + type: 'string', + enum: ['LoadBalancer', 'ClusterIP', 'NodePort'] + }, + tags: { + type: 'array', + items: { '$ref': '/serviceTag' } + } + } + // allOf: [ + // { + // if: { + // properties: { type: { const: 'k8s' } } + // }, + // then: { + // required: ['servicePort', 'k8sType'] + // } + // } + // ] +} + +const serviceTag = { + id: '/serviceTag', + type: 'string' +} + +module.exports = { + mainSchemas: [ + serviceCreate, + serviceUpdate, + serviceTag + ], + innerSchemas: [serviceTag] +} diff --git a/src/schemas/utils/utils.js b/src/schemas/utils/utils.js index 8f77b872..9fcd98e9 100644 --- a/src/schemas/utils/utils.js +++ b/src/schemas/utils/utils.js @@ -1,6 +1,7 @@ /* eslint-disable no-useless-escape */ module.exports = { nameRegex: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$', + serviceNameRegex: '^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', // Supports hex, rgb, and rgba colorRegex: '^(#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{8}))|(rgb\(\s*(?:(\d{1,3})\s*,?){3}\))|(rgba\(\s*(?:(\d{1,3})\s*,?){4}\))|$', // https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string diff --git a/src/schemas/volume-mount.js b/src/schemas/volume-mount.js new file mode 100644 index 00000000..7aabc0ed --- /dev/null +++ b/src/schemas/volume-mount.js @@ 
-0,0 +1,91 @@ +const { serviceNameRegex } = require('./utils/utils') + +const volumeMountCreate = { + 'id': '/volumeMountCreate', + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'pattern': serviceNameRegex + }, + 'secretName': { + 'type': 'string' + }, + 'configMapName': { + 'type': 'string' + } + }, + 'required': ['name'], + 'oneOf': [ + { + 'required': ['secretName'] + }, + { + 'required': ['configMapName'] + } + ], + 'additionalProperties': false +} + +const volumeMountUpdate = { + 'id': '/volumeMountUpdate', + 'type': 'object', + 'properties': { + 'name': { + 'type': 'string', + 'pattern': serviceNameRegex + }, + 'secretName': { + 'type': 'string' + }, + 'configMapName': { + 'type': 'string' + } + }, + 'oneOf': [ + { + 'required': ['secretName'] + }, + { + 'required': ['configMapName'] + } + ], + 'additionalProperties': false +} + +const volumeMountLink = { + 'id': '/volumeMountLink', + 'type': 'object', + 'properties': { + 'fogUuids': { + 'type': 'array', + 'items': { + 'type': 'string' + }, + 'minItems': 1 + } + }, + 'required': ['fogUuids'], + 'additionalProperties': false +} + +const volumeMountUnlink = { + 'id': '/volumeMountUnlink', + 'type': 'object', + 'properties': { + 'fogUuids': { + 'type': 'array', + 'items': { + 'type': 'string' + }, + 'minItems': 1 + } + }, + 'required': ['fogUuids'], + 'additionalProperties': false +} + +module.exports = { + mainSchemas: [volumeMountCreate, volumeMountUpdate, volumeMountLink, volumeMountUnlink], + innerSchemas: [volumeMountCreate, volumeMountUpdate] +} diff --git a/src/server.js b/src/server.js index 5a0b8039..2d180fba 100755 --- a/src/server.js +++ b/src/server.js @@ -186,14 +186,14 @@ initialize().then(() => { const viewerURL = process.env.VIEWER_URL || config.get('viewer.url') // File-based SSL configuration - const sslKey = process.env.SSL_KEY || config.get('server.ssl.path.key') - const sslCert = process.env.SSL_CERT || config.get('server.ssl.path.cert') - const intermedKey = 
process.env.INTERMEDIATE_CERT || config.get('server.ssl.path.intermediateCert') + const sslKey = process.env.SSL_PATH_KEY || config.get('server.ssl.path.key') + const sslCert = process.env.SSL_PATH_CERT || config.get('server.ssl.path.cert') + const intermedKey = process.env.SSL_PATH_INTERMEDIATE_CERT || config.get('server.ssl.path.intermediateCert') // Base64 SSL configuration - const sslKeyBase64 = config.get('server.ssl.base64.key') - const sslCertBase64 = config.get('server.ssl.base64.cert') - const intermedKeyBase64 = config.get('server.ssl.base64.intermediateCert') + const sslKeyBase64 = process.env.SSL_BASE64_KEY || config.get('server.ssl.base64.key') + const sslCertBase64 = process.env.SSL_BASE64_CERT || config.get('server.ssl.base64.cert') + const intermedKeyBase64 = process.env.SSL_BASE64_INTERMEDIATE_CERT || config.get('server.ssl.base64.intermediateCert') const hasFileBasedSSL = !devMode && sslKey && sslCert const hasBase64SSL = !devMode && sslKeyBase64 && sslCertBase64 diff --git a/src/services/agent-service.js b/src/services/agent-service.js index ec416cc9..3d0d3bd4 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -43,9 +43,10 @@ const RouterManager = require('../data/managers/router-manager') const EdgeResourceService = require('./edge-resource-service') const constants = require('../helpers/constants') const SecretManager = require('../data/managers/secret-manager') +const ConfigMapManager = require('../data/managers/config-map-manager') const IncomingForm = formidable.IncomingForm const CHANGE_TRACKING_DEFAULT = {} -const CHANGE_TRACKING_KEYS = ['config', 'version', 'reboot', 'deleteNode', 'microserviceList', 'microserviceConfig', 'routing', 'registries', 'tunnel', 'diagnostics', 'isImageSnapshot', 'prune', 'routerChanged', 'linkedEdgeResources'] +const CHANGE_TRACKING_KEYS = ['config', 'version', 'reboot', 'deleteNode', 'microserviceList', 'microserviceConfig', 'routing', 'registries', 'tunnel', 'diagnostics', 
'isImageSnapshot', 'prune', 'routerChanged', 'linkedEdgeResources', 'volumeMounts', 'execSessions'] for (const key of CHANGE_TRACKING_KEYS) { CHANGE_TRACKING_DEFAULT[key] = false } @@ -112,10 +113,10 @@ const agentDeprovision = async function (deprovisionData, fog, transaction) { const _invalidateFogNode = async function (fog, transaction) { const where = { uuid: fog.uuid } - const data = { daemonStatus: FogStates.UNKNOWN, ipAddress: '0.0.0.0', ipAddressExternal: '0.0.0.0' } + const data = { daemonStatus: FogStates.DEPROVISIONED, ipAddress: '0.0.0.0', ipAddressExternal: '0.0.0.0' } await FogManager.update(where, data, transaction) const updatedFog = Object.assign({}, fog) - updatedFog.daemonStatus = FogStates.UNKNOWN + updatedFog.daemonStatus = FogStates.DEPROVISIONED updatedFog.ipAddress = '0.0.0.0' updatedFog.ipAddressExternal = '0.0.0.0' return updatedFog @@ -137,6 +138,10 @@ const getAgentConfig = async function (fog, transaction) { logLimit: fog.logLimit, logDirectory: fog.logDirectory, logFileCount: fog.logFileCount, + gpsMode: fog.gpsMode, + gpsDevice: fog.gpsDevice, + gpsScanFrequency: fog.gpsScanFrequency, + edgeGuardFrequency: fog.edgeGuardFrequency, statusFrequency: fog.statusFrequency, changeFrequency: fog.changeFrequency, deviceScanFrequency: fog.deviceScanFrequency, @@ -175,6 +180,9 @@ const updateAgentConfig = async function (updateData, fog, transaction) { latitude: updateData.latitude, longitude: updateData.longitude, gpsMode: updateData.gpsMode, + gpsDevice: updateData.gpsDevice, + gpsScanFrequency: updateData.gpsScanFrequency, + edgeGuardFrequency: updateData.edgeGuardFrequency, dockerPruningFrequency: updateData.dockerPruningFrequency, availableDiskThreshold: updateData.availableDiskThreshold, logLevel: updateData.logLevel, @@ -198,7 +206,6 @@ const getAgentConfigChanges = async function (ioFog, transaction) { } res.lastUpdated = changeTracking.lastUpdated } - return res } @@ -238,11 +245,31 @@ const updateAgentStatus = async function 
(agentStatus, fog, transaction) { tunnelStatus: agentStatus.tunnelStatus, version: agentStatus.version, isReadyToUpgrade: agentStatus.isReadyToUpgrade, - isReadyToRollback: agentStatus.isReadyToRollback + isReadyToRollback: agentStatus.isReadyToRollback, + activeVolumeMounts: agentStatus.activeVolumeMounts, + volumeMountLastUpdate: agentStatus.volumeMountLastUpdate } fogStatus = AppHelper.deleteUndefinedFields(fogStatus) + const existingFog = await FogManager.findOne({ + uuid: fog.uuid + }, transaction) + + if (!existingFog.warningMessage.includes('Background orchestration')) { + fogStatus.daemonStatus = agentStatus.daemonStatus + } else { + fogStatus.daemonStatus = FogStates.WARNING + } + + if (agentStatus.warningMessage.includes('HW signature changed') || agentStatus.warningMessage.includes('HW signature mismatch')) { + fogStatus.securityStatus = 'WARNING' + fogStatus.securityViolationInfo = 'HW signature mismatch' + } else { + fogStatus.securityStatus = 'OK' + fogStatus.securityViolationInfo = 'No violation' + } + await FogManager.update({ uuid: fog.uuid }, fogStatus, transaction) @@ -262,7 +289,8 @@ const _updateMicroserviceStatuses = async function (microserviceStatus, fog, tra memoryUsage: status.memoryUsage, percentage: status.percentage, errorMessage: status.errorMessage, - ipAddress: status.ipAddress + ipAddress: status.ipAddress, + execSessionId: status.execSessionId } microserviceStatus = AppHelper.deleteUndefinedFields(microserviceStatus) const microservice = await MicroserviceManager.findOne({ @@ -296,6 +324,7 @@ const getAgentMicroservices = async function (fog, transaction) { const routes = await MicroserviceService.getReceiverMicroservices(microservice, transaction) const isConsumer = await MicroserviceService.isMicroserviceConsumer(microservice, transaction) + const isRouter = await MicroserviceService.isMicroserviceRouter(microservice, transaction) const env = microservice.env && microservice.env.map((it) => { return { @@ -318,6 +347,8 @@ const 
getAgentMicroservices = async function (fog, transaction) { annotations: microservice.annotations, rebuild: microservice.rebuild, rootHostAccess: microservice.rootHostAccess, + pidMode: microservice.pidMode, + ipcMode: microservice.ipcMode, runAsUser: microservice.runAsUser, platform: microservice.platform, runtime: microservice.runtime, @@ -335,7 +366,9 @@ const getAgentMicroservices = async function (fog, transaction) { capAdd, capDrop, routes, - isConsumer + isConsumer, + isRouter, + execEnabled: microservice.execEnabled } response.push(responseMicroservice) @@ -628,6 +661,59 @@ const getControllerCA = async function (fog, transaction) { throw new Errors.ValidationError('No valid SSL certificate configuration found') } +const getAgentLinkedVolumeMounts = async function (fog, transaction) { + const volumeMounts = [] + const resourceAttributes = [ + 'uuid', + 'name', + 'version', + 'configMapName', + 'secretName' + ] + const resources = await fog.getVolumeMounts({ attributes: resourceAttributes }) + for (const resource of resources) { + const resourceObject = resource.toJSON() + let data = {} + + if (resourceObject.configMapName) { + // Handle ConfigMap + const configMap = await ConfigMapManager.getConfigMap(resourceObject.configMapName, transaction) + if (configMap) { + // For configmaps, we need to base64 encode all values + data = Object.entries(configMap.data).reduce((acc, [key, value]) => { + acc[key] = Buffer.from(value).toString('base64') + return acc + }, {}) + } + } else if (resourceObject.secretName) { + // Handle Secret + const secret = await SecretManager.getSecret(resourceObject.secretName, transaction) + if (secret) { + if (secret.type === 'tls') { + // For TLS secrets, values are already base64 encoded + data = secret.data + } else { + // For opaque secrets, we need to base64 encode all values + data = Object.entries(secret.data).reduce((acc, [key, value]) => { + acc[key] = Buffer.from(value).toString('base64') + return acc + }, {}) + } + } + } + + 
// Create final response object without configMapName and secretName + const responseObject = { + uuid: resourceObject.uuid, + name: resourceObject.name, + version: resourceObject.version, + data: data + } + volumeMounts.push(responseObject) + } + return volumeMounts +} + module.exports = { agentProvision: TransactionDecorator.generateTransaction(agentProvision), agentDeprovision: TransactionDecorator.generateTransaction(agentDeprovision), @@ -649,5 +735,6 @@ module.exports = { getImageSnapshot: TransactionDecorator.generateTransaction(getImageSnapshot), putImageSnapshot: TransactionDecorator.generateTransaction(putImageSnapshot), getAgentLinkedEdgeResources: TransactionDecorator.generateTransaction(getAgentLinkedEdgeResources), + getAgentLinkedVolumeMounts: TransactionDecorator.generateTransaction(getAgentLinkedVolumeMounts), getControllerCA: TransactionDecorator.generateTransaction(getControllerCA) } diff --git a/src/services/certificate-service.js b/src/services/certificate-service.js index 59d5f139..97bdc578 100644 --- a/src/services/certificate-service.js +++ b/src/services/certificate-service.js @@ -12,7 +12,8 @@ const forge = require('node-forge') // Helper function to check Kubernetes environment function checkKubernetesEnvironment () { - const isKubernetes = process.env.CONTROL_PLANE || config.get('app.ControlPlane') === 'kubernetes' + const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') + const isKubernetes = controlPlane && controlPlane.toLowerCase() === 'kubernetes' if (!isKubernetes) { throw new Errors.ValidationError(ErrorMessages.NOT_KUBERNETES_ENV) } @@ -113,7 +114,10 @@ async function createCAEndpoint (caData, transaction) { ca = await require('../utils/cert').getCAFromK8sSecret(caData.secretName) certDetails = parseCertificate(ca.certificate) // Store the CA locally with the same name as the secret - await storeCA({ cert: ca.certificate, key: ca.key }, caData.secretName) + const checkedSecret = await 
SecretManager.findOne({ name: caData.secretName || caData.name }, transaction) + if (!checkedSecret) { + await storeCA({ cert: ca.certificate, key: ca.key }, caData.secretName) + } } else if (caData.type === 'direct') { // Load from internal secret const caObj = await require('../utils/cert').loadCA(caData.secretName) @@ -126,16 +130,18 @@ async function createCAEndpoint (caData, transaction) { // Get the secret that was just created or referenced const secret = await SecretManager.findOne({ name: caData.secretName || caData.name }, transaction) - // Create certificate record in database - await CertificateManager.createCertificateRecord({ - name: caData.secretName || caData.name, // Use secretName if available, otherwise use provided name - subject: certDetails.subject, - isCA: true, - validFrom: certDetails.validFrom, - validTo: certDetails.validTo, - serialNumber: certDetails.serialNumber, - secretId: secret ? secret.id : null - }, transaction) + if (caData.type !== 'k8s-secret') { + // Create certificate record in database + await CertificateManager.createCertificateRecord({ + name: caData.secretName || caData.name, // Use secretName if available, otherwise use provided name + subject: certDetails.subject, + isCA: true, + validFrom: certDetails.validFrom, + validTo: certDetails.validTo, + serialNumber: certDetails.serialNumber, + secretId: secret ? 
secret.id : null + }, transaction) + } return { name: caData.secretName || caData.name, // Use secretName if available, otherwise use provided name @@ -160,16 +166,20 @@ async function getCAEndpoint (name, transaction) { throw new Errors.NotFoundError(`CA with name ${name} not found`) } + // Normalize line endings in the certificate and private key + const certificate = normalizeLineEndings(Buffer.from(secret.data['tls.crt'], 'base64').toString()) + const privateKey = normalizeLineEndings(Buffer.from(secret.data['tls.key'], 'base64').toString()) + return { name: certRecord.name, subject: certRecord.subject, - is_ca: certRecord.isCA, - valid_from: certRecord.validFrom, - valid_to: certRecord.validTo, - serial_number: certRecord.serialNumber, + isCA: certRecord.isCA, + validFrom: certRecord.validFrom, + validTo: certRecord.validTo, + serialNumber: certRecord.serialNumber, data: { - certificate: Buffer.from(secret.data['tls.crt'], 'base64').toString(), - private_key: Buffer.from(secret.data['tls.key'], 'base64').toString() + certificate, + privateKey: privateKey } } } @@ -360,21 +370,25 @@ async function getCertificateEndpoint (name, transaction) { ? certChain.slice(1).map(c => ({ name: c.name, subject: c.subject })) : [] + // Normalize line endings in the certificate and private key + const certificate = normalizeLineEndings(Buffer.from(secret.data['tls.crt'], 'base64').toString()) + const privateKey = normalizeLineEndings(Buffer.from(secret.data['tls.key'], 'base64').toString()) + return { name: certRecord.name, subject: certRecord.subject, hosts: certRecord.hosts, - is_ca: certRecord.isCA, - valid_from: certRecord.validFrom, - valid_to: certRecord.validTo, - serial_number: certRecord.serialNumber, - ca_name: certRecord.signingCA ? 
certRecord.signingCA.name : null, - certificate_chain: chainInfo, - days_remaining: certRecord.getDaysUntilExpiration(), - is_expired: certRecord.isExpired(), + isCA: certRecord.isCA, + validFrom: certRecord.validFrom, + validTo: certRecord.validTo, + serialNumber: certRecord.serialNumber, + caName: certRecord.signingCA ? certRecord.signingCA.name : null, + certificateChain: chainInfo, + daysRemaining: certRecord.getDaysUntilExpiration(), + isExpired: certRecord.isExpired(), data: { - certificate: Buffer.from(secret.data['tls.crt'], 'base64').toString(), - private_key: Buffer.from(secret.data['tls.key'], 'base64').toString() + certificate, + privateKey: privateKey } } } @@ -387,12 +401,12 @@ async function listCertificatesEndpoint (transaction) { name: cert.name, subject: cert.subject, hosts: cert.hosts, - is_ca: cert.isCA, - valid_from: cert.validFrom, - valid_to: cert.validTo, - days_remaining: cert.getDaysUntilExpiration(), - is_expired: cert.isExpired(), - ca_name: cert.signingCA ? cert.signingCA.name : null + isCA: cert.isCA, + validFrom: cert.validFrom, + validTo: cert.validTo, + daysRemaining: cert.getDaysUntilExpiration(), + isExpired: cert.isExpired(), + caName: cert.signingCA ? 
cert.signingCA.name : null })) } } @@ -591,6 +605,18 @@ async function listExpiringCertificatesEndpoint (days = 30, transaction) { } } +/** + * Normalizes line endings to Unix style (\n) + * Handles both \r\n and \n cases to ensure consistent output + * @param {string} str - String to normalize + * @returns {string} - String with normalized line endings + */ +function normalizeLineEndings (str) { + // First replace all \r\n with \n + // Then replace any remaining \r with \n + return str.replace(/\r\n/g, '\n').replace(/\r/g, '\n') +} + module.exports = { createCAEndpoint: TransactionDecorator.generateTransaction(createCAEndpoint), getCAEndpoint: TransactionDecorator.generateTransaction(getCAEndpoint), diff --git a/src/services/change-tracking-service.js b/src/services/change-tracking-service.js index 9fe0885c..d7e64aeb 100644 --- a/src/services/change-tracking-service.js +++ b/src/services/change-tracking-service.js @@ -29,7 +29,9 @@ const events = Object.freeze({ diagnostics: false, isImageSnapshot: false, prune: false, - routerChanged: false + routerChanged: false, + volumeMounts: false, + execSessions: false }, diagnostics: { diagnostics: true @@ -81,6 +83,12 @@ const events = Object.freeze({ }, prune: { prune: true + }, + volumeMounts: { + volumeMounts: true + }, + microserviceExecSessions: { + execSessions: true } }) diff --git a/src/services/config-map-service.js b/src/services/config-map-service.js new file mode 100644 index 00000000..1ccf0f0d --- /dev/null +++ b/src/services/config-map-service.js @@ -0,0 +1,127 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const TransactionDecorator = require('../decorators/transaction-decorator') +const ConfigMapManager = require('../data/managers/config-map-manager') +const AppHelper = require('../helpers/app-helper') +const Errors = require('../helpers/errors') +const ErrorMessages = require('../helpers/error-messages') +const Validator = require('../schemas/index') +const VolumeMountService = require('./volume-mount-service') +const VolumeMountingManager = require('../data/managers/volume-mounting-manager') + +async function createConfigMapEndpoint (configMapData, transaction) { + const validation = await Validator.validate(configMapData, Validator.schemas.configMapCreate) + if (!validation.valid) { + throw new Errors.ValidationError(validation.error) + } + + const existingConfigMap = await ConfigMapManager.findOne({ name: configMapData.name }, transaction) + if (existingConfigMap) { + throw new Errors.ConflictError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_ALREADY_EXISTS, configMapData.name)) + } + + const configMap = await ConfigMapManager.createConfigMap(configMapData.name, configMapData.immutable, configMapData.data, transaction) + return { + id: configMap.id, + name: configMap.name, + immutable: configMap.immutable, + created_at: configMap.created_at, + updated_at: configMap.updated_at + } +} + +async function updateConfigMapEndpoint (configMapName, configMapData, transaction) { + const validation = await Validator.validate(configMapData, Validator.schemas.configMapUpdate) + if (!validation.valid) { + throw new Errors.ValidationError(validation.error) + } + + const existingConfigMap = await ConfigMapManager.findOne({ name: configMapName }, transaction) + if (!existingConfigMap) { + throw new 
Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_NOT_FOUND, configMapName)) + } + + if (existingConfigMap.immutable === true) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_IMMUTABLE, configMapName)) + } + + const configMap = await ConfigMapManager.updateConfigMap(configMapName, configMapData.immutable, configMapData.data, transaction) + await _updateChangeTrackingForFogs(configMapName, transaction) + return { + id: configMap.id, + name: configMap.name, + created_at: configMap.created_at, + updated_at: configMap.updated_at + } +} + +async function getConfigMapEndpoint (configMapName, transaction) { + const configMap = await ConfigMapManager.getConfigMap(configMapName, transaction) + if (!configMap) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_NOT_FOUND, configMapName)) + } + + return { + id: configMap.id, + name: configMap.name, + data: configMap.data, + immutable: configMap.immutable, + created_at: configMap.created_at, + updated_at: configMap.updated_at + } +} + +async function listConfigMapsEndpoint (transaction) { + const configMaps = await ConfigMapManager.listConfigMaps(transaction) + return { + configMaps: configMaps.map(configMap => ({ + id: configMap.id, + name: configMap.name, + immutable: configMap.immutable, + created_at: configMap.created_at, + updated_at: configMap.updated_at + })) + } +} + +async function deleteConfigMapEndpoint (configMapName, transaction) { + const existingConfigMap = await ConfigMapManager.findOne({ name: configMapName }, transaction) + if (!existingConfigMap) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_NOT_FOUND, configMapName)) + } + + await ConfigMapManager.deleteConfigMap(configMapName, transaction) + return {} +} + +async function _updateChangeTrackingForFogs (configMapName, transaction) { + const configMapVolumeMounts = await VolumeMountingManager.findAll({ configMapName: configMapName }, 
transaction) + if (configMapVolumeMounts.length > 0) { + for (const configMapVolumeMount of configMapVolumeMounts) { + const volumeMountObj = { + name: configMapVolumeMount.name, + configMapName: configMapName + } + await VolumeMountService.updateVolumeMountEndpoint(configMapVolumeMount.name, volumeMountObj, transaction) + } + } +} + +module.exports = { + createConfigMapEndpoint: TransactionDecorator.generateTransaction(createConfigMapEndpoint), + updateConfigMapEndpoint: TransactionDecorator.generateTransaction(updateConfigMapEndpoint), + getConfigMapEndpoint: TransactionDecorator.generateTransaction(getConfigMapEndpoint), + listConfigMapsEndpoint: TransactionDecorator.generateTransaction(listConfigMapsEndpoint), + deleteConfigMapEndpoint: TransactionDecorator.generateTransaction(deleteConfigMapEndpoint) +} diff --git a/src/services/iofog-access-token-service.js b/src/services/iofog-access-token-service.js deleted file mode 100644 index 529fdb6b..00000000 --- a/src/services/iofog-access-token-service.js +++ /dev/null @@ -1,53 +0,0 @@ -/* - * ******************************************************************************* - * * Copyright (c) 2023 Datasance Teknoloji A.S. - * * - * * This program and the accompanying materials are made available under the - * * terms of the Eclipse Public License v. 
2.0 which is available at - * * http://www.eclipse.org/legal/epl-2.0 - * * - * * SPDX-License-Identifier: EPL-2.0 - * ******************************************************************************* - * - */ - -const AppHelper = require('../helpers/app-helper') -const FogAccessTokenManager = require('../data/managers/iofog-access-token-manager') - -const Config = require('../config') - -const generateAccessToken = async function (transaction) { - while (true) { - const newAccessToken = AppHelper.generateRandomString(16) - const exists = await FogAccessTokenManager.findOne({ - token: newAccessToken - }, transaction) - if (!exists) { - const accessTokenExpiryTime = Date.now() + Config.get('Settings:FogTokenExpirationIntervalSeconds') * 1000 - return { - token: newAccessToken, - expirationTime: accessTokenExpiryTime - } - } - } -} - -async function updateAccessToken (fogUuid, newAccessToken, transaction) { - return FogAccessTokenManager.updateOrCreate({ - iofogUuid: fogUuid - }, { - iofogUuid: fogUuid, - token: newAccessToken.token, - expirationTime: newAccessToken.expirationTime - }, transaction) -} - -async function all (transaction) { - return FogAccessTokenManager.findAll(null, transaction) -} - -module.exports = { - generateAccessToken, - updateAccessToken, - all -} diff --git a/src/services/iofog-service.js b/src/services/iofog-service.js index 7172c1ab..a4ffbf31 100644 --- a/src/services/iofog-service.js +++ b/src/services/iofog-service.js @@ -40,103 +40,198 @@ const Op = require('sequelize').Op const lget = require('lodash/get') const CertificateService = require('./certificate-service') const logger = require('../logger') +const ServiceManager = require('../data/managers/service-manager') +const FogStates = require('../enums/fog-state') const SITE_CA_CERT = 'pot-site-ca' const DEFAULT_ROUTER_LOCAL_CA = 'default-router-local-ca' +const SERVICE_ANNOTATION_TAG = 'service.datasance.com/tag' + +async function checkKubernetesEnvironment () { + const controlPlane = 
process.env.CONTROL_PLANE || config.get('app.ControlPlane') + return controlPlane && controlPlane.toLowerCase() === 'kubernetes' +} + +async function getLocalCertificateHosts (isKubernetes, namespace) { + if (isKubernetes) { + return `router-local,router-local.${namespace},router-local.${namespace}.svc.cluster.local` + } + return '127.0.0.1,localhost,host.docker.internal,host.containers.internal' +} + +async function getSiteCertificateHosts (fogData, transaction) { + const hosts = new Set() + // Add existing hosts if isSystem + if (fogData.isSystem) { + if (fogData.host) hosts.add(fogData.host) + if (fogData.ipAddress) hosts.add(fogData.ipAddress) + if (fogData.ipAddressExternal) hosts.add(fogData.ipAddressExternal) + } + // Add default router host if not system + if (!fogData.isSystem) { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (defaultRouter.host) hosts.add(defaultRouter.host) + } + // Add upstream router hosts + // const upstreamRouters = (fogData.upstreamRouters || []).filter(uuid => uuid !== 'default-router') + // if (upstreamRouters.length) { + // for (const uuid of upstreamRouters) { + // const routerHost = await FogManager.findOne({ uuid: uuid }, transaction) + // if (routerHost.host) hosts.add(routerHost.host) + // if (routerHost.ipAddress) hosts.add(routerHost.ipAddress) + // } + // } + return Array.from(hosts).join(',') || 'localhost' +} + +async function _handleRouterCertificates (fogData, uuid, isRouterModeChanged, transaction) { + logger.debug('Starting _handleRouterCertificates for fog: ' + JSON.stringify({ uuid: uuid, host: fogData.host })) + + // Check if we're in Kubernetes environment + const isKubernetes = await checkKubernetesEnvironment() + const namespace = isKubernetes ? 
process.env.CONTROLLER_NAMESPACE : null -async function _handleRouterCertificates (fogData, transaction) { // Helper to check CA existence async function ensureCA (name, subject) { + logger.debug('Checking CA existence: ' + JSON.stringify({ name, subject })) try { await CertificateService.getCAEndpoint(name, transaction) + logger.debug('CA already exists: ' + name) // CA exists } catch (err) { if (err.name === 'NotFoundError') { + logger.debug('CA not found, creating new CA: ' + JSON.stringify({ name, subject })) await CertificateService.createCAEndpoint({ name, subject: `${subject}`, expiration: 60, // months type: 'self-signed' }, transaction) + logger.debug('Successfully created CA: ' + name) } else if (err.name === 'ConflictError') { + logger.debug('CA already exists (conflict): ' + name) // Already exists, ignore } else { + logger.error('Error in ensureCA - Name: ' + name + ', Subject: ' + subject + ', Error: ' + err.message + ', Type: ' + err.name + ', Code: ' + err.code) + logger.error('Stack trace: ' + err.stack) throw err } } } // Helper to check cert existence - async function ensureCert (name, subject, hosts, ca) { + async function ensureCert (name, subject, hosts, ca, shouldRecreate = false) { + logger.debug('Checking certificate existence: ' + JSON.stringify({ name, subject, hosts, ca })) try { - await CertificateService.getCertificateEndpoint(name, transaction) - // Cert exists + const existingCert = await CertificateService.getCertificateEndpoint(name, transaction) + if (shouldRecreate && existingCert) { + logger.debug('Certificate exists and needs recreation: ' + name) + await CertificateService.deleteCertificateEndpoint(name, transaction) + logger.debug('Deleted existing certificate: ' + name) + // Create new certificate + await CertificateService.createCertificateEndpoint({ + name, + subject: `${subject}`, + hosts, + ca + }, transaction) + logger.debug('Successfully recreated certificate: ' + name) + } else if (!existingCert) { + 
logger.debug('Certificate not found, creating new certificate: ' + JSON.stringify({ name, subject, hosts, ca })) + await CertificateService.createCertificateEndpoint({ + name, + subject: `${subject}`, + hosts, + ca + }, transaction) + logger.debug('Successfully created certificate: ' + name) + } else { + logger.debug('Certificate already exists: ' + name) + } } catch (err) { if (err.name === 'NotFoundError') { + logger.debug('Certificate not found, creating new certificate: ' + JSON.stringify({ name, subject, hosts, ca })) await CertificateService.createCertificateEndpoint({ name, subject: `${subject}`, hosts, ca }, transaction) + logger.debug('Successfully created certificate: ' + name) } else if (err.name === 'ConflictError') { + logger.debug('Certificate already exists (conflict): ' + name) // Already exists, ignore } else { + logger.error('Error in ensureCert - Name: ' + name + ', Subject: ' + subject + ', Hosts: ' + hosts + ', CA: ' + JSON.stringify(ca) + ', Error: ' + err.message + ', Type: ' + err.name + ', Code: ' + err.code) + logger.error('Stack trace: ' + err.stack) throw err } } } - // Build hosts string from available fields - const hosts = [ - fogData.host, - fogData.ipAddress, - fogData.ipAddressExternal - ].filter(Boolean).join(',') || 'localhost' - try { // Always ensure SITE_CA_CERT exists + logger.debug('Ensuring SITE_CA_CERT exists') await ensureCA(SITE_CA_CERT, SITE_CA_CERT) + // If routerMode is 'none', only ensure DEFAULT_ROUTER_LOCAL_CA and its signed certificate + if (fogData.routerMode === 'none') { + logger.debug('Router mode is none, ensuring DEFAULT_ROUTER_LOCAL_CA exists') + await ensureCA(DEFAULT_ROUTER_LOCAL_CA, DEFAULT_ROUTER_LOCAL_CA) + logger.debug('Ensuring local-agent certificate signed by DEFAULT_ROUTER_LOCAL_CA') + const localHosts = await getLocalCertificateHosts(isKubernetes, namespace) + await ensureCert( + `${uuid}-local-agent`, + `${uuid}-local-agent`, + localHosts, + { type: 'direct', secretName: DEFAULT_ROUTER_LOCAL_CA 
}, + isRouterModeChanged + ) + logger.debug('Successfully completed _handleRouterCertificates for routerMode none') + return + } + + // For other router modes, ensure all other certificates // Always ensure site-server cert exists + logger.debug('Ensuring site-server certificate exists') + const siteHosts = await getSiteCertificateHosts(fogData, transaction) await ensureCert( - `${fogData.uuid}-site-server`, - `${fogData.uuid}-site-server`, - hosts, - { type: 'direct', secretName: SITE_CA_CERT } + `${uuid}-site-server`, + `${uuid}-site-server`, + siteHosts, + { type: 'direct', secretName: SITE_CA_CERT }, + false ) // Always ensure local-ca exists - await ensureCA(`${fogData.uuid}-local-ca`, `${fogData.uuid}-local-ca`) + logger.debug('Ensuring local-ca exists') + await ensureCA(`${uuid}-local-ca`, `${uuid}-local-ca`) // Always ensure local-server cert exists + logger.debug('Ensuring local-server certificate exists') + const localHosts = await getLocalCertificateHosts(isKubernetes, namespace) await ensureCert( - `${fogData.uuid}-local-server`, - `${fogData.uuid}-local-server`, - hosts, - { type: 'direct', secretName: `${fogData.uuid}-local-ca` } + `${uuid}-local-server`, + `${uuid}-local-server`, + localHosts, + { type: 'direct', secretName: `${uuid}-local-ca` }, + isRouterModeChanged ) // Always ensure local-agent cert exists + logger.debug('Ensuring local-agent certificate exists') await ensureCert( - `${fogData.uuid}-local-agent`, - `${fogData.uuid}-local-agent`, - hosts, - { type: 'direct', secretName: `${fogData.uuid}-local-ca` } + `${uuid}-local-agent`, + `${uuid}-local-agent`, + localHosts, + { type: 'direct', secretName: `${uuid}-local-ca` }, + isRouterModeChanged ) - // If routerMode is 'none', also ensure DEFAULT_ROUTER_LOCAL_CA and local-agent signed by it - if (fogData.routerMode === 'none') { - await ensureCA(DEFAULT_ROUTER_LOCAL_CA, DEFAULT_ROUTER_LOCAL_CA) - await ensureCert( - `${fogData.uuid}-local-agent`, - `${fogData.uuid}-local-agent`, - hosts, - 
{ type: 'direct', secretName: DEFAULT_ROUTER_LOCAL_CA } - ) - } + logger.debug('Successfully completed _handleRouterCertificates') } catch (error) { - logger.error('Certificate operation failed:', error) + logger.error('Certificate operation failed - UUID: ' + uuid + ', RouterMode: ' + fogData.routerMode + ', Error: ' + error.message + ', Type: ' + error.name + ', Code: ' + error.code) + logger.error('Stack trace: ' + error.stack) } } @@ -144,7 +239,7 @@ async function createFogEndPoint (fogData, isCLI, transaction) { await Validator.validate(fogData, Validator.schemas.iofogCreate) let createFogData = { - uuid: AppHelper.generateRandomString(32), + uuid: AppHelper.generateUUID(), name: fogData.name, location: fogData.location, latitude: fogData.latitude, @@ -153,6 +248,8 @@ async function createFogEndPoint (fogData, isCLI, transaction) { description: fogData.description, networkInterface: fogData.networkInterface, dockerUrl: fogData.dockerUrl, + containerEngine: fogData.containerEngine, + deploymentType: fogData.deploymentType, diskLimit: fogData.diskLimit, diskDirectory: fogData.diskDirectory, memoryLimit: fogData.memoryLimit, @@ -176,9 +273,6 @@ async function createFogEndPoint (fogData, isCLI, transaction) { timeZone: fogData.timeZone } - // Add certificate handling - await _handleRouterCertificates(fogData, transaction) - createFogData = AppHelper.deleteUndefinedFields(createFogData) // Default router is edge @@ -211,32 +305,75 @@ async function createFogEndPoint (fogData, isCLI, transaction) { const fog = await FogManager.create(createFogData, transaction) - // Set tags + // Set tags (synchronously, as this is a simple DB op) await _setTags(fog, fogData.tags, transaction) - if (fogData.routerMode !== 'none') { - if (!fogData.host && !isCLI) { - throw new Errors.ValidationError(ErrorMessages.HOST_IS_REQUIRED) - } - - await RouterService.createRouterForFog(fogData, fog.uuid, upstreamRouters) - } - - const res = { - uuid: fog.uuid - } - - await 
ChangeTrackingService.create(fog.uuid, transaction) + // Return fog UUID immediately + const res = { uuid: fog.uuid } - if (fogData.abstractedHardwareEnabled) { - await _createHalMicroserviceForFog(fog, null, transaction) - } - - if (fogData.bluetoothEnabled) { - await _createBluetoothMicroserviceForFog(fog, null, transaction) - } + // Start background orchestration + setImmediate(() => { + (async () => { + try { + // --- Begin orchestration logic (previously inside runWithRetries) --- + await _handleRouterCertificates(fogData, createFogData.uuid, false, transaction) + + if (fogData.routerMode !== 'none') { + if (!fogData.host && !isCLI) { + throw new Errors.ValidationError(ErrorMessages.HOST_IS_REQUIRED) + } + await RouterService.createRouterForFog(fogData, fog.uuid, upstreamRouters) + + // Service Distribution Logic + const serviceTags = await _extractServiceTags(fogData.tags) + if (serviceTags.length > 0) { + const services = await _findMatchingServices(serviceTags, transaction) + if (services.length > 0) { + const routerName = `router-${fog.uuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + let config = JSON.parse(routerMicroservice.config || '{}') + for (const service of services) { + const listenerConfig = _buildTcpListenerForFog(service, fog.uuid) + config = _mergeTcpListener(config, listenerConfig) + } + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + await ChangeTrackingService.update(fog.uuid, ChangeTrackingService.events.microserviceConfig, transaction) + } + } + } - await ChangeTrackingService.update(createFogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) + await ChangeTrackingService.create(fog.uuid, transaction) + if (fogData.abstractedHardwareEnabled) { + await 
_createHalMicroserviceForFog(fog, null, transaction) + } + if (fogData.bluetoothEnabled) { + await _createBluetoothMicroserviceForFog(fog, null, transaction) + } + await ChangeTrackingService.update(createFogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) + // --- End orchestration logic --- + // Set fog node as healthy + await FogManager.update({ uuid: fog.uuid }, { warningMessage: 'HEALTHY' }, transaction) + } catch (err) { + logger.error('Background orchestration failed in createFogEndPoint:', err) + // Set fog node as warning with error message + await FogManager.update( + { uuid: fog.uuid }, + { + daemonStatus: FogStates.WARNING, + warningMessage: `Background orchestration error: ${err.message}` + }, + transaction + ) + } + })() + }) return res } @@ -269,6 +406,8 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { description: fogData.description, networkInterface: fogData.networkInterface, dockerUrl: fogData.dockerUrl, + containerEngine: fogData.containerEngine, + deploymentType: fogData.deploymentType, diskLimit: fogData.diskLimit, diskDirectory: fogData.diskDirectory, memoryLimit: fogData.memoryLimit, @@ -297,9 +436,6 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) } - // Add certificate handling - await _handleRouterCertificates(fogData, transaction) - // Update tags await _setTags(oldFog, fogData.tags, transaction) @@ -313,7 +449,6 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { } } - // Update router // Get all router config informations const router = await oldFog.getRouter() const host = fogData.host || lget(router, 'host') @@ -325,74 +460,156 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { const edgeRouterPort = fogData.edgeRouterPort || (router ? 
router.edgeRouterPort : null) let networkRouter - // const isSystem = updateFogData.isSystem === undefined ? oldFog.isSystem : updateFogData.isSystem - // if (isSystem && routerMode !== 'interior') { - // throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_MODE, fogData.routerMode)) - // } - - if (routerMode === 'none') { - networkRouter = await RouterService.getNetworkRouter(fogData.networkRouter) - if (!networkRouter) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, !fogData.networkRouter ? Constants.DEFAULT_ROUTER_NAME : fogData.networkRouter)) - } - // Only delete previous router if there is a network router - if (router) { - // New router mode is none, delete existing router - await _deleteFogRouter(fogData, transaction) - } - } else { - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - const upstreamRouters = await RouterService.validateAndReturnUpstreamRouters(upstreamRoutersIofogUuid, oldFog.isSystem, defaultRouter) - if (!router) { - // Router does not exist yet - networkRouter = await RouterService.createRouterForFog(fogData, oldFog.uuid, upstreamRouters) - } else { - // Update existing router - networkRouter = await RouterService.updateRouter(router, { - messagingPort, interRouterPort, edgeRouterPort, isEdge: routerMode === 'edge', host - }, upstreamRouters) - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) - } + const isSystem = updateFogData.isSystem === undefined ? 
oldFog.isSystem : updateFogData.isSystem + if (isSystem && routerMode !== 'interior') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_MODE, fogData.routerMode)) } - updateFogData.routerId = networkRouter.id - // If router changed, set routerChanged flag - if (updateFogData.routerId !== oldFog.routerId || updateFogData.routerMode !== oldFog.routerMode) { - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceList, transaction) + let isRouterModeChanged = false + const oldRouterMode = (router ? (router.isEdge ? 'edge' : 'interior') : 'none') + if (fogData.routerMode && fogData.routerMode !== oldRouterMode) { + if (fogData.routerMode === 'none' || oldRouterMode === 'none') { + isRouterModeChanged = true + } } await FogManager.update(queryFogData, updateFogData, transaction) await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.config, transaction) - let msChanged = false + // Return immediately + const res = { uuid: fogData.uuid } - // Update Microservice extra hosts - if (updateFogData.host && updateFogData.host !== oldFog.host) { - await _updateMicroserviceExtraHosts(fogData.uuid, updateFogData.host, transaction) - } + // Start background orchestration + setImmediate(() => { + (async () => { + try { + // --- Begin orchestration logic --- + await _handleRouterCertificates(fogData, fogData.uuid, isRouterModeChanged, transaction) + + if (routerMode === 'none') { + networkRouter = await RouterService.getNetworkRouter(fogData.networkRouter) + if (!networkRouter) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, !fogData.networkRouter ? 
Constants.DEFAULT_ROUTER_NAME : fogData.networkRouter)) + } + if (router) { + await _deleteFogRouter(fogData, transaction) + } + } else { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + const upstreamRouters = await RouterService.validateAndReturnUpstreamRouters(upstreamRoutersIofogUuid, oldFog.isSystem, defaultRouter) + if (!router) { + networkRouter = await RouterService.createRouterForFog(fogData, oldFog.uuid, upstreamRouters) + // --- Service Distribution Logic --- + const serviceTags = await _extractServiceTags(fogData.tags) + if (serviceTags.length > 0) { + const services = await _findMatchingServices(serviceTags, transaction) + if (services.length > 0) { + const routerName = `router-${fogData.uuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + let config = JSON.parse(routerMicroservice.config || '{}') + for (const service of services) { + const listenerConfig = _buildTcpListenerForFog(service, fogData.uuid) + config = _mergeTcpListener(config, listenerConfig) + } + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceConfig, transaction) + } + } + } else { + const existingConnectors = await _extractExistingTcpConnectors(fogData.uuid, transaction) + networkRouter = await RouterService.updateRouter(router, { + messagingPort, interRouterPort, edgeRouterPort, isEdge: routerMode === 'edge', host + }, upstreamRouters, fogData.containerEngine) + // --- Service Distribution Logic --- + const serviceTags = await _extractServiceTags(fogData.tags) + const routerName = `router-${fogData.uuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName 
}, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + let config = JSON.parse(routerMicroservice.config || '{}') + if (serviceTags.length > 0) { + const services = await _findMatchingServices(serviceTags, transaction) + if (services.length > 0) { + for (const service of services) { + const listenerConfig = _buildTcpListenerForFog(service, fogData.uuid) + config = _mergeTcpListener(config, listenerConfig) + } + } + } + // Merge back existing connectors if any + if (existingConnectors && Object.keys(existingConnectors).length > 0) { + for (const connectorName in existingConnectors) { + const connectorObj = existingConnectors[connectorName] + config = _mergeTcpConnector(config, connectorObj) + } + } + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceConfig, transaction) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) + } + } + updateFogData.routerId = networkRouter.id - if (oldFog.abstractedHardwareEnabled === true && fogData.abstractedHardwareEnabled === false) { - await _deleteHalMicroserviceByFog(fogData, transaction) - msChanged = true - } - if (oldFog.abstractedHardwareEnabled === false && fogData.abstractedHardwareEnabled === true) { - await _createHalMicroserviceForFog(fogData, oldFog, transaction) - msChanged = true - } + // If router changed, set routerChanged flag + if (updateFogData.routerId !== oldFog.routerId || updateFogData.routerMode !== oldFog.routerMode) { + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceList, transaction) + } - if (oldFog.bluetoothEnabled === true && fogData.bluetoothEnabled === 
false) { - await _deleteBluetoothMicroserviceByFog(fogData, transaction) - msChanged = true - } - if (oldFog.bluetoothEnabled === false && fogData.bluetoothEnabled === true) { - await _createBluetoothMicroserviceForFog(fogData, oldFog, transaction) - msChanged = true - } + let msChanged = false + if (updateFogData.host && updateFogData.host !== oldFog.host) { + await _updateMicroserviceExtraHosts(fogData.uuid, updateFogData.host, transaction) + } + if (oldFog.abstractedHardwareEnabled === true && fogData.abstractedHardwareEnabled === false) { + await _deleteHalMicroserviceByFog(fogData, transaction) + msChanged = true + } + if (oldFog.abstractedHardwareEnabled === false && fogData.abstractedHardwareEnabled === true) { + await _createHalMicroserviceForFog(fogData, oldFog, transaction) + msChanged = true + } + if (oldFog.bluetoothEnabled === true && fogData.bluetoothEnabled === false) { + await _deleteBluetoothMicroserviceByFog(fogData, transaction) + msChanged = true + } + if (oldFog.bluetoothEnabled === false && fogData.bluetoothEnabled === true) { + await _createBluetoothMicroserviceForFog(fogData, oldFog, transaction) + msChanged = true + } + if (msChanged) { + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) + } + // --- End orchestration logic --- + // Set fog node as healthy + await FogManager.update({ uuid: fogData.uuid }, { warningMessage: 'HEALTHY' }, transaction) + } catch (err) { + logger.error('Background orchestration failed in updateFogEndPoint:', err) + await FogManager.update( + { uuid: fogData.uuid }, + { + daemonStatus: FogStates.WARNING, + warningMessage: `Background orchestration error: ${err.message}` + }, + transaction + ) + } + })() + }) - if (msChanged) { - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) - } + // Return immediately + return res } async function _updateMicroserviceExtraHosts (fogUuid, host, transaction) 
{ @@ -445,7 +662,7 @@ async function _deleteFogRouter (fogData, transaction) { } // Update router config - await RouterService.updateConfig(router.id, transaction) + await RouterService.updateConfig(router.id, fogData.containerEngine, transaction) // Set routerChanged flag await ChangeTrackingService.update(router.iofogUuid, ChangeTrackingService.events.routerChanged, transaction) } @@ -531,14 +748,33 @@ async function _getFogEdgeResources (fog, transaction) { return resources.map(EdgeResourceService.buildGetObject) } +async function _getFogVolumeMounts (fog, transaction) { + const volumeMountAttributes = [ + 'name', + 'version', + 'configMapName', + 'secretName' + ] + const volumeMounts = await fog.getVolumeMounts({ attributes: volumeMountAttributes }) + return volumeMounts.map(vm => { + return { + name: vm.name, + version: vm.version, + configMapName: vm.configMapName, + secretName: vm.secretName + } + }) +} + async function _getFogExtraInformation (fog, transaction) { const routerConfig = await _getFogRouterConfig(fog, transaction) const edgeResources = await _getFogEdgeResources(fog, transaction) + const volumeMounts = await _getFogVolumeMounts(fog, transaction) // Transform to plain JS object if (fog.toJSON && typeof fog.toJSON === 'function') { fog = fog.toJSON() } - return { ...fog, tags: _mapTags(fog), ...routerConfig, edgeResources } + return { ...fog, tags: _mapTags(fog), ...routerConfig, edgeResources, volumeMounts } } // Map tags to string array @@ -547,6 +783,34 @@ function _mapTags (fog) { return fog.tags ? 
fog.tags.map(t => t.value) : [] } +/** + * Extracts service-related tags from fog node tags + * @param {Array} fogTags - Array of tags from fog node + * @returns {Array} Array of service tags (e.g., ["all", "foo", "bar"]) + */ +async function _extractServiceTags (fogTags) { + if (!fogTags || !Array.isArray(fogTags)) { + return [] + } + + // Filter tags that start with SERVICE_ANNOTATION_TAG + const serviceTags = fogTags + .filter(tag => tag.startsWith(SERVICE_ANNOTATION_TAG)) + .map(tag => { + // Extract the value after the colon + const parts = tag.split(':') + return parts.length > 1 ? parts[1].trim() : '' + }) + .filter(tag => tag !== '') // Remove empty tags + + // If we have "all" tag, return just that + if (serviceTags.includes('all')) { + return ['all'] + } + + return serviceTags +} + async function getFog (fogData, isCLI, transaction) { await Validator.validate(fogData, Validator.schemas.iofogGet) @@ -564,7 +828,8 @@ async function getFogEndPoint (fogData, isCLI, transaction) { return getFog(fogData, isCLI, transaction) } -async function getFogListEndPoint (filters, isCLI, isSystem, transaction) { +// async function getFogListEndPoint (filters, isCLI, isSystem, transaction) { +async function getFogListEndPoint (filters, isCLI, transaction) { await Validator.validate(filters, Validator.schemas.iofogFilters) // // If listing system agent through REST API, make sure user is authenticated @@ -572,7 +837,8 @@ async function getFogListEndPoint (filters, isCLI, isSystem, transaction) { // throw new Errors.AuthenticationError('Unauthorized') // } - const queryFogData = isSystem ? { isSystem } : (isCLI ? {} : { isSystem: false }) + // const queryFogData = isSystem ? { isSystem } : (isCLI ? 
{} : { isSystem: false }) + const queryFogData = {} let fogs = await FogManager.findAllWithTags(queryFogData, transaction) fogs = _filterFogs(fogs, filters) @@ -751,7 +1017,7 @@ async function _createHalMicroserviceForFog (fogData, oldFog, transaction) { const halItem = await CatalogService.getHalCatalogItem(transaction) const halMicroserviceData = { - uuid: AppHelper.generateRandomString(32), + uuid: AppHelper.generateUUID(), name: `hal-${fogData.uuid.toLowerCase()}`, config: '{}', catalogItemId: halItem.id, @@ -783,7 +1049,7 @@ async function _createBluetoothMicroserviceForFog (fogData, oldFog, transaction) const bluetoothItem = await CatalogService.getBluetoothCatalogItem(transaction) const bluetoothMicroserviceData = { - uuid: AppHelper.generateRandomString(32), + uuid: AppHelper.generateUUID(), name: `ble-${fogData.uuid.toLowerCase()}`, config: '{}', catalogItemId: bluetoothItem.id, @@ -824,6 +1090,135 @@ async function setFogPruneCommandEndPoint (fogData, isCLI, transaction) { await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.prune, transaction) } +/** + * Finds services that match the fog node's service tags + * @param {Array} serviceTags - Array of service tags from fog node + * @param {Object} transaction - Database transaction + * @returns {Promise>} Array of matching services + */ +async function _findMatchingServices (serviceTags, transaction) { + if (!serviceTags || serviceTags.length === 0) { + return [] + } + + // If 'all' tag is present, get all services + if (serviceTags.includes('all')) { + return ServiceManager.findAllWithTags({}, transaction) + } + + // For each service tag, find matching services + const servicesPromises = serviceTags.map(async (tag) => { + const queryData = { + '$tags.value$': `${tag}` + } + return ServiceManager.findAllWithTags(queryData, transaction) + }) + + // Wait for all queries to complete + const servicesArrays = await Promise.all(servicesPromises) + + // Flatten arrays and remove duplicates 
based on service name + const seen = new Set() + const uniqueServices = servicesArrays + .flat() + .filter(service => { + if (seen.has(service.name)) { + return false + } + seen.add(service.name) + return true + }) + + return uniqueServices +} + +/** + * Builds TCP listener configuration for a service on a specific fog node + * @param {Object} service - Service object containing name and bridgePort + * @param {string} fogNodeUuid - UUID of the fog node + * @returns {Object} TCP listener configuration + */ +function _buildTcpListenerForFog (service, fogNodeUuid) { + return { + name: `${service.name}-listener`, + port: service.bridgePort.toString(), + address: service.name, + siteId: fogNodeUuid + } +} + +/** + * Gets the router microservice configuration for a fog node + * @param {string} fogNodeUuid - UUID of the fog node + * @param {Object} transaction - Database transaction + * @returns {Promise} Router microservice configuration + */ +async function _getRouterMicroserviceConfig (fogNodeUuid, transaction) { + const routerName = `router-${fogNodeUuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + const routerConfig = JSON.parse(routerMicroservice.config || '{}') + return routerConfig +} + +/** + * Extracts existing TCP connectors from router configuration + * @param {string} fogNodeUuid - UUID of the fog node + * @param {Object} transaction - Database transaction + * @returns {Promise} Object containing TCP connectors + */ +async function _extractExistingTcpConnectors (fogNodeUuid, transaction) { + const routerConfig = await _getRouterMicroserviceConfig(fogNodeUuid, transaction) + // Return empty object if no bridges or tcpConnectors exist + if (!routerConfig.bridges || !routerConfig.bridges.tcpConnectors) { + return {} + } + + return routerConfig.bridges.tcpConnectors +} + +/** + * 
Merges a single TCP connector into router configuration + * @param {Object} routerConfig - Base router configuration + * @param {Object} connectorObj - TCP connector object (must have 'name' property) + * @returns {Object} Updated router configuration + */ +function _mergeTcpConnector (routerConfig, connectorObj) { + if (!connectorObj || !connectorObj.name) { + throw new Error('Connector object must have a name property') + } + if (!routerConfig.bridges) { + routerConfig.bridges = {} + } + if (!routerConfig.bridges.tcpConnectors) { + routerConfig.bridges.tcpConnectors = {} + } + routerConfig.bridges.tcpConnectors[connectorObj.name] = connectorObj + return routerConfig +} + +/** + * Merges a single TCP listener into router configuration + * @param {Object} routerConfig - Base router configuration + * @param {Object} listenerObj - TCP listener object (must have 'name' property) + * @returns {Object} Updated router configuration + */ +function _mergeTcpListener (routerConfig, listenerObj) { + if (!listenerObj || !listenerObj.name) { + throw new Error('Listener object must have a name property') + } + if (!routerConfig.bridges) { + routerConfig.bridges = {} + } + if (!routerConfig.bridges.tcpListeners) { + routerConfig.bridges.tcpListeners = {} + } + routerConfig.bridges.tcpListeners[listenerObj.name] = listenerObj + return routerConfig +} + module.exports = { createFogEndPoint: TransactionDecorator.generateTransaction(createFogEndPoint), updateFogEndPoint: TransactionDecorator.generateTransaction(updateFogEndPoint), @@ -836,5 +1231,14 @@ module.exports = { getHalHardwareInfoEndPoint: TransactionDecorator.generateTransaction(getHalHardwareInfoEndPoint), getHalUsbInfoEndPoint: TransactionDecorator.generateTransaction(getHalUsbInfoEndPoint), getFog: getFog, - setFogPruneCommandEndPoint: TransactionDecorator.generateTransaction(setFogPruneCommandEndPoint) + setFogPruneCommandEndPoint: TransactionDecorator.generateTransaction(setFogPruneCommandEndPoint), + 
_extractServiceTags, + _findMatchingServices: TransactionDecorator.generateTransaction(_findMatchingServices), + _buildTcpListenerForFog, + _getRouterMicroserviceConfig: TransactionDecorator.generateTransaction(_getRouterMicroserviceConfig), + _extractExistingTcpConnectors: TransactionDecorator.generateTransaction(_extractExistingTcpConnectors), + _mergeTcpConnector, + _mergeTcpListener, + checkKubernetesEnvironment, + _handleRouterCertificates: TransactionDecorator.generateTransaction(_handleRouterCertificates) } diff --git a/src/services/microservice-ports/microservice-port.js b/src/services/microservice-ports/microservice-port.js new file mode 100644 index 00000000..e866f191 --- /dev/null +++ b/src/services/microservice-ports/microservice-port.js @@ -0,0 +1,211 @@ +/* only "[a-zA-Z0-9][a-zA-Z0-9_.-]" are allowed + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const MicroservicePortManager = require('../../data/managers/microservice-port-manager') +const MicroserviceManager = require('../../data/managers/microservice-manager') +const ChangeTrackingService = require('../change-tracking-service') +const AppHelper = require('../../helpers/app-helper') +const Errors = require('../../helpers/errors') +const ErrorMessages = require('../../helpers/error-messages') +const Op = require('sequelize').Op +const FogManager = require('../../data/managers/iofog-manager') + +const { RESERVED_PORTS } = require('../../helpers/constants') + +async function _checkForDuplicatePorts (agent, localPort, transaction) { + if (RESERVED_PORTS.find(port => port === localPort)) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.PORT_RESERVED, localPort)) + } + + const microservices = await agent.getMicroservice() + for (const microservice of microservices) { + const ports = await microservice.getPorts() + if (ports.find(port => port.portExternal === localPort)) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.PORT_NOT_AVAILABLE, localPort)) + } + } +} + +// Validate port and populate, mapping.localAgent +async function validatePortMapping (agent, mapping, transaction) { + await _checkForDuplicatePorts(agent, mapping.external, transaction) +} + +async function validatePortMappings (microserviceData, transaction) { + if (!microserviceData.ports || microserviceData.ports.length === 0) { + return + } + + const localAgent = await FogManager.findOne({ uuid: microserviceData.iofogUuid }, transaction) + if (!localAgent) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, microserviceData.iofogUuid)) + } + + // Will be filled by validatePortMapping + for (const 
mapping of microserviceData.ports) { + await validatePortMapping(localAgent, mapping, transaction) + } +} + +async function createPortMapping (microservice, portMappingData, transaction) { + if (!microservice.iofogUuid) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.REQUIRED_FOG_NODE)) + } + + const msPorts = await MicroservicePortManager.findOne({ + microserviceUuid: microservice.uuid, + [Op.or]: [] + }, transaction) + if (msPorts) { + throw new Errors.ValidationError(ErrorMessages.PORT_MAPPING_ALREADY_EXISTS) + } + + portMappingData.protocol = portMappingData.protocol || '' + + return _createSimplePortMapping(microservice, portMappingData, transaction) +} + +async function _deletePortMapping (microservice, portMapping, transaction) { + await _deleteSimplePortMapping(microservice, portMapping, transaction) +} + +async function _createSimplePortMapping (microservice, portMappingData, transaction) { + // create port mapping + const mappingData = { + portInternal: portMappingData.internal, + portExternal: portMappingData.external, + isUdp: portMappingData.protocol.toLowerCase() === 'udp', + microserviceUuid: microservice.uuid + } + + await MicroservicePortManager.create(mappingData, transaction) + await switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, transaction) +} + +async function _deleteSimplePortMapping (microservice, msPorts, transaction) { + await MicroservicePortManager.delete({ id: msPorts.id }, transaction) + + const updateRebuildMs = { + rebuild: true + } + await MicroserviceManager.update({ uuid: microservice.uuid }, updateRebuildMs, transaction) + await ChangeTrackingService.update(microservice.iofogUuid, ChangeTrackingService.events.microserviceCommon, transaction) +} + +async function _buildPortsList (portsPairs, transaction) { + const res = [] + for (const ports of portsPairs) { + const portMappingResponseData = { + internal: ports.portInternal, + external: ports.portExternal, + protocol: ports.isUdp ? 
'udp' : 'tcp' + } + res.push(portMappingResponseData) + } + return res +} + +async function switchOnUpdateFlagsForMicroservicesForPortMapping (microservice, transaction) { + const updateRebuildMs = { + rebuild: true + } + await MicroserviceManager.update({ uuid: microservice.uuid }, updateRebuildMs, transaction) + + await ChangeTrackingService.update(microservice.iofogUuid, ChangeTrackingService.events.microserviceConfig, transaction) +} + +async function listPortMappings (microserviceUuid, isCLI, transaction) { + const where = isCLI + ? { uuid: microserviceUuid } + : { uuid: microserviceUuid } + const microservice = await MicroserviceManager.findOne(where, transaction) + if (!microservice) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, microserviceUuid)) + } + + const portsPairs = await MicroservicePortManager.findAll({ microserviceUuid }, transaction) + return _buildPortsList(portsPairs, transaction) +} + +async function deletePortMapping (microserviceUuid, internalPort, isCLI, transaction) { + const where = isCLI + ? { uuid: microserviceUuid } + : { uuid: microserviceUuid } + + const microservice = await MicroserviceManager.findMicroserviceOnGet(where, transaction) + if (!microservice) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, microserviceUuid)) + } + + if (!internalPort) { + throw new Errors.ValidationError(ErrorMessages.PORT_MAPPING_INTERNAL_PORT_NOT_PROVIDED) + } + + const msPorts = await MicroservicePortManager.findOne({ + microserviceUuid: microservice.uuid, + portInternal: internalPort + }, transaction) + if (!msPorts) { + throw new Errors.NotFoundError('port mapping not exists') + } + + await _deletePortMapping(microservice, msPorts, transaction) +} + +async function deleteSystemPortMapping (microserviceUuid, internalPort, isCLI, transaction) { + const where = isCLI + ? 
{ uuid: microserviceUuid } + : { uuid: microserviceUuid } + + const microservice = await MicroserviceManager.findOne(where, transaction) + if (!microservice) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, microserviceUuid)) + } + + if (!internalPort) { + throw new Errors.ValidationError(ErrorMessages.PORT_MAPPING_INTERNAL_PORT_NOT_PROVIDED) + } + + const msPorts = await MicroservicePortManager.findOne({ + microserviceUuid: microservice.uuid, + portInternal: internalPort + }, transaction) + if (!msPorts) { + throw new Errors.NotFoundError('port mapping not exists') + } + + await _deletePortMapping(microservice, msPorts, transaction) +} + +async function deletePortMappings (microservice, transaction) { + const portMappings = await MicroservicePortManager.findAll({ microserviceUuid: microservice.uuid }, transaction) + for (const ports of portMappings) { + await _deletePortMapping(microservice, ports, transaction) + } +} + +async function getPortMappings (microserviceUuid, transaction) { + return MicroservicePortManager.findAll({ microserviceUuid }, transaction) +} + +module.exports = { + validatePortMappings, + validatePortMapping, + switchOnUpdateFlagsForMicroservicesForPortMapping, + createPortMapping, + listPortMappings, + deletePortMapping, + deleteSystemPortMapping, + deletePortMappings, + getPortMappings +} diff --git a/src/services/microservices-service.js b/src/services/microservices-service.js index 8a725720..c98a918d 100644 --- a/src/services/microservices-service.js +++ b/src/services/microservices-service.js @@ -19,9 +19,10 @@ const MicroserviceCdiDevManager = require('../data/managers/microservice-cdi-dev const MicroserviceCapAddManager = require('../data/managers/microservice-cap-add-manager') const MicroserviceCapDropManager = require('../data/managers/microservice-cap-drop-manager') const MicroserviceEnvManager = require('../data/managers/microservice-env-manager') -const MicroservicePortService = 
require('../services/microservice-ports/default') +const MicroservicePortService = require('../services/microservice-ports/microservice-port') const CatalogItemImageManager = require('../data/managers/catalog-item-image-manager') const RegistryManager = require('../data/managers/registry-manager') +// const RouterManager = require('../data/managers/router-manager') const MicroserviceStates = require('../enums/microservice-state') const VolumeMappingManager = require('../data/managers/volume-mapping-manager') const ChangeTrackingService = require('./change-tracking-service') @@ -33,6 +34,11 @@ const ApplicationManager = require('../data/managers/application-manager') const CatalogService = require('../services/catalog-service') const RoutingManager = require('../data/managers/routing-manager') const RoutingService = require('../services/routing-service') +const ServiceManager = require('../data/managers/service-manager') +const ServiceServices = require('./services-service') +const ConfigMapManager = require('../data/managers/config-map-manager') +const SecretManager = require('../data/managers/secret-manager') + const Op = require('sequelize').Op const FogManager = require('../data/managers/iofog-manager') const MicroserviceExtraHostManager = require('../data/managers/microservice-extra-host-manager') @@ -130,7 +136,7 @@ function _validateImagesAgainstCatalog (catalogItem, images) { } } -async function _validateLocalAppHostTemplate (extraHost, templateArgs, msvc, transaction) { +async function _validateLocalAppHostTemplate (extraHost, templateArgs, msvc, fogUuid, transaction) { if (templateArgs.length !== 4) { throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_HOST_TEMPLATE, templateArgs.join('.'))) } @@ -138,13 +144,17 @@ async function _validateLocalAppHostTemplate (extraHost, templateArgs, msvc, tra if (!fog) { throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.NOT_FOUND_HOST_TEMPLATE, templateArgs[2])) } + if 
(fogUuid !== fog.uuid) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.NOT_FOUND_APPS_TEMPLATE, msvc.name)) + } + extraHost.targetFogUuid = fog.uuid - extraHost.value = fog.host || fog.ipAddress + extraHost.value = `iofog_${msvc.uuid}` return extraHost } -async function _validateAppHostTemplate (extraHost, templateArgs, transaction) { +async function _validateAppHostTemplate (extraHost, templateArgs, fogUuid, transaction) { if (templateArgs.length < 4) { throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_HOST_TEMPLATE, templateArgs.join('.'))) } @@ -158,11 +168,8 @@ async function _validateAppHostTemplate (extraHost, templateArgs, transaction) { } extraHost.templateType = 'Apps' extraHost.targetMicroserviceUuid = msvc.uuid - if (templateArgs[3] === 'public') { - return MicroservicePortService.validatePublicPortAppHostTemplate(extraHost, templateArgs, msvc, transaction) - } if (templateArgs[3] === 'local') { - return _validateLocalAppHostTemplate(extraHost, templateArgs, msvc, transaction) + return _validateLocalAppHostTemplate(extraHost, templateArgs, msvc, fogUuid, transaction) } throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_HOST_TEMPLATE, templateArgs.join('.'))) } @@ -183,7 +190,10 @@ async function _validateAgentHostTemplate (extraHost, templateArgs, transaction) return extraHost } -async function _validateExtraHost (extraHostData, transaction) { +async function _validateExtraHost (extraHostData, fogUuid, transaction) { + if (extraHostData.name === 'service.local') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_HOST_TEMPLATE, 'Extra Host name cannot be service.local')) + } const extraHost = { templateType: 'Litteral', name: extraHostData.name, @@ -197,20 +207,20 @@ async function _validateExtraHost (extraHostData, transaction) { const templateArgs = template.split('.') extraHost.templateType = templateArgs[0] if (templateArgs[0] === 
'Apps') { - return _validateAppHostTemplate(extraHost, templateArgs, transaction) + return _validateAppHostTemplate(extraHost, templateArgs, fogUuid, transaction) } else if (templateArgs[0] === 'Agents') { return _validateAgentHostTemplate(extraHost, templateArgs, transaction) } throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_HOST_TEMPLATE, template)) } -async function _validateExtraHosts (microserviceData, transaction) { +async function _validateExtraHosts (microserviceData, fogUuid, transaction) { if (!microserviceData.extraHosts || microserviceData.extraHosts.length === 0) { return [] } const extraHosts = [] for (const extraHost of microserviceData.extraHosts) { - extraHosts.push(await _validateExtraHost(extraHost, transaction)) + extraHosts.push(await _validateExtraHost(extraHost, fogUuid, transaction)) } return extraHosts } @@ -270,7 +280,7 @@ async function createMicroserviceEndPoint (microserviceData, isCLI, transaction) } // validate extraHosts - const extraHosts = await _validateExtraHosts(microserviceData, transaction) + const extraHosts = await _validateExtraHosts(microserviceData, fog.uuid, transaction) await MicroservicePortService.validatePortMappings(microserviceData, transaction) @@ -282,26 +292,27 @@ async function createMicroserviceEndPoint (microserviceData, isCLI, transaction) await _createMicroserviceImages(microservice, microserviceData.images, transaction) } - const publicPorts = [] - const proxyPorts = [] + // const publicPorts = [] + // const proxyPorts = [] if (microserviceData.ports) { for (const mapping of microserviceData.ports) { - const res = await MicroservicePortService.createPortMapping(microservice, mapping, transaction) - if (res) { - if (res.publicLinks) { - publicPorts.push({ - internal: mapping.internal, - external: mapping.external, - publicLinks: res.publicLinks - }) - } else if (res.proxy) { - proxyPorts.push({ - internal: mapping.internal, - external: mapping.external, - proxy: res.proxy - }) 
- } - } + // const res = await MicroservicePortService.createPortMapping(microservice, mapping, transaction) + await MicroservicePortService.createPortMapping(microservice, mapping, transaction) + // if (res) { + // if (res.publicLinks) { + // publicPorts.push({ + // internal: mapping.internal, + // external: mapping.external, + // publicLinks: res.publicLinks + // }) + // } else if (res.proxy) { + // proxyPorts.push({ + // internal: mapping.internal, + // external: mapping.external, + // proxy: res.proxy + // }) + // } + // } } } @@ -379,12 +390,12 @@ async function createMicroserviceEndPoint (microserviceData, isCLI, transaction) uuid: microservice.uuid, name: microservice.name } - if (publicPorts.length) { - res.publicPorts = publicPorts - } - if (proxyPorts.length) { - res.proxies = proxyPorts - } + // if (publicPorts.length) { + // res.publicPorts = publicPorts + // } + // if (proxyPorts.length) { + // res.proxies = proxyPorts + // } return res } @@ -416,11 +427,15 @@ async function _updateRelatedExtraHostTargetFog (extraHost, newFogUuid, transact async function _updateRelatedExtraHosts (updatedMicroservice, transaction) { const extraHosts = await MicroserviceExtraHostManager.findAll({ targetMicroserviceUuid: updatedMicroservice.uuid }, transaction) for (const extraHost of extraHosts) { - if (!extraHost.publicPort) { - // Local port, update target fog and host if microservice moved - if (extraHost.targetFogUuid !== updatedMicroservice.iofogUuid) { - await _updateRelatedExtraHostTargetFog(extraHost, updatedMicroservice.iofogUuid, transaction) - } + // if (!extraHost.publicPort) { + // // Local port, update target fog and host if microservice moved + // if (extraHost.targetFogUuid !== updatedMicroservice.iofogUuid) { + // await _updateRelatedExtraHostTargetFog(extraHost, updatedMicroservice.iofogUuid, transaction) + // } + // } + // Local port, update target fog and host if microservice moved + if (extraHost.targetFogUuid !== updatedMicroservice.iofogUuid) { + 
await _updateRelatedExtraHostTargetFog(extraHost, updatedMicroservice.iofogUuid, transaction) } } } @@ -436,14 +451,15 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD uuid: microserviceUuid } + const newFog = await _findFog(microserviceData, isCLI, transaction) || {} // validate extraHosts - const extraHosts = microserviceData.extraHosts ? await _validateExtraHosts(microserviceData, transaction) : null + const extraHosts = microserviceData.extraHosts ? await _validateExtraHosts(microserviceData, newFog.uuid, transaction) : null const config = _validateMicroserviceConfig(microserviceData.config) const annotations = _validateMicroserviceAnnotations(microserviceData.annotations) - const newFog = await _findFog(microserviceData, isCLI, transaction) || {} + // const newFog = await _findFog(microserviceData, isCLI, transaction) || {} const microserviceToUpdate = { name: microserviceData.name, config: config, @@ -453,6 +469,8 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, + pidMode: microserviceData.pidMode, + ipcMode: microserviceData.ipcMode, cdiDevices: microserviceData.cdiDevices, capAdd: microserviceData.capAdd, capDrop: microserviceData.capDrop, @@ -568,6 +586,8 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD // Set rebuild flag if needed microserviceDataUpdate.rebuild = microserviceDataUpdate.rebuild || !!( (microserviceDataUpdate.rootHostAccess !== undefined && microservice.rootHostAccess !== microserviceDataUpdate.rootHostAccess) || + microserviceDataUpdate.pidMode || + microserviceDataUpdate.ipcMode || microserviceDataUpdate.env || microserviceDataUpdate.cmd || microserviceDataUpdate.cdiDevices || @@ -614,10 +634,6 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD await _updateCapDrop(microserviceDataUpdate.capDrop, 
microserviceUuid, transaction) } - if (microserviceDataUpdate.iofogUuid && microserviceDataUpdate.iofogUuid !== microservice.iofogUuid) { - await MicroservicePortService.movePublicPortsToNewFog(updatedMicroservice, transaction) - } - if (needStatusReset) { const microserviceStatus = { status: MicroserviceStates.QUEUED, @@ -658,14 +674,15 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i uuid: microserviceUuid } + const newFog = await _findFog(microserviceData, isCLI, transaction) || {} // validate extraHosts - const extraHosts = microserviceData.extraHosts ? await _validateExtraHosts(microserviceData, transaction) : null + const extraHosts = microserviceData.extraHosts ? await _validateExtraHosts(microserviceData, newFog.uuid, transaction) : null const config = _validateMicroserviceConfig(microserviceData.config) const annotations = _validateMicroserviceAnnotations(microserviceData.annotations) - const newFog = await _findFog(microserviceData, isCLI, transaction) || {} + // const newFog = await _findFog(microserviceData, isCLI, transaction) || {} const microserviceToUpdate = { name: microserviceData.name, config: config, @@ -675,6 +692,8 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, + pidMode: microserviceData.pidMode, + ipcMode: microserviceData.ipcMode, cdiDevices: microserviceData.cdiDevices, capAdd: microserviceData.capAdd, capDrop: microserviceData.capDrop, @@ -794,6 +813,8 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i // Set rebuild flag if needed microserviceDataUpdate.rebuild = microserviceDataUpdate.rebuild || !!( (microserviceDataUpdate.rootHostAccess !== undefined && microservice.rootHostAccess !== microserviceDataUpdate.rootHostAccess) || + microserviceDataUpdate.pidMode || + microserviceDataUpdate.ipcMode || microserviceDataUpdate.env || 
microserviceDataUpdate.cmd || microserviceDataUpdate.cdiDevices || @@ -839,9 +860,10 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i if (microserviceDataUpdate.capDrop) { await _updateCapDrop(microserviceDataUpdate.capDrop, microserviceUuid, transaction) } - - if (microserviceDataUpdate.iofogUuid && microserviceDataUpdate.iofogUuid !== microservice.iofogUuid) { - await MicroservicePortService.movePublicPortsToNewFog(updatedMicroservice, transaction) + // TODO: Implement moveServiceToNewFog + const existingService = await ServiceManager.findOne({ type: `microservice`, resource: microservice.uuid }, transaction) + if (microserviceDataUpdate.iofogUuid && microserviceDataUpdate.iofogUuid !== microservice.iofogUuid && existingService) { + await ServiceServices.moveMicroserviceTcpBridgeToNewFog(existingService, microserviceDataUpdate.iofogUuid, microservice.iofogUuid, transaction) } // Update tags @@ -990,6 +1012,11 @@ async function deleteMicroserviceEndPoint (microserviceUuid, microserviceData, i await deleteMicroserviceWithRoutesAndPortMappings(microservice, transaction) + const existingService = await ServiceManager.findOne({ type: `microservice`, resource: microservice.uuid }, transaction) + if (existingService) { + logger.info(`Deleting service ${existingService.name}`) + await ServiceServices.deleteServiceEndpoint(existingService.name, transaction) + } await _updateChangeTracking(false, microservice.iofogUuid, transaction) } @@ -1099,8 +1126,51 @@ async function _createEnv (microservice, envData, transaction) { microserviceUuid: microservice.uuid } + // Handle valueFromSecret + if (envData.valueFromSecret) { + const [secretName, dataKey] = envData.valueFromSecret.split('/') + if (!secretName || !dataKey) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_SECRET_REFERENCE, envData.valueFromSecret)) + } + const secret = await SecretManager.getSecret(secretName, transaction) + if (!secret) { + throw 
new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.SECRET_NOT_FOUND, secretName)) + } + if (!secret.data[dataKey]) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SECRET_KEY_NOT_FOUND, dataKey, secretName)) + } + // If it's a TLS secret, decode the base64 value + if (secret.type === 'tls') { + try { + msEnvData.value = Buffer.from(secret.data[dataKey], 'base64').toString('utf-8') + } catch (error) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_BASE64_VALUE, dataKey, secretName)) + } + } else { + msEnvData.value = secret.data[dataKey] + } + msEnvData.valueFromSecret = envData.valueFromSecret + } + + // Handle valueFromConfigMap + if (envData.valueFromConfigMap) { + const [configMapName, dataKey] = envData.valueFromConfigMap.split('/') + if (!configMapName || !dataKey) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_CONFIGMAP_REFERENCE, envData.valueFromConfigMap)) + } + const configMap = await ConfigMapManager.getConfigMap(configMapName, transaction) + if (!configMap) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_NOT_FOUND, configMapName)) + } + if (!configMap.data[dataKey]) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_KEY_NOT_FOUND, dataKey, configMapName)) + } + msEnvData.value = configMap.data[dataKey] + msEnvData.valueFromConfigMap = envData.valueFromConfigMap + } + await MicroserviceEnvManager.create(msEnvData, transaction) - await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) + await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, transaction) } async function _createArg (microservice, arg, transaction) { @@ -1114,7 +1184,7 @@ async function _createArg (microservice, arg, transaction) { } await MicroserviceArgManager.create(msArgData, transaction) - await 
MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) + await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, transaction) } async function _createCdiDevices (microservice, cdiDevices, transaction) { @@ -1128,7 +1198,7 @@ async function _createCdiDevices (microservice, cdiDevices, transaction) { } await MicroserviceCdiDevManager.create(msCdiDevicesData, transaction) - await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) + await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, transaction) } async function _createCapAdd (microservice, capAdd, transaction) { @@ -1142,7 +1212,7 @@ async function _createCapAdd (microservice, capAdd, transaction) { } await MicroserviceCapAddManager.create(msCapAddData, transaction) - await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) + await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, transaction) } async function _createCapDrop (microservice, capDrop, transaction) { @@ -1156,7 +1226,7 @@ async function _createCapDrop (microservice, capDrop, transaction) { } await MicroserviceCapDropManager.create(msCapDropData, transaction) - await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, false, transaction) + await MicroservicePortService.switchOnUpdateFlagsForMicroservicesForPortMapping(microservice, transaction) } async function deletePortMappingEndPoint (microserviceUuid, internalPort, isCLI, transaction) { @@ -1234,6 +1304,16 @@ async function isMicroserviceConsumer (microservice, transaction) { return false } +async function isMicroserviceRouter (microservice, transaction) { + if (microservice.name === `router-${microservice.iofogUuid.toLowerCase()}`) { + const app = await ApplicationManager.findOne({ id: 
microservice.applicationId }, transaction) + if (app.isSystem === true) { + return true + } + } + return false +} + async function createVolumeMappingEndPoint (microserviceUuid, volumeMappingData, isCLI, transaction) { await Validator.validate(volumeMappingData, Validator.schemas.volumeMappings) @@ -1388,13 +1468,15 @@ async function _createMicroservice (microserviceData, isCLI, transaction) { const annotations = _validateMicroserviceAnnotations(microserviceData.annotations) let newMicroservice = { - uuid: AppHelper.generateRandomString(32), + uuid: AppHelper.generateUUID(), name: microserviceData.name, config: config, annotations: annotations, catalogItemId: microserviceData.catalogItemId, iofogUuid: microserviceData.iofogUuid, rootHostAccess: microserviceData.rootHostAccess, + pidMode: microserviceData.pidMode, + ipcMode: microserviceData.ipcMode, cdiDevices: microserviceData.cdiDevices, capAdd: microserviceData.capAdd, capDrop: microserviceData.capDrop, @@ -1538,6 +1620,49 @@ async function _updateEnv (env, microserviceUuid, transaction) { value: envData.value } + // Handle valueFromSecret + if (envData.valueFromSecret) { + const [secretName, dataKey] = envData.valueFromSecret.split('/') + if (!secretName || !dataKey) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_SECRET_REFERENCE, envData.valueFromSecret)) + } + const secret = await SecretManager.getSecret(secretName, transaction) + if (!secret) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.SECRET_NOT_FOUND, secretName)) + } + if (!secret.data[dataKey]) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SECRET_KEY_NOT_FOUND, dataKey, secretName)) + } + // If it's a TLS secret, decode the base64 value + if (secret.type === 'tls') { + try { + envObj.value = Buffer.from(secret.data[dataKey], 'base64').toString('utf-8') + } catch (error) { + throw new 
Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_BASE64_VALUE, dataKey, secretName)) + } + } else { + envObj.value = secret.data[dataKey] + } + envObj.valueFromSecret = envData.valueFromSecret + } + + // Handle valueFromConfigMap + if (envData.valueFromConfigMap) { + const [configMapName, dataKey] = envData.valueFromConfigMap.split('/') + if (!configMapName || !dataKey) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_CONFIGMAP_REFERENCE, envData.valueFromConfigMap)) + } + const configMap = await ConfigMapManager.getConfigMap(configMapName, transaction) + if (!configMap) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_NOT_FOUND, configMapName)) + } + if (!configMap.data[dataKey]) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_KEY_NOT_FOUND, dataKey, configMapName)) + } + envObj.value = configMap.data[dataKey] + envObj.valueFromConfigMap = envData.valueFromConfigMap + } + await MicroserviceEnvManager.create(envObj, transaction) } } @@ -1714,7 +1839,7 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { res.ports = [] for (const pm of portMappings) { const mapping = { internal: pm.portInternal, external: pm.portExternal, protocol: pm.isUdp ? 
'udp' : 'tcp' } - await MicroservicePortService.buildPublicPortMapping(pm, mapping, transaction) + // await MicroservicePortService.buildPublicPortMapping(pm, mapping, transaction) res.ports.push(mapping) } res.volumeMappings = volumeMappings.map((vm) => vm.dataValues) @@ -1742,10 +1867,6 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { return res } -function listAllPublicPortsEndPoint (transaction) { - return MicroservicePortService.listAllPublicPorts(transaction) -} - async function listMicroserviceByPubTagEndPoint (pubTag, transaction) { const where = { delete: false, @@ -1780,6 +1901,85 @@ async function listMicroserviceBySubTagEndPoint (subTag, transaction) { } } +async function createExecEndPoint (microserviceUuid, transaction) { + const microservice = await MicroserviceManager.findOneWithCategory({ uuid: microserviceUuid }, transaction) + if (microservice.catalogItem && microservice.catalogItem.category === 'SYSTEM') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SYSTEM_MICROSERVICE_UPDATE, microserviceUuid)) + } + if (!microservice) { + throw new Errors.NotFoundError(ErrorMessages.INVALID_MICROSERVICE_USER) + } + + await MicroserviceManager.update({ uuid: microservice.uuid }, { execEnabled: true }, transaction) + await ChangeTrackingService.update(microservice.iofogUuid, ChangeTrackingService.events.microserviceExecSessions, transaction) + + const updatedMicroservice = await MicroserviceManager.findOneWithCategory({ uuid: microservice.uuid }, transaction) + + return { + uuid: microservice.uuid, + execEnabled: updatedMicroservice.execEnabled + } +} + +async function deleteExecEndPoint (microserviceUuid, transaction) { + const microservice = await MicroserviceManager.findOneWithCategory({ uuid: microserviceUuid }, transaction) + if (microservice.catalogItem && microservice.catalogItem.category === 'SYSTEM') { + throw new 
Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SYSTEM_MICROSERVICE_UPDATE, microserviceUuid)) + } + if (!microservice) { + throw new Errors.NotFoundError(ErrorMessages.INVALID_MICROSERVICE_USER) + } + + await MicroserviceManager.update({ uuid: microservice.uuid }, { execEnabled: false }, transaction) + await ChangeTrackingService.update(microservice.iofogUuid, ChangeTrackingService.events.microserviceExecSessions, transaction) + + const updatedMicroservice = await MicroserviceManager.findOneWithCategory({ uuid: microservice.uuid }, transaction) + + return { + uuid: microservice.uuid, + execEnabled: updatedMicroservice.execEnabled + } +} + +async function createSystemExecEndPoint (microserviceUuid, isCLI, transaction) { + const microservice = await MicroserviceManager.findOneWithCategory({ uuid: microserviceUuid }, transaction) + // if (microservice.catalogItem && microservice.catalogItem.category !== 'SYSTEM') { + // throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SYSTEM_MICROSERVICE_UPDATE, microserviceUuid)) + // } + if (!microservice) { + throw new Errors.NotFoundError(ErrorMessages.INVALID_MICROSERVICE_USER) + } + await MicroserviceManager.update({ uuid: microservice.uuid }, { execEnabled: true }, transaction) + await ChangeTrackingService.update(microservice.iofogUuid, ChangeTrackingService.events.microserviceExecSessions, transaction) + + const updatedMicroservice = await MicroserviceManager.findOneWithCategory({ uuid: microservice.uuid }, transaction) + + return { + uuid: microservice.uuid, + execEnabled: updatedMicroservice.execEnabled + } +} + +async function deleteSystemExecEndPoint (microserviceUuid, isCLI, transaction) { + const microservice = await MicroserviceManager.findOneWithCategory({ uuid: microserviceUuid }, transaction) + // if (microservice.catalogItem && microservice.catalogItem.category !== 'SYSTEM') { + // throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SYSTEM_MICROSERVICE_UPDATE, 
microserviceUuid)) + // } + if (!microservice) { + throw new Errors.NotFoundError(ErrorMessages.INVALID_MICROSERVICE_USER) + } + + await MicroserviceManager.update({ uuid: microservice.uuid }, { execEnabled: false }, transaction) + await ChangeTrackingService.update(microservice.iofogUuid, ChangeTrackingService.events.microserviceExecSessions, transaction) + + const updatedMicroservice = await MicroserviceManager.findOneWithCategory({ uuid: microservice.uuid }, transaction) + + return { + uuid: microservice.uuid, + execEnabled: updatedMicroservice.execEnabled + } +} + module.exports = { createMicroserviceEndPoint: TransactionDecorator.generateTransaction(createMicroserviceEndPoint), createPortMappingEndPoint: TransactionDecorator.generateTransaction(createPortMappingEndPoint), @@ -1798,7 +1998,7 @@ module.exports = { getMicroserviceEndPoint: TransactionDecorator.generateTransaction(getMicroserviceEndPoint), getReceiverMicroservices, isMicroserviceConsumer, - listAllPublicPortsEndPoint: TransactionDecorator.generateTransaction(listAllPublicPortsEndPoint), + isMicroserviceRouter, listMicroservicePortMappingsEndPoint: TransactionDecorator.generateTransaction(listPortMappingsEndPoint), listMicroservicesEndPoint: TransactionDecorator.generateTransaction(listMicroservicesEndPoint), listVolumeMappingsEndPoint: TransactionDecorator.generateTransaction(listVolumeMappingsEndPoint), @@ -1809,5 +2009,9 @@ module.exports = { buildGetMicroserviceResponse: _buildGetMicroserviceResponse, updateChangeTracking: _updateChangeTracking, listMicroserviceByPubTagEndPoint: TransactionDecorator.generateTransaction(listMicroserviceByPubTagEndPoint), - listMicroserviceBySubTagEndPoint: TransactionDecorator.generateTransaction(listMicroserviceBySubTagEndPoint) + listMicroserviceBySubTagEndPoint: TransactionDecorator.generateTransaction(listMicroserviceBySubTagEndPoint), + createExecEndPoint: TransactionDecorator.generateTransaction(createExecEndPoint), + deleteExecEndPoint: 
TransactionDecorator.generateTransaction(deleteExecEndPoint), + createSystemExecEndPoint: TransactionDecorator.generateTransaction(createSystemExecEndPoint), + deleteSystemExecEndPoint: TransactionDecorator.generateTransaction(deleteSystemExecEndPoint) } diff --git a/src/services/router-service.js b/src/services/router-service.js index e1d4821c..8d9569bf 100644 --- a/src/services/router-service.js +++ b/src/services/router-service.js @@ -76,7 +76,7 @@ async function createRouterForFog (fogData, uuid, upstreamRouters, transaction) const router = await RouterManager.create(routerData, transaction) - const microserviceConfig = await _getRouterMicroserviceConfig(isEdge, uuid, messagingPort, router.interRouterPort, router.edgeRouterPort, transaction) + const microserviceConfig = await _getRouterMicroserviceConfig(isEdge, uuid, messagingPort, router.interRouterPort, router.edgeRouterPort, fogData.containerEngine, transaction) for (const upstreamRouter of upstreamRouters) { await RouterConnectionManager.create({ sourceRouter: router.id, destRouter: upstreamRouter.id }, transaction) @@ -94,7 +94,7 @@ async function createRouterForFog (fogData, uuid, upstreamRouters, transaction) return router } -async function updateRouter (oldRouter, newRouterData, upstreamRouters, transaction) { +async function updateRouter (oldRouter, newRouterData, upstreamRouters, containerEngine, transaction) { const routerCatalog = await CatalogService.getRouterCatalogItem(transaction) const routerMicroservice = await MicroserviceManager.findOne({ catalogItemId: routerCatalog.id, @@ -132,19 +132,19 @@ async function updateRouter (oldRouter, newRouterData, upstreamRouters, transact await RouterConnectionManager.bulkCreate(upstreamToCreate.map(router => ({ sourceRouter: oldRouter.id, destRouter: router.id })), transaction) // Update proxy microservice (If port or host changed) - const proxyCatalog = await CatalogService.getProxyCatalogItem(transaction) - const existingProxy = await 
MicroserviceManager.findOne({ iofogUuid: oldRouter.iofogUuid, catalogItemId: proxyCatalog.id }, transaction) - if (existingProxy) { - const config = JSON.parse(existingProxy.config || '{}') - config.networkRouter = { - host: newRouterData.host || oldRouter.host, - port: newRouterData.messagingPort - } - await MicroserviceManager.updateIfChanged({ uuid: existingProxy.uuid }, { config: JSON.stringify(config) }, transaction) - } + // const proxyCatalog = await CatalogService.getProxyCatalogItem(transaction) + // const existingProxy = await MicroserviceManager.findOne({ iofogUuid: oldRouter.iofogUuid, catalogItemId: proxyCatalog.id }, transaction) + // if (existingProxy) { + // const config = JSON.parse(existingProxy.config || '{}') + // config.networkRouter = { + // host: newRouterData.host || oldRouter.host, + // port: newRouterData.messagingPort + // } + // await MicroserviceManager.updateIfChanged({ uuid: existingProxy.uuid }, { config: JSON.stringify(config) }, transaction) + // } // Update config if needed - await updateConfig(oldRouter.id, transaction) + await updateConfig(oldRouter.id, containerEngine, transaction) await ChangeTrackingService.update(oldRouter.iofogUuid, ChangeTrackingService.events.routerChanged, transaction) await ChangeTrackingService.update(oldRouter.iofogUuid, ChangeTrackingService.events.microserviceList, transaction) await ChangeTrackingService.update(oldRouter.iofogUuid, ChangeTrackingService.events.microserviceConfig, transaction) @@ -171,7 +171,7 @@ async function _updateRouterPorts (routerMicroserviceUuid, router, transaction) } } -async function updateConfig (routerID, transaction) { +async function updateConfig (routerID, containerEngine, transaction) { const router = await RouterManager.findOne({ id: routerID }, transaction) if (!router) { throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, routerID)) @@ -197,6 +197,7 @@ async function updateConfig (routerID, transaction) { router.messagingPort, 
router.interRouterPort, router.edgeRouterPort, + containerEngine, transaction ) @@ -252,12 +253,22 @@ async function updateConfig (routerID, transaction) { } function _listenersChanged (currentListeners, newListeners) { - if (currentListeners.length !== newListeners.length) { + if (!currentListeners || !newListeners) { return true } - for (const listener of currentListeners) { - if (newListeners.findIndex(l => l.port === listener.port) === -1) { + // Convert to arrays if they're objects + const currentArray = Object.values(currentListeners) + const newArray = Object.values(newListeners) + + if (currentArray.length !== newArray.length) { + return true + } + + // Compare only port property + for (const currentListener of currentArray) { + const matchingListener = newArray.find(l => l.port === currentListener.port) + if (!matchingListener) { return true } } @@ -272,7 +283,7 @@ function _createRouterPorts (routerMicroserviceUuid, port, transaction) { } const mappingData = { - isPublic: false, + // isPublic: false, portInternal: port, portExternal: port, microserviceUuid: routerMicroserviceUuid @@ -290,7 +301,7 @@ async function _createRouterMicroservice (isEdge, uuid, microserviceConfig, tran isSystem: true } const routerMicroserviceData = { - uuid: AppHelper.generateRandomString(32), + uuid: AppHelper.generateUUID(), name: `router-${uuid.toLowerCase()}`, config: JSON.stringify(microserviceConfig), catalogItemId: routerCatalog.id, @@ -354,7 +365,16 @@ function _getRouterConnectorConfig (isEdge, dest, uuid) { return config } -async function _getRouterMicroserviceConfig (isEdge, uuid, messagingPort, interRouterPort, edgeRouterPort, transaction) { +async function _getRouterMicroserviceConfig (isEdge, uuid, messagingPort, interRouterPort, edgeRouterPort, containerEngine, transaction) { + let platform = 'docker' + if (containerEngine === 'podman') { + platform = 'podman' + } + + let namespace = SITE_CONFIG_NAMESPACE + if (process.env.CONTROLLER_NAMESPACE) { + namespace = 
process.env.CONTROLLER_NAMESPACE + } const config = { addresses: { mc: { @@ -381,8 +401,8 @@ async function _getRouterMicroserviceConfig (isEdge, uuid, messagingPort, interR }, siteConfig: { name: uuid, - namespace: SITE_CONFIG_NAMESPACE, - platform: 'docker', + namespace: namespace, + platform: platform, version: SITE_CONFIG_VERSION }, sslProfiles: {} diff --git a/src/services/secret-service.js b/src/services/secret-service.js index 16084702..a51e11a6 100644 --- a/src/services/secret-service.js +++ b/src/services/secret-service.js @@ -17,6 +17,8 @@ const AppHelper = require('../helpers/app-helper') const Errors = require('../helpers/errors') const ErrorMessages = require('../helpers/error-messages') const Validator = require('../schemas/index') +const VolumeMountService = require('./volume-mount-service') +const VolumeMountingManager = require('../data/managers/volume-mounting-manager') function validateBase64 (value) { try { @@ -79,6 +81,7 @@ async function updateSecretEndpoint (secretName, secretData, transaction) { validateSecretData(existingSecret.type, secretData.data) const secret = await SecretManager.updateSecret(secretName, secretData.data, transaction) + await _updateChangeTrackingForFogs(secretName, transaction) return { id: secret.id, name: secret.name, @@ -127,6 +130,19 @@ async function deleteSecretEndpoint (secretName, transaction) { return {} } +async function _updateChangeTrackingForFogs (secretName, transaction) { + const secretVolumeMounts = await VolumeMountingManager.findAll({ secretName: secretName }, transaction) + if (secretVolumeMounts.length > 0) { + for (const secretVolumeMount of secretVolumeMounts) { + const volumeMountObj = { + name: secretVolumeMount.name, + secretName: secretName + } + await VolumeMountService.updateVolumeMountEndpoint(secretVolumeMount.name, volumeMountObj, transaction) + } + } +} + module.exports = { createSecretEndpoint: TransactionDecorator.generateTransaction(createSecretEndpoint), updateSecretEndpoint: 
TransactionDecorator.generateTransaction(updateSecretEndpoint), diff --git a/src/services/services-service.js b/src/services/services-service.js new file mode 100644 index 00000000..3518d3aa --- /dev/null +++ b/src/services/services-service.js @@ -0,0 +1,1209 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const TransactionDecorator = require('../decorators/transaction-decorator') +const ServiceManager = require('../data/managers/service-manager') +const MicroserviceManager = require('../data/managers/microservice-manager') +const RouterManager = require('../data/managers/router-manager') +const RouterConnectionManager = require('../data/managers/router-connection-manager') +const K8sClient = require('../utils/k8s-client') +const AppHelper = require('../helpers/app-helper') +const config = require('../config') +const Errors = require('../helpers/errors') +const ErrorMessages = require('../helpers/error-messages') +const Validator = require('../schemas') +const logger = require('../logger') +const FogManager = require('../data/managers/iofog-manager') +const TagsManager = require('../data/managers/tags-manager') +const ChangeTrackingService = require('./change-tracking-service') +const ApplicationManager = require('../data/managers/application-manager') +// const { Op } = require('sequelize') + +const K8S_ROUTER_CONFIG_MAP = 'pot-router' +const SERVICE_ANNOTATION_TAG = 'service.datasance.com/tag' + +// Map service tags to string array +// Return plain JS object +function _mapTags (service) { + return service.tags ? 
service.tags.map(t => t.value) : [] +} + +async function _setTags (serviceModel, tagsArray, transaction) { + if (tagsArray) { + let tags = [] + for (const tag of tagsArray) { + let tagModel = await TagsManager.findOne({ value: tag }, transaction) + if (!tagModel) { + tagModel = await TagsManager.create({ value: tag }, transaction) + } + tags.push(tagModel) + } + await serviceModel.setTags(tags) + } +} + +async function handleServiceDistribution (serviceTags, transaction) { + // Always find fog nodes with 'all' tag + const allTaggedFogNodes = await FogManager.findAllWithTags({ + '$tags.value$': `${SERVICE_ANNOTATION_TAG}: all` + }, transaction) + + // If serviceTags is null or empty, return only fog nodes with 'all' tag + if (!serviceTags || serviceTags.length === 0) { + const uuids = allTaggedFogNodes.map(fog => fog.uuid) + return uuids + } + + // Filter tags that don't contain ':' or '=' + const filteredServiceTags = serviceTags + .filter(tag => tag != null) + .map(tag => String(tag)) + .filter(tag => !tag.includes(':') && !tag.includes('=')) + .filter(tag => tag.length > 0) + + if (filteredServiceTags.length === 0) { + const uuids = allTaggedFogNodes.map(fog => fog.uuid) + return uuids + } + + // Find fog nodes for each filtered tag + const specificTaggedFogNodes = new Set() + for (const tag of filteredServiceTags) { + const fogNodes = await FogManager.findAllWithTags({ + '$tags.value$': `${SERVICE_ANNOTATION_TAG}: ${tag}` + }, transaction) + fogNodes.forEach(fog => specificTaggedFogNodes.add(fog.uuid)) + } + + // Get all tag fog node UUIDs + const allTagUuids = allTaggedFogNodes.map(fog => fog.uuid) + + // Combine both sets of fog nodes and remove duplicates + const allFogUuids = new Set([...allTagUuids, ...Array.from(specificTaggedFogNodes)]) + + return Array.from(allFogUuids) +} + +async function checkKubernetesEnvironment () { + const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') + return controlPlane && controlPlane.toLowerCase() 
=== 'kubernetes' +} + +async function validateNonK8sType (serviceConfig) { + const isK8s = await checkKubernetesEnvironment() + if (serviceConfig.type.toLowerCase() !== 'k8s' && isK8s) { + if (!serviceConfig.k8sType || !serviceConfig.servicePort) { + throw new Errors.ValidationError('Kubernetes environment is required for k8s service type (LoadBalancer or ClusterIP or NodePort) and service port') + } + } +} + +async function _validateServiceName (serviceConfig) { + if (serviceConfig.name.toLowerCase() === 'controller' || serviceConfig.name.toLowerCase() === 'router' || serviceConfig.name.toLowerCase() === 'router-internal' || serviceConfig.name.toLowerCase() === 'docker' || serviceConfig.name.toLowerCase() === 'podman' || serviceConfig.name.toLowerCase() === 'kubernetes') { + throw new Errors.ValidationError('Service name cannot be "controller", "router", "router-internal", "docker", "podman" or "kubernetes"') + } +} + +async function validateMicroserviceType (serviceConfig, transaction) { + if (serviceConfig.type.toLowerCase() !== 'microservice') { + return + } + + let microserviceUuid = serviceConfig.resource + + // If resource contains "/", the user provided an "appName/microserviceName" reference + if (serviceConfig.resource.includes('/')) { + const [appName, microserviceName] = serviceConfig.resource.split('/') + const app = await ApplicationManager.findOne({ name: appName }, transaction) + if (!app) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_APPLICATION_NAME, appName)) + } + const microservice = await MicroserviceManager.findOne({ + name: microserviceName, + applicationId: app.id + }, transaction) + + if (!microservice) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_NAME, serviceConfig.resource)) + } + + microserviceUuid = microservice.uuid + } else { + // User provided UUID directly, validate if microservice exists + const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) + if 
(!microservice) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, serviceConfig.resource)) + } + } + + // Update resource to be the microservice UUID + serviceConfig.resource = microserviceUuid +} + +async function validateFogServiceType (serviceConfig, transaction) { + if (serviceConfig.type.toLowerCase() !== 'agent') { + return + } + + // First try to find fog node by name + let fogNode = await FogManager.findOne({ name: serviceConfig.resource }, transaction) + + // If not found by name, try to find by UUID + if (!fogNode) { + fogNode = await FogManager.findOne({ uuid: serviceConfig.resource }, transaction) + } + + // If still not found, throw error + if (!fogNode) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, serviceConfig.resource)) + } + + // Always set resource to be the fog node UUID + serviceConfig.resource = fogNode.uuid +} + +async function validateDefaultBridge (serviceConfig, transaction) { + // If defaultBridge is empty, set it to 'default-router' + if (!serviceConfig.defaultBridge) { + logger.debug('Setting default bridge to default-router') + serviceConfig.defaultBridge = 'default-router' + return + } + + // If service type is not microservice or agent, defaultBridge must be 'default-router' + if (serviceConfig.type.toLowerCase() !== 'microservice' && serviceConfig.type.toLowerCase() !== 'agent') { + if (serviceConfig.defaultBridge !== 'default-router') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_DEFAULT_BRIDGE, serviceConfig.defaultBridge)) + } + return + } + + // For microservice or agent type, if user provided a UUID instead of 'default-router' + if (serviceConfig.defaultBridge !== 'default-router') { + let iofogUuid + + if (serviceConfig.type.toLowerCase() === 'microservice') { + // Get the microservice to find its iofog node + const microservice = await MicroserviceManager.findOne({ uuid: 
serviceConfig.resource }, transaction) + if (!microservice) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, serviceConfig.resource)) + } + iofogUuid = microservice.iofogUuid + } else if (serviceConfig.type.toLowerCase() === 'agent') { + // For agent type, the resource is the agent UUID + iofogUuid = serviceConfig.resource + } + + // Get the router for the iofog node + const router = await RouterManager.findOne({ iofogUuid: iofogUuid }, transaction) + if (!router) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, iofogUuid)) + } + + // Check if the router has a connection to the specified upstream router + const upstreamRouter = await RouterManager.findOne({ iofogUuid: serviceConfig.defaultBridge }, transaction) + if (!upstreamRouter) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, serviceConfig.defaultBridge)) + } + + const routerConnection = await RouterConnectionManager.findOne({ + sourceRouter: router.id, + destRouter: upstreamRouter.id + }, transaction) + + if (!routerConnection) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_CONNECTION, serviceConfig.defaultBridge, router.id)) + } + } +} + +async function defineBridgePort (serviceConfig, transaction) { + // Get bridge port range from environment or config + const bridgePortRangeStr = process.env.BRIDGE_PORTS_RANGE || config.get('bridgePorts.range') || '10024-65535' + const [startStr, endStr] = bridgePortRangeStr.split('-') + const start = parseInt(startStr) + const end = parseInt(endStr) + + // Get all existing services to check used ports + const existingServices = await ServiceManager.findAll({}, transaction) + const usedPorts = new Set(existingServices.map(service => service.bridgePort)) + + // Find the first available port in the range + let bridgePort = start + while (bridgePort <= end) { + if (!usedPorts.has(bridgePort)) { + 
serviceConfig.bridgePort = bridgePort + return + } + bridgePort++ + } + + // If we get here, no ports are available + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.NO_AVAILABLE_BRIDGE_PORT, bridgePortRangeStr)) +} + +// Helper function to determine host based on service type +async function _determineConnectorHost (serviceConfig, transaction) { + switch (serviceConfig.type.toLowerCase()) { + case 'microservice': + const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) + if (microservice.rootHostAccess) { + return 'iofog' + } else { + return `iofog_${serviceConfig.resource}` + } + case 'agent': + return 'iofog' + case 'k8s': + case 'external': + return serviceConfig.resource + default: + throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) + } +} + +// Helper function to determine siteId for connector +async function _determineConnectorSiteId (serviceConfig, transaction) { + switch (serviceConfig.type.toLowerCase()) { + case 'microservice': { + const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) + if (!microservice) { + throw new Errors.NotFoundError(`Microservice not found: ${serviceConfig.resource}`) + } + return microservice.iofogUuid + } + case 'agent': + return serviceConfig.resource + case 'k8s': + case 'external': + return 'default-router' + default: + throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) + } +} + +// Helper function to determine processId for connector +async function _determineConnectorProcessId (serviceConfig) { + switch (serviceConfig.type.toLowerCase()) { + case 'microservice': + return serviceConfig.resource + case 'agent': + return `${serviceConfig.resource}-local-${serviceConfig.targetPort}` + case 'k8s': + return `${serviceConfig.resource}-k8s-${serviceConfig.targetPort}` + case 'external': + return 
`${serviceConfig.resource}-external-${serviceConfig.targetPort}` + default: + throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) + } +} + +// Helper function to build tcpConnector configuration +async function _buildTcpConnector (serviceConfig, transaction) { + const host = await _determineConnectorHost(serviceConfig, transaction) + const siteId = await _determineConnectorSiteId(serviceConfig, transaction) + const processId = await _determineConnectorProcessId(serviceConfig) + + return { + name: `${serviceConfig.name}-connector`, + host, + port: serviceConfig.targetPort.toString(), + address: serviceConfig.name, + siteId, + processId + } +} + +// Helper function to build tcpListener configuration +async function _buildTcpListener (serviceConfig, fogNodeUuid = null) { + const listener = { + name: `${serviceConfig.name}-listener`, + port: serviceConfig.bridgePort.toString(), + address: serviceConfig.name, + siteId: fogNodeUuid || serviceConfig.defaultBridge + } + return listener +} + +// Helper function to get router microservice by fog node UUID +async function _getRouterMicroservice (fogNodeUuid, transaction) { + const routerName = `router-${fogNodeUuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + return routerMicroservice +} + +// Helper function to update router config in Kubernetes environment +async function _updateK8sRouterConfig (config) { + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const patchData = { + data: { + 'skrouterd.json': JSON.stringify(config) + } + } + + await K8sClient.patchConfigMap(K8S_ROUTER_CONFIG_MAP, patchData) +} + +// Helper function to update router microservice config +async function 
_updateRouterMicroserviceConfig (fogNodeUuid, config, transaction) { + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + + // Update microservice with the provided config + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + + // Update change tracking + await ChangeTrackingService.update(fogNodeUuid, ChangeTrackingService.events.microserviceConfig, transaction) +} + +// Helper function to add tcpConnector to router config +async function _addTcpConnector (serviceConfig, transaction) { + const isK8s = await checkKubernetesEnvironment() + const connector = await _buildTcpConnector(serviceConfig, transaction) + const siteId = connector.siteId + + if (siteId === 'default-router') { + if (isK8s) { + // Update K8s router config + logger.debug('Updating K8s router config') + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + logger.error('ConfigMap not found:' + K8S_ROUTER_CONFIG_MAP) + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Add new connector to the array + routerConfig.push(['tcpConnector', connector]) + + await _updateK8sRouterConfig(routerConfig) + } else { + // Update default router microservice config + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + logger.error('Default router not found') + throw new Errors.NotFoundError('Default router not found') + } + const fogNodeUuid = defaultRouter.iofogUuid + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + 
currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } + } else { + // Update specific router microservice config + const fogNodeUuid = siteId + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } +} + +// Helper function to add tcpListener to router config +async function _addTcpListener (serviceConfig, transaction) { + const isK8s = await checkKubernetesEnvironment() + + // First handle K8s case if we're in K8s environment + if (isK8s) { + const k8sListener = await _buildTcpListener(serviceConfig, null) // null for K8s case + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + logger.error('ConfigMap not found:' + K8S_ROUTER_CONFIG_MAP) + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Add new listener to the array + routerConfig.push(['tcpListener', k8sListener]) + + await _updateK8sRouterConfig(routerConfig) + } + + // Handle distributed router microservice cases + // Get list of fog nodes that need this listener + const fogNodeUuids = await handleServiceDistribution(serviceConfig.tags, transaction) + + // If not in K8s environment, always include default router + if (!isK8s) { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + logger.error('Default router not found') + throw new Errors.NotFoundError('Default router not found') + } + // Add default 
router if not already in the list + if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { + fogNodeUuids.push(defaultRouter.iofogUuid) + } + } + // else if (!fogNodeUuids || fogNodeUuids.length === 0) { + // // If in K8s and no fog nodes found, add default router + // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + // if (!defaultRouter) { + // logger.error('Default router not found') + // throw new Errors.NotFoundError('Default router not found') + // } + // fogNodeUuids.push(defaultRouter.iofogUuid) + // } + + // Add listener to each router microservice + for (const fogNodeUuid of fogNodeUuids) { + try { + const listener = await _buildTcpListener(serviceConfig, fogNodeUuid) + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + if (!currentConfig.bridges) currentConfig.bridges = {} + if (!currentConfig.bridges.tcpListeners) currentConfig.bridges.tcpListeners = {} + currentConfig.bridges.tcpListeners[listener.name] = listener + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } catch (err) { + if (err instanceof Errors.NotFoundError) { + logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) + continue + } + throw err + } + } +} + +// Helper function to update tcpConnector in router config +async function _updateTcpConnector (serviceConfig, transaction) { + const isK8s = await checkKubernetesEnvironment() + const connector = await _buildTcpConnector(serviceConfig, transaction) + const siteId = connector.siteId + + if (siteId === 'default-router') { + if (isK8s) { + // Update K8s router config + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Find and update the 
existing connector + const connectorIndex = routerConfig.findIndex(item => + item[0] === 'tcpConnector' && item[1].name === connector.name + ) + if (connectorIndex !== -1) { + routerConfig[connectorIndex] = ['tcpConnector', connector] + } + + await _updateK8sRouterConfig(routerConfig) + } else { + // Update default router microservice config + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + throw new Errors.NotFoundError('Default router not found') + } + const fogNodeUuid = defaultRouter.iofogUuid + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + // Update the connector with the same name + currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } + } else { + // Update specific router microservice config + const fogNodeUuid = siteId + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + // Update the connector with the same name + currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } +} + +// // Helper function to update tcpListener in router config +// async function _updateTcpListener (serviceConfig, transaction) { +// const isK8s = await checkKubernetesEnvironment() + +// // First handle K8s case if we're in K8s environment +// if (isK8s) { +// const k8sListener = await 
_buildTcpListener(serviceConfig, null) // null for K8s case +// const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) +// if (!configMap) { +// throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) +// } + +// const routerConfig = JSON.parse(configMap.data['skrouterd.json']) +// // Update the listener in the array +// const listenerIndex = routerConfig.findIndex(item => +// item[0] === 'tcpListener' && item[1].name === k8sListener.name +// ) +// if (listenerIndex !== -1) { +// routerConfig[listenerIndex] = ['tcpListener', k8sListener] +// } else { +// routerConfig.push(['tcpListener', k8sListener]) +// } + +// await _updateK8sRouterConfig(routerConfig) +// } + +// // Handle distributed router microservice cases +// // Get list of fog nodes that need this listener +// const fogNodeUuids = await handleServiceDistribution(serviceConfig.tags, transaction) +// // If not in K8s environment, always include default router +// if (!isK8s) { +// const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) +// if (!defaultRouter) { +// throw new Errors.NotFoundError('Default router not found') +// } +// // Add default router if not already in the list +// if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { +// fogNodeUuids.push(defaultRouter.iofogUuid) +// } +// } +// // else if (!fogNodeUuids || fogNodeUuids.length === 0) { +// // // If in K8s and no fog nodes found, add default router +// // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) +// // if (!defaultRouter) { +// // throw new Errors.NotFoundError('Default router not found') +// // } +// // fogNodeUuids.push(defaultRouter.iofogUuid) +// // } + +// // Update listener in each router microservice +// for (const fogNodeUuid of fogNodeUuids) { +// try { +// const listener = await _buildTcpListener(serviceConfig, fogNodeUuid) +// const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) +// const 
currentConfig = JSON.parse(routerMicroservice.config || '{}') + +// if (!currentConfig.bridges) { +// currentConfig.bridges = {} +// } +// if (!currentConfig.bridges.tcpListeners) { +// currentConfig.bridges.tcpListeners = {} +// } +// // Update listener with its name as key +// currentConfig.bridges.tcpListeners[listener.name] = listener + +// await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) +// } catch (err) { +// if (err instanceof Errors.NotFoundError) { +// logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) +// continue +// } +// throw err +// } +// } +// } + +// Helper function to delete tcpConnector from router config +async function _deleteTcpConnector (serviceName, transaction) { + const isK8s = await checkKubernetesEnvironment() + const connectorName = `${serviceName}-connector` + + // Get service to determine if it's using default router + const service = await ServiceManager.findOne({ name: serviceName }, transaction) + if (!service) { + throw new Errors.NotFoundError(`Service not found: ${serviceName}`) + } + + const isDefaultRouter = service.defaultBridge === 'default-router' + let microserviceSource = null + if (service.type === 'microservice') { + microserviceSource = await MicroserviceManager.findOne({ uuid: service.resource }, transaction) + } + + if (isDefaultRouter && !microserviceSource) { + if (isK8s) { + // Update K8s router config + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Remove the connector from the array + const updatedConfig = routerConfig.filter(item => + !(item[0] === 'tcpConnector' && item[1].name === connectorName) + ) + + await _updateK8sRouterConfig(updatedConfig) + } else { + // Update default router microservice config + const defaultRouter = await 
RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + throw new Errors.NotFoundError('Default router not found') + } + const fogNodeUuid = defaultRouter.iofogUuid + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { + delete currentConfig.bridges.tcpConnectors[connectorName] + } + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } + } else { + let fogNodeUuid = null + if (microserviceSource) { + fogNodeUuid = microserviceSource.iofogUuid + } else { + fogNodeUuid = service.defaultBridge // This is the actual fogNodeUuid for non-default router + } + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { + delete currentConfig.bridges.tcpConnectors[connectorName] + } + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } +} + +// Helper function to delete tcpListener from router config +async function _deleteTcpListener (serviceName, transaction) { + const isK8s = await checkKubernetesEnvironment() + const listenerName = `${serviceName}-listener` + + // First handle K8s case if we're in K8s environment + if (isK8s) { + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Remove the listener from the array + const updatedConfig = routerConfig.filter(item => + !(item[0] === 'tcpListener' && item[1].name === listenerName) + ) + + await _updateK8sRouterConfig(updatedConfig) + } + + // Get service to determine its tags for distribution + const service 
= await ServiceManager.findOneWithTags({ name: serviceName }, transaction) + if (!service) { + throw new Errors.NotFoundError(`Service not found: ${serviceName}`) + } + + let microserviceSource = null + if (service.type === 'microservice') { + microserviceSource = await MicroserviceManager.findOne({ uuid: service.resource }, transaction) + } + // Handle distributed router microservice cases + // Get list of fog nodes that need this listener removed + const serviceTags = service.tags.map(tag => tag.value) + const fogNodeUuids = await handleServiceDistribution(serviceTags, transaction) + + if (microserviceSource) { + if (!fogNodeUuids.includes(microserviceSource.iofogUuid)) { + fogNodeUuids.push(microserviceSource.iofogUuid) + } + } + // If not in K8s environment, always include default router + if (!isK8s) { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + throw new Errors.NotFoundError('Default router not found') + } + // Add default router if not already in the list + if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { + fogNodeUuids.push(defaultRouter.iofogUuid) + } + } + // else if (!fogNodeUuids || fogNodeUuids.length === 0) { + // // If in K8s and no fog nodes found, add default router + // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + // if (!defaultRouter) { + // throw new Errors.NotFoundError('Default router not found') + // } + // fogNodeUuids.push(defaultRouter.iofogUuid) + // } + + // Remove listener from each router microservice + for (const fogNodeUuid of fogNodeUuids) { + try { + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + if (currentConfig.bridges && currentConfig.bridges.tcpListeners) { + delete currentConfig.bridges.tcpListeners[listenerName] + } + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } catch 
(err) { + if (err instanceof Errors.NotFoundError) { + logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) + continue + } + throw err + } + } +} + +// Helper function to create Kubernetes service +async function _createK8sService (serviceConfig, transaction) { + const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? tag : `${tag}:`) + const serviceSpec = { + apiVersion: 'v1', + kind: 'Service', + metadata: { + name: serviceConfig.name, + annotations: normalizedTags.reduce((acc, tag) => { + const [key, value] = tag.split(':') + acc[key] = value || '' + return acc + }, {}) + }, + spec: { + type: serviceConfig.k8sType, + selector: { + application: 'interior-router', + name: 'router', + 'skupper.io/component': 'router' + }, + ports: [{ + port: parseInt(serviceConfig.bridgePort), + targetPort: parseInt(serviceConfig.servicePort), + protocol: 'TCP' + }] + } + } + + const service = await K8sClient.createService(serviceSpec) + + // If LoadBalancer type, wait for and set the external IP + if (serviceConfig.k8sType === 'LoadBalancer') { + const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) + if (loadBalancerIP) { + await ServiceManager.update( + { name: serviceConfig.name }, + { serviceEndpoint: loadBalancerIP }, + transaction + ) + } + } + + return service +} + +// Helper function to update Kubernetes service +async function _updateK8sService (serviceConfig, transaction) { + const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? 
tag : `${tag}:`) + const patchData = { + metadata: { + annotations: normalizedTags.reduce((acc, tag) => { + const [key, value] = tag.split(':') + acc[key] = value || '' + return acc + }, {}) + }, + spec: { + type: serviceConfig.k8sType, + selector: { + application: 'interior-router', + name: 'router', + 'skupper.io/component': 'router' + }, + ports: [{ + port: parseInt(serviceConfig.bridgePort), + targetPort: parseInt(serviceConfig.servicePort), + protocol: 'TCP' + }] + } + } + + logger.debug(`Updating service: ${serviceConfig.name}`) + const service = await K8sClient.updateService(serviceConfig.name, patchData) + + // If LoadBalancer type, wait for and set the external IP + if (serviceConfig.k8sType === 'LoadBalancer') { + const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) + if (loadBalancerIP) { + await ServiceManager.update( + { name: serviceConfig.name }, + { serviceEndpoint: loadBalancerIP }, + transaction + ) + } + } + + return service +} + +// Helper function to delete Kubernetes service +async function _deleteK8sService (serviceName) { + await K8sClient.deleteService(serviceName) +} + +// Create service endpoint +async function createServiceEndpoint (serviceData, transaction) { + logger.debug('Creating service with data:' + JSON.stringify(serviceData)) + + // 1. Validate from schemas validator + await Validator.validate(serviceData, Validator.schemas.serviceCreate) + await _validateServiceName(serviceData) + + // 2. Check K8s environment if type is k8s + const isK8s = await checkKubernetesEnvironment() + if (serviceData.type === 'k8s' && !isK8s) { + throw new Errors.ValidationError('Kubernetes environment is required for k8s service type') + } + + if (serviceData.type !== 'k8s' && isK8s) { + logger.debug('Validating non k8s service type') + await validateNonK8sType(serviceData) + } + + // 3. 
Validate microservice type + if (serviceData.type === 'microservice') { + await validateMicroserviceType(serviceData, transaction) + } + + // 4. Validate agent type + if (serviceData.type === 'agent') { + logger.debug('Validating agent service type') + await validateFogServiceType(serviceData, transaction) + } + + // 5. Validate default bridge + logger.debug('Validating default bridge') + await validateDefaultBridge(serviceData, transaction) + + logger.debug('Defining bridge port') + // 6. Define bridge port + await defineBridgePort(serviceData, transaction) + + // Set provisioning fields + serviceData.provisioningStatus = 'pending' + serviceData.provisioningError = null + + // 7. Create service in database first + logger.debug('Creating service in database') + const service = await ServiceManager.create(serviceData, transaction) + + // 8. Start background orchestration + setImmediate(async () => { + try { + // Set tags if provided + logger.debug('Setting tags (background)') + if (serviceData.tags && serviceData.tags.length > 0) { + await _setTags(service, serviceData.tags, transaction) + } + + // Add TCP connector + logger.debug('Adding TCP connector (background)') + await _addTcpConnector(serviceData, transaction) + + // Add TCP listener + logger.debug('Adding TCP listener (background)') + await _addTcpListener(serviceData, transaction) + + // Create K8s service if needed + if ((serviceData.type === 'microservice' || serviceData.type === 'agent' || serviceData.type === 'external') && isK8s) { + logger.debug('Creating K8s service (background)') + await _createK8sService(serviceData, transaction) + } + + // Update provisioning status to ready + await ServiceManager.update({ id: service.id }, { provisioningStatus: 'ready', provisioningError: null }, transaction) + } catch (err) { + logger.error('Background provisioning failed:', err) + // Update provisioning status to failed and set error message + await ServiceManager.update({ id: service.id }, { 
provisioningStatus: 'failed', provisioningError: err.message }, transaction) + } + }) + + // 9. Return service immediately + return service +} + +// Update service endpoint +async function updateServiceEndpoint (serviceName, serviceData, transaction) { + // 1. Validate from schemas validator + await Validator.validate(serviceData, Validator.schemas.serviceUpdate) + await _validateServiceName(serviceData) + + // 2. Get existing service + const existingService = await ServiceManager.findOneWithTags({ name: serviceName }, transaction) + if (!existingService) { + throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) + } + + // 3. Check if service type is being changed + if (serviceData.type && serviceData.type !== existingService.type) { + throw new Errors.ValidationError('Changing service type is not allowed. Please delete the service and create a new one with the desired type.') + } + + // 4. Check K8s environment if type is k8s + const isK8s = await checkKubernetesEnvironment() + if (existingService.type === 'k8s' && !isK8s) { + throw new Errors.ValidationError('Kubernetes environment is required for k8s service type') + } + + if (serviceData.type !== 'k8s' && isK8s) { + logger.debug('Validating non k8s service type') + await validateNonK8sType(serviceData) + } + + // 5. Validate microservice type if needed + if (existingService.type === 'microservice') { + await validateMicroserviceType(serviceData, transaction) + } + + // 6. Validate agent type if needed + if (existingService.type === 'agent') { + await validateFogServiceType(serviceData, transaction) + } + + // 7. Validate default bridge if needed + if (serviceData.defaultBridge) { + await validateDefaultBridge(serviceData, transaction) + } + + serviceData.bridgePort = existingService.bridgePort + + // Set provisioning fields + serviceData.provisioningStatus = 'pending' + serviceData.provisioningError = null + + // 8. 
Update service in database + const updatedService = await ServiceManager.update( + { name: serviceName }, + serviceData, + transaction + ) + + // 9. Start background orchestration + setImmediate(async () => { + try { + // Update tags if provided + if (serviceData.tags) { + await _setTags(existingService, serviceData.tags, transaction) + } + + // Handle resource changes + if (serviceData.resource && + JSON.stringify(serviceData.resource) !== JSON.stringify(existingService.resource)) { + await _deleteTcpConnector(serviceName, transaction) + await _addTcpConnector(serviceData, transaction) + } else { + await _updateTcpConnector(serviceData, transaction) + // await _updateTcpListener(serviceData, transaction) + } + + // Update K8s service if needed + if ((existingService.type === 'microservice' || existingService.type === 'agent' || existingService.type === 'external') && isK8s) { + await _updateK8sService(serviceData, transaction) + } + + // Update provisioning status to ready + await ServiceManager.update( + { name: serviceName }, + { provisioningStatus: 'ready', provisioningError: null }, + transaction + ) + } catch (err) { + logger.error('Background provisioning failed (update):', err) + // Update provisioning status to failed and set error message + await ServiceManager.update( + { name: serviceName }, + { provisioningStatus: 'failed', provisioningError: err.message }, + transaction + ) + } + }) + + // 10. 
Return updated service immediately + return updatedService +} + +// Delete service endpoint +async function deleteServiceEndpoint (serviceName, transaction) { + // Get existing service + const existingService = await ServiceManager.findOne({ name: serviceName }, transaction) + if (!existingService) { + throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) + } + + const isK8s = await checkKubernetesEnvironment() + + try { + // Delete TCP connector + await _deleteTcpConnector(serviceName, transaction) + + // Delete TCP listener + await _deleteTcpListener(serviceName, transaction) + + // Delete K8s service if needed + if (isK8s && existingService.type !== 'k8s') { + await _deleteK8sService(serviceName) + } + + // Finally delete the service from database + await ServiceManager.delete({ name: serviceName }, transaction) + + return { message: `Service ${serviceName} deleted successfully` } + } catch (error) { + logger.error('Error deleting service:', { + error: error.message, + stack: error.stack, + serviceName: serviceName, + serviceType: existingService.type + }) + + // Wrap the error in a proper error type if it's not already + if (!(error instanceof Errors.ValidationError) && + !(error instanceof Errors.NotFoundError) && + !(error instanceof Errors.TransactionError) && + !(error instanceof Errors.DuplicatePropertyError)) { + throw new Errors.ValidationError(`Failed to delete service: ${error.message}`) + } + throw error + } +} + +// List services endpoint +async function getServicesListEndpoint (transaction) { + const queryFogData = {} + const services = await ServiceManager.findAllWithTags(queryFogData, transaction) + return services.map(service => ({ + name: service.name, + type: service.type, + resource: service.resource, + defaultBridge: service.defaultBridge, + bridgePort: service.bridgePort, + targetPort: service.targetPort, + servicePort: service.servicePort, + k8sType: service.k8sType, + serviceEndpoint: service.serviceEndpoint, + tags: 
_mapTags(service), + provisioningStatus: service.provisioningStatus, + provisioningError: service.provisioningError + })) +} + +// Get service endpoint +async function getServiceEndpoint (serviceName, transaction) { + const queryFogData = { name: serviceName } + const service = await ServiceManager.findOneWithTags(queryFogData, transaction) + if (!service) { + throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) + } + return { + name: service.name, + type: service.type, + resource: service.resource, + defaultBridge: service.defaultBridge, + bridgePort: service.bridgePort, + targetPort: service.targetPort, + servicePort: service.servicePort, + k8sType: service.k8sType, + serviceEndpoint: service.serviceEndpoint, + tags: _mapTags(service), + provisioningStatus: service.provisioningStatus, + provisioningError: service.provisioningError + } +} + +async function moveMicroserviceTcpBridgeToNewFog (service, newFogUuid, oldFogUuid, transaction) { + const listenerName = `${service.name}-listener` + const connectorName = `${service.name}-connector` + + const oldRouterMicroservice = await _getRouterMicroservice(oldFogUuid, transaction) + const oldRouterConfig = JSON.parse(oldRouterMicroservice.config || '{}') + const newRouterMicroservice = await _getRouterMicroservice(newFogUuid, transaction) + const newRouterConfig = JSON.parse(newRouterMicroservice.config || '{}') + + const connector = oldRouterConfig.bridges.tcpConnectors[connectorName] + const listener = oldRouterConfig.bridges.tcpListeners[listenerName] + + if (oldRouterConfig.bridges.tcpConnectors[connectorName]) { + delete oldRouterConfig.bridges.tcpConnectors[connectorName] + } + if (oldRouterConfig.bridges.tcpListeners[listenerName]) { + delete oldRouterConfig.bridges.tcpListeners[listenerName] + } + + if (!newRouterConfig.bridges) { + newRouterConfig.bridges = {} + } + if (!newRouterConfig.bridges.tcpConnectors) { + newRouterConfig.bridges.tcpConnectors = {} + } + + 
newRouterConfig.bridges.tcpConnectors[connectorName] = connector + newRouterConfig.bridges.tcpListeners[listenerName] = listener + + await _updateRouterMicroserviceConfig(oldFogUuid, oldRouterConfig, transaction) + await _updateRouterMicroserviceConfig(newFogUuid, newRouterConfig, transaction) +} + +module.exports = { + checkKubernetesEnvironment, + validateMicroserviceType: TransactionDecorator.generateTransaction(validateMicroserviceType), + validateNonK8sType, + _validateServiceName, + validateFogServiceType: TransactionDecorator.generateTransaction(validateFogServiceType), + validateDefaultBridge: TransactionDecorator.generateTransaction(validateDefaultBridge), + defineBridgePort: TransactionDecorator.generateTransaction(defineBridgePort), + handleServiceDistribution: TransactionDecorator.generateTransaction(handleServiceDistribution), + _mapTags, + _setTags: TransactionDecorator.generateTransaction(_setTags), + _createK8sService, + _updateK8sService, + _deleteK8sService, + createServiceEndpoint: TransactionDecorator.generateTransaction(createServiceEndpoint), + updateServiceEndpoint: TransactionDecorator.generateTransaction(updateServiceEndpoint), + deleteServiceEndpoint: TransactionDecorator.generateTransaction(deleteServiceEndpoint), + getServicesListEndpoint: TransactionDecorator.generateTransaction(getServicesListEndpoint), + getServiceEndpoint: TransactionDecorator.generateTransaction(getServiceEndpoint), + moveMicroserviceTcpBridgeToNewFog: TransactionDecorator.generateTransaction(moveMicroserviceTcpBridgeToNewFog) +} diff --git a/src/services/volume-mount-service.js b/src/services/volume-mount-service.js new file mode 100644 index 00000000..403afb1a --- /dev/null +++ b/src/services/volume-mount-service.js @@ -0,0 +1,196 @@ +const Errors = require('../helpers/errors') +const ErrorMessages = require('../helpers/error-messages') +const AppHelper = require('../helpers/app-helper') +const VolumeMountingManager = 
require('../data/managers/volume-mounting-manager') +const SecretManager = require('../data/managers/secret-manager') +const ConfigMapManager = require('../data/managers/config-map-manager') +const ChangeTrackingService = require('./change-tracking-service') +const FogManager = require('../data/managers/iofog-manager') +const TransactionDecorator = require('../decorators/transaction-decorator') +const Validator = require('../schemas') + +async function findVolumeMountedFogNodes (volumeMountName, transaction) { + const volumeMount = await VolumeMountingManager.findOne({ + name: volumeMountName + }, transaction) + + if (!volumeMount) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.VOLUME_MOUNT_NOT_FOUND, volumeMountName)) + } + + const fogs = await volumeMount.getFogs({}, transaction) + return fogs.map(fog => fog.uuid) +} + +async function _updateChangeTrackingForFogs (fogUuids, transaction) { + for (const fogUuid of fogUuids) { + await ChangeTrackingService.update(fogUuid, ChangeTrackingService.events.volumeMounts, transaction) + } +} + +async function listVolumeMountsEndpoint (transaction) { + return VolumeMountingManager.findAll({}, transaction) +} + +async function getVolumeMountEndpoint (name, transaction) { + const volumeMount = await VolumeMountingManager.findOne({ + name: name + }, transaction) + + if (!volumeMount) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.VOLUME_MOUNT_NOT_FOUND, name)) + } + + return volumeMount +} + +async function createVolumeMountEndpoint (data, transaction) { + await Validator.validate(data, Validator.schemas.volumeMountCreate) + // Validate that either secretName or configMapName is provided + if (!data.secretName && !data.configMapName) { + throw new Errors.ValidationError('Must specify either secretName or configMapName') + } + + // Validate that both are not provided + if (data.secretName && data.configMapName) { + throw new Errors.ValidationError('Cannot specify both 
secretName and configMapName') + } + + const existingVolumeMount = await VolumeMountingManager.findOne({ name: data.name }, transaction) + if (existingVolumeMount) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.DUPLICATE_NAME, data.name)) + } + + // Check if secret/configMap exists + if (data.secretName) { + const secret = await SecretManager.getSecret(data.secretName, transaction) + if (!secret) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.SECRET_NOT_FOUND, data.secretName)) + } + } + + if (data.configMapName) { + const configMap = await ConfigMapManager.getConfigMap(data.configMapName, transaction) + if (!configMap) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_NOT_FOUND, data.configMapName)) + } + } + const volumeMountObj = { + uuid: AppHelper.generateUUID(), + version: 1, + name: data.name, + configMapName: data.configMapName, + secretName: data.secretName + } + return VolumeMountingManager.create(volumeMountObj, transaction) +} + +async function updateVolumeMountEndpoint (name, data, transaction) { + await Validator.validate(data, Validator.schemas.volumeMountUpdate) + const volumeMount = await getVolumeMountEndpoint(name, transaction) + const existingVersion = volumeMount.version + + // Validate that either secretName or configMapName is provided + if (!data.secretName && !data.configMapName) { + throw new Errors.ValidationError('Must specify either secretName or configMapName') + } + + // Validate that both are not provided + if (data.secretName && data.configMapName) { + throw new Errors.ValidationError('Cannot specify both secretName and configMapName') + } + // Check if secret/configMap exists + if (data.secretName) { + const secret = await SecretManager.getSecret(data.secretName, transaction) + if (!secret) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.SECRET_NOT_FOUND, data.secretName)) + } + } + + if (data.configMapName) { + const 
configMap = await ConfigMapManager.getConfigMap(data.configMapName, transaction) + if (!configMap) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.CONFIGMAP_NOT_FOUND, data.configMapName)) + } + } + + // Get linked fog nodes before update + const linkedFogUuids = await findVolumeMountedFogNodes(name, transaction) + + // Update volume mount + const updatedVolumeMountObj = { + uuid: volumeMount.uuid, + version: existingVersion + 1, + name: volumeMount.name, + configMapName: data.configMapName, + secretName: data.secretName + } + await VolumeMountingManager.update({ name: name }, updatedVolumeMountObj, transaction) + + // Update change tracking for all linked fog nodes + await _updateChangeTrackingForFogs(linkedFogUuids, transaction) + + return getVolumeMountEndpoint(name, transaction) +} + +async function deleteVolumeMountEndpoint (name, transaction) { + // Get linked fog nodes before deletion + const linkedFogUuids = await findVolumeMountedFogNodes(name, transaction) + + // Delete volume mount + await VolumeMountingManager.delete({ name: name }, transaction) + + // Update change tracking for all linked fog nodes + await _updateChangeTrackingForFogs(linkedFogUuids, transaction) + + return {} +} + +async function linkVolumeMountEndpoint (name, fogUuids, transaction) { + await Validator.validate({ fogUuids }, Validator.schemas.volumeMountLink) + + const volumeMount = await getVolumeMountEndpoint(name, transaction) + + for (const fogUuid of fogUuids) { + const agent = await FogManager.findOne({ uuid: fogUuid }, transaction) + if (!agent) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.NOT_FOUND_AGENT_NAME, fogUuid)) + } + await agent.addVolumeMount(volumeMount.uuid, transaction) + } + + // Update change tracking for all linked fog nodes + await _updateChangeTrackingForFogs(fogUuids, transaction) + + return getVolumeMountEndpoint(name, transaction) +} + +async function unlinkVolumeMountEndpoint (name, fogUuids, 
transaction) { + await Validator.validate({ fogUuids }, Validator.schemas.volumeMountUnlink) + + const volumeMount = await getVolumeMountEndpoint(name, transaction) + + for (const fogUuid of fogUuids) { + const agent = await FogManager.findOne({ uuid: fogUuid }, transaction) + if (!agent) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.NOT_FOUND_AGENT_NAME, fogUuid)) + } + await agent.removeVolumeMount(volumeMount.uuid, transaction) + } + + // Update change tracking for all unlinked fog nodes + await _updateChangeTrackingForFogs(fogUuids, transaction) + + return {} +} + +module.exports = { + listVolumeMountsEndpoint: TransactionDecorator.generateTransaction(listVolumeMountsEndpoint), + getVolumeMountEndpoint: TransactionDecorator.generateTransaction(getVolumeMountEndpoint), + createVolumeMountEndpoint: TransactionDecorator.generateTransaction(createVolumeMountEndpoint), + updateVolumeMountEndpoint: TransactionDecorator.generateTransaction(updateVolumeMountEndpoint), + deleteVolumeMountEndpoint: TransactionDecorator.generateTransaction(deleteVolumeMountEndpoint), + linkVolumeMountEndpoint: TransactionDecorator.generateTransaction(linkVolumeMountEndpoint), + unlinkVolumeMountEndpoint: TransactionDecorator.generateTransaction(unlinkVolumeMountEndpoint), + findVolumeMountedFogNodes: TransactionDecorator.generateTransaction(findVolumeMountedFogNodes) +} diff --git a/src/services/yaml-parser-service.js b/src/services/yaml-parser-service.js index aa33472c..b620450c 100644 --- a/src/services/yaml-parser-service.js +++ b/src/services/yaml-parser-service.js @@ -53,7 +53,7 @@ async function parseSecretFile (fileContent, options = {}) { if (doc.kind !== 'Secret') { throw new Errors.ValidationError(`Invalid kind ${doc.kind}`) } - if (doc.metadata == null || doc.spec == null) { + if (doc.metadata == null || doc.type == null || doc.data == null) { throw new Errors.ValidationError('Invalid YAML format: missing metadata or spec') } @@ -65,15 +65,153 @@ 
async function parseSecretFile (fileContent, options = {}) { // For updates, we only need the data return { - data: doc.spec.data + data: doc.data + } + } + + // For creates, return full object + return { + name: lget(doc, 'metadata.name', undefined), + type: doc.spec.type, + data: doc.data + } + } catch (error) { + if (error instanceof Errors.ValidationError) { + throw error + } + throw new Errors.ValidationError(`Error parsing YAML: ${error.message}`) + } +} + +async function parseVolumeMountFile (fileContent, options = {}) { + try { + const doc = yaml.load(fileContent) + if (!doc || !doc.kind) { + throw new Errors.ValidationError(`Invalid YAML format: missing kind field`) + } + if (doc.kind !== 'VolumeMount') { + throw new Errors.ValidationError(`Invalid kind ${doc.kind}`) + } + if (doc.metadata == null || doc.spec == null) { + throw new Errors.ValidationError('Invalid YAML format: missing metadata or spec') + } + + // Validate that either secretName or configMapName is provided, but not both + if (doc.spec.secretName && doc.spec.configMapName) { + throw new Errors.ValidationError('Cannot specify both secretName and configMapName') + } + if (!doc.spec.secretName && !doc.spec.configMapName) { + throw new Errors.ValidationError('Must specify either secretName or configMapName') + } + + // If this is an update, validate that the name matches + if (options.isUpdate && options.volumeMountName) { + if (doc.metadata.name !== options.volumeMountName) { + throw new Errors.ValidationError(`VolumeMount name in YAML (${doc.metadata.name}) doesn't match endpoint path (${options.volumeMountName})`) + } + + return { + name: lget(doc, 'metadata.name', undefined), + secretName: doc.spec.secretName, + configMapName: doc.spec.configMapName + } + } + + // For creates, return full object + return { + name: lget(doc, 'metadata.name', undefined), + secretName: doc.spec.secretName, + configMapName: doc.spec.configMapName + } + } catch (error) { + if (error instanceof 
Errors.ValidationError) { + throw error + } + throw new Errors.ValidationError(`Error parsing YAML: ${error.message}`) + } +} + +async function parseConfigMapFile (fileContent, options = {}) { + try { + const doc = yaml.load(fileContent) + if (!doc || !doc.kind) { + throw new Errors.ValidationError(`Invalid YAML format: missing kind field`) + } + if (doc.kind !== 'ConfigMap') { + throw new Errors.ValidationError(`Invalid kind ${doc.kind}`) + } + if (doc.metadata == null || doc.data == null) { + throw new Errors.ValidationError('Invalid YAML format: missing metadata or spec') + } + + // If this is an update, validate that the name matches + if (options.isUpdate && options.configMapName) { + if (doc.metadata.name !== options.configMapName) { + throw new Errors.ValidationError(`ConfigMap name in YAML (${doc.metadata.name}) doesn't match endpoint path (${options.configMapName})`) + } + + // For updates, we only need the data + return { + data: doc.data + } + } + + // For creates, return full object + return { + name: lget(doc, 'metadata.name', undefined), + data: doc.data, + immutable: doc.spec.immutable + } + } catch (error) { + if (error instanceof Errors.ValidationError) { + throw error + } + throw new Errors.ValidationError(`Error parsing YAML: ${error.message}`) + } +} + +async function parseServiceFile (fileContent, options = {}) { + try { + const doc = yaml.load(fileContent) + if (!doc || !doc.kind) { + throw new Errors.ValidationError(`Invalid YAML format: missing kind field`) + } + if (doc.kind !== 'Service') { + throw new Errors.ValidationError(`Invalid kind ${doc.kind}`) + } + if (doc.metadata == null || doc.spec == null) { + throw new Errors.ValidationError('Invalid YAML format: missing metadata or spec') + } + + // If this is an update, validate that the name matches + if (options.isUpdate && options.serviceName) { + if (doc.metadata.name !== options.serviceName) { + throw new Errors.ValidationError(`Service name in YAML (${doc.metadata.name}) doesn't 
match endpoint path (${options.serviceName})`) + } + + // For updates, we only need the spec and tags fields + return { + name: lget(doc, 'metadata.name', undefined), + tags: lget(doc, 'metadata.tags', []), + type: doc.spec.type, + resource: doc.spec.resource, + targetPort: doc.spec.targetPort, + defaultBridge: doc.spec.defaultBridge, + servicePort: doc.spec.servicePort, + k8sType: doc.spec.k8sType } } // For creates, return full object return { name: lget(doc, 'metadata.name', undefined), + tags: lget(doc, 'metadata.tags', []), type: doc.spec.type, - data: doc.spec.data + resource: doc.spec.resource, + targetPort: doc.spec.targetPort, + defaultBridge: doc.spec.defaultBridge, + servicePort: doc.spec.servicePort, + k8sType: doc.spec.k8sType } } catch (error) { if (error instanceof Errors.ValidationError) { @@ -127,6 +265,9 @@ const parseMicroserviceYAML = async (microservice) => { agentName: lget(microservice, 'agent.name'), registryId, ...container, + rootHostAccess: lget(microservice, 'rootHostAccess', false), + pidMode: lget(microservice, 'pidMode', ''), + ipcMode: lget(microservice, 'ipcMode', ''), annotations: container.annotations != null ? 
JSON.stringify(container.annotations) : undefined, capAdd: lget(microservice, 'container.capAdd', []), capDrop: lget(microservice, 'container.capDrop', []), @@ -216,5 +357,8 @@ module.exports = { parseAppFile: parseAppFile, parseMicroserviceFile: parseMicroserviceFile, parseSecretFile: parseSecretFile, - parseCertificateFile: parseCertificateFile + parseVolumeMountFile: parseVolumeMountFile, + parseConfigMapFile: parseConfigMapFile, + parseCertificateFile: parseCertificateFile, + parseServiceFile: parseServiceFile } diff --git a/src/utils/cert.js b/src/utils/cert.js index 91398b5f..8865ab47 100644 --- a/src/utils/cert.js +++ b/src/utils/cert.js @@ -1,5 +1,6 @@ const forge = require('node-forge') const k8sClient = require('./k8s-client') +const BigNumber = require('bignumber.js') // Types for CA input const CA_TYPES = { @@ -120,7 +121,7 @@ async function loadCA (name) { if (secret.type !== 'tls') { throw new Error(`Secret ${name} is not a TLS secret`) - } + } if (!secret.data || !secret.data['tls.crt'] || !secret.data['tls.key']) { throw new Error(`Invalid TLS secret data for ${name}`) @@ -136,6 +137,18 @@ async function loadCA (name) { } } +/** + * Generates a random serial number between 0 and 2^128-1 + * @returns {string} - Serial number as a decimal string + */ +function generateSerialNumber () { + // Create a random 16-byte buffer + const randomBytes = forge.random.getBytesSync(16) + // Convert to BigNumber + const serialNumber = new BigNumber('0x' + forge.util.bytesToHex(randomBytes)) + return serialNumber.toString() +} + /** * Generates a self-signed CA certificate * @param {string} subject - CA subject name @@ -152,7 +165,7 @@ async function generateSelfSignedCA (subject, expiration = 5 * 365 * 24 * 60 * 6 // Set certificate fields cert.publicKey = keys.publicKey - cert.serialNumber = forge.util.bytesToHex(forge.random.getBytesSync(16)) + cert.serialNumber = generateSerialNumber() // Set validity period const now = new Date() @@ -224,8 +237,8 @@ async 
function getCAFromK8sSecret (secretName) { return null } if (!secret.data['tls.crt'] || !secret.data['tls.key']) { - return null - } + return null + } const cert = Buffer.from(secret.data['tls.crt'], 'base64').toString() const key = Buffer.from(secret.data['tls.key'], 'base64').toString() @@ -362,7 +375,8 @@ async function generateCertificate ({ if (host.match(/^(\d{1,3}\.){3}\d{1,3}$/)) { // IP address altNames.push({ type: 7, ip: host }) - } else { + altNames.push({ type: 2, value: host }) + } else { // DNS name altNames.push({ type: 2, value: host }) } @@ -413,31 +427,31 @@ async function generateCertificate ({ cert.setIssuer(subjectAttrs) // Add extensions for a self-signed server certificate - cert.setExtensions([ - { - name: 'basicConstraints', + cert.setExtensions([ + { + name: 'basicConstraints', cA: false, critical: true - }, - { - name: 'keyUsage', - digitalSignature: true, - keyEncipherment: true, + }, + { + name: 'keyUsage', + digitalSignature: true, + keyEncipherment: true, critical: true - }, - { - name: 'extKeyUsage', - serverAuth: true, - clientAuth: true - }, - { - name: 'subjectAltName', + }, + { + name: 'extKeyUsage', + serverAuth: true, + clientAuth: true + }, + { + name: 'subjectAltName', altNames: altNames }, { name: 'subjectKeyIdentifier' - } - ]) + } + ]) // Self-sign the certificate cert.sign(keys.privateKey, forge.md.sha256.create()) @@ -454,7 +468,7 @@ async function generateCertificate ({ 'ca.crt': Buffer.from(caCert ? 
caCert.certPem || caCert.crtData : certPem).toString('base64') } - const secret = { + const secret = { name: name, type: 'tls', data: secretData diff --git a/src/utils/k8s-client.js b/src/utils/k8s-client.js index e84afda1..65896321 100644 --- a/src/utils/k8s-client.js +++ b/src/utils/k8s-client.js @@ -1,4 +1,24 @@ const logger = require('../logger') +const config = require('../config') + +// Only set CONTROLLER_NAMESPACE if running in Kubernetes mode +let CONTROLLER_NAMESPACE = null + +function checkKubernetesEnvironment () { + const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') + return controlPlane && controlPlane.toLowerCase() === 'kubernetes' +} + +if (checkKubernetesEnvironment()) { + CONTROLLER_NAMESPACE = process.env.CONTROLLER_NAMESPACE + + // Validate that CONTROLLER_NAMESPACE is set when in Kubernetes mode + if (!CONTROLLER_NAMESPACE) { + logger.error('CONTROLLER_NAMESPACE environment variable is not set') + throw new Error('CONTROLLER_NAMESPACE environment variable is not set') + } +} + let k8sApi = null async function initializeK8sClient () { @@ -9,17 +29,18 @@ async function initializeK8sClient () { // Use the in-cluster configuration kubeConfig.loadFromCluster() + // kubeConfig.loadFromDefault() k8sApi = kubeConfig.makeApiClient(k8s.CoreV1Api) logger.info('Kubernetes client initialized successfully') } return k8sApi } -async function getSecret (secretName, namespace) { - logger.debug(`Getting secret: ${secretName} in namespace: ${namespace}`) +async function getSecret (secretName) { + logger.debug(`Getting secret: ${secretName} in namespace: ${CONTROLLER_NAMESPACE}`) try { const api = await initializeK8sClient() - const response = await api.readNamespacedSecret(secretName, namespace) + const response = await api.readNamespacedSecret(secretName, CONTROLLER_NAMESPACE) logger.info(`Successfully retrieved secret: ${secretName}`) return response.body } catch (error) { @@ -29,11 +50,11 @@ async function getSecret (secretName, 
namespace) { } // ConfigMap methods -async function getConfigMap (configMapName, namespace) { - logger.debug(`Getting ConfigMap: ${configMapName} in namespace: ${namespace}`) +async function getConfigMap (configMapName) { + logger.debug(`Getting ConfigMap: ${configMapName} in namespace: ${CONTROLLER_NAMESPACE}`) try { const api = await initializeK8sClient() - const response = await api.readNamespacedConfigMap(configMapName, namespace) + const response = await api.readNamespacedConfigMap(configMapName, CONTROLLER_NAMESPACE) logger.info(`Successfully retrieved ConfigMap: ${configMapName}`) return response.body } catch (error) { @@ -42,60 +63,82 @@ async function getConfigMap (configMapName, namespace) { } } -async function patchConfigMap (configMapName, namespace, patchData) { - logger.debug(`Patching ConfigMap: ${configMapName} in namespace: ${namespace}`) +async function patchConfigMap (configMapName, patchData) { + logger.debug(`Patching ConfigMap: ${configMapName} in namespace: ${CONTROLLER_NAMESPACE}`) try { const api = await initializeK8sClient() - // Pass all options in one object - much cleaner than multiple undefined parameters - const response = await api.patchNamespacedConfigMap( - configMapName, - namespace, - patchData, + + // Create JSON Patch operation with formatted JSON + const patch = [ { - headers: { 'Content-Type': 'application/strategic-merge-patch+json' } + op: 'replace', + path: '/data/skrouterd.json', + value: typeof patchData.data['skrouterd.json'] === 'string' + ? 
JSON.stringify(JSON.parse(patchData.data['skrouterd.json']), null, 2) + : JSON.stringify(patchData.data['skrouterd.json'], null, 2) } + ] + + const { body: configMap } = await api.patchNamespacedConfigMap( + configMapName, + CONTROLLER_NAMESPACE, + patch, + undefined, + undefined, + undefined, + undefined, + undefined, + { headers: { 'content-type': 'application/json-patch+json' } } ) logger.info(`Successfully patched ConfigMap: ${configMapName}`) - return response.body + return configMap } catch (error) { logger.error(`Failed to patch ConfigMap ${configMapName}: ${error.message}`) + if (error.response) { + logger.error(`Response status: ${error.response.status}`) + logger.error(`Response body: ${JSON.stringify(error.response.body)}`) + } throw error } } // Service methods -async function getNamespacedServices (namespace) { - logger.debug(`Listing services in namespace: ${namespace}`) +async function getNamespacedServices () { + logger.debug(`Listing services in namespace: ${CONTROLLER_NAMESPACE}`) try { const api = await initializeK8sClient() - const response = await api.listNamespacedService(namespace) - logger.info(`Successfully retrieved ${response.body.items.length} services in namespace: ${namespace}`) + const response = await api.listNamespacedService(CONTROLLER_NAMESPACE) + logger.info(`Successfully retrieved ${response.body.items.length} services in namespace: ${CONTROLLER_NAMESPACE}`) return response.body } catch (error) { - logger.error(`Failed to list services in namespace ${namespace}: ${error.message}`) + logger.error(`Failed to list services in namespace ${CONTROLLER_NAMESPACE}: ${error.message}`) throw error } } -async function createService (namespace, serviceSpec) { - logger.debug(`Creating service in namespace: ${namespace}`) +async function createService (serviceSpec) { + logger.debug(`Creating service in namespace: ${CONTROLLER_NAMESPACE}`) try { const api = await initializeK8sClient() - const response = await 
api.createNamespacedService(namespace, serviceSpec) - logger.info(`Successfully created service: ${response.body.metadata.name} in namespace: ${namespace}`) + const response = await api.createNamespacedService(CONTROLLER_NAMESPACE, serviceSpec) + logger.info(`Successfully created service: ${response.body.metadata.name} in namespace: ${CONTROLLER_NAMESPACE}`) return response.body } catch (error) { - logger.error(`Failed to create service in namespace ${namespace}: ${error.message}`) + logger.error(`Failed to create service in namespace ${CONTROLLER_NAMESPACE}: ${error.message}`) + if (error.response) { + logger.error(`Response status: ${error.response.status}`) + logger.error(`Response body: ${JSON.stringify(error.response.body)}`) + } throw error } } -async function deleteService (serviceName, namespace) { - logger.debug(`Deleting service: ${serviceName} in namespace: ${namespace}`) +async function deleteService (serviceName) { + logger.debug(`Deleting service: ${serviceName} in namespace: ${CONTROLLER_NAMESPACE}`) try { const api = await initializeK8sClient() - const response = await api.deleteNamespacedService(serviceName, namespace) - logger.info(`Successfully deleted service: ${serviceName} from namespace: ${namespace}`) + const response = await api.deleteNamespacedService(serviceName, CONTROLLER_NAMESPACE) + logger.info(`Successfully deleted service: ${serviceName} from namespace: ${CONTROLLER_NAMESPACE}`) return response.body } catch (error) { logger.error(`Failed to delete service ${serviceName}: ${error.message}`) @@ -103,43 +146,93 @@ async function deleteService (serviceName, namespace) { } } +/** + * Updates a service using strategic merge patch + * @param {string} serviceName - The name of the service to update + * @param {Object} patchData - The patch data to apply to the service + * @returns {Promise} The updated service object + */ +async function updateService (serviceName, patchData) { + logger.debug(`Updating service: ${serviceName} in namespace: 
${CONTROLLER_NAMESPACE}`) + try { + const api = await initializeK8sClient() + + // For strategic merge patch, we send the data as a map + const patch = { + spec: patchData.spec, + metadata: patchData.metadata + } + + const response = await api.patchNamespacedService( + serviceName, + CONTROLLER_NAMESPACE, + patch, + undefined, + undefined, + undefined, + undefined, + undefined, + { headers: { 'Content-Type': 'application/strategic-merge-patch+json' } } + ) + logger.info(`Successfully updated service: ${serviceName} in namespace: ${CONTROLLER_NAMESPACE}`) + return response.body + } catch (error) { + logger.error(`Failed to update service ${serviceName}: ${error.message}`) + if (error.response) { + logger.error(`Response status: ${error.response.status}`) + logger.error(`Response body: ${JSON.stringify(error.response.body)}`) + } + throw error + } +} + /** * Gets the LoadBalancer IP for a service if it exists * @param {string} serviceName - The name of the service - * @param {string} namespace - The namespace of the service - * @returns {Promise} The LoadBalancer IP or null if not available + * @param {number} maxRetries - Maximum number of retries (default: 30) + * @param {number} retryInterval - Interval between retries in milliseconds (default: 2000) + * @returns {Promise} The LoadBalancer IP or null if not available after timeout */ -async function watchLoadBalancerIP (serviceName, namespace) { - logger.debug(`Checking LoadBalancer IP for service: ${serviceName} in namespace: ${namespace}`) +async function watchLoadBalancerIP (serviceName, maxRetries = 10, retryInterval = 2000) { + logger.debug(`Checking LoadBalancer IP for service: ${serviceName} in namespace: ${CONTROLLER_NAMESPACE}`) const api = await initializeK8sClient() - try { - const response = await api.readNamespacedService(serviceName, namespace) - const service = response.body - - // Check if the service type is LoadBalancer - if (service.spec && service.spec.type === 'LoadBalancer') { - // Check if 
the LoadBalancer IP exists - if (service.status && - service.status.loadBalancer && - service.status.loadBalancer.ingress && - service.status.loadBalancer.ingress.length > 0) { - const ip = service.status.loadBalancer.ingress[0].ip - if (ip) { - logger.info(`Found LoadBalancer IP: ${ip} for service: ${serviceName}`) - return ip + + for (let attempt = 0; attempt < maxRetries; attempt++) { + try { + const response = await api.readNamespacedService(serviceName, CONTROLLER_NAMESPACE) + const service = response.body + + // Check if the service type is LoadBalancer + if (service.spec && service.spec.type === 'LoadBalancer') { + // Check if the LoadBalancer IP exists + if (service.status && + service.status.loadBalancer && + service.status.loadBalancer.ingress && + service.status.loadBalancer.ingress.length > 0) { + const ip = service.status.loadBalancer.ingress[0].ip + if (ip) { + logger.info(`Found LoadBalancer IP: ${ip} for service: ${serviceName}`) + return ip + } } + logger.info(`Service ${serviceName} is LoadBalancer type but IP not yet assigned (attempt ${attempt + 1}/${maxRetries})`) + } else { + const serviceType = service.spec && service.spec.type ? service.spec.type : 'unknown' + logger.info(`Service ${serviceName} is not of type LoadBalancer (type: ${serviceType})`) + return null } - logger.info(`Service ${serviceName} is LoadBalancer type but IP not yet assigned`) - } else { - const serviceType = service.spec && service.spec.type ? 
service.spec.type : 'unknown' - logger.info(`Service ${serviceName} is not of type LoadBalancer (type: ${serviceType})`) + + // Wait before next retry + await new Promise(resolve => setTimeout(resolve, retryInterval)) + } catch (error) { + logger.error(`Error getting LoadBalancer IP for service ${serviceName}: ${error.message}`) + // Wait before next retry even if there's an error + await new Promise(resolve => setTimeout(resolve, retryInterval)) } - // Return null if the service is not a LoadBalancer or IP is not yet assigned - return null - } catch (error) { - logger.error(`Error getting LoadBalancer IP for service ${serviceName}: ${error.message}`) - return null } + + logger.warn(`LoadBalancer IP not assigned for service ${serviceName} after ${maxRetries} attempts`) + return null } module.exports = { @@ -149,5 +242,7 @@ module.exports = { getNamespacedServices, createService, deleteService, - watchLoadBalancerIP + updateService, + watchLoadBalancerIP, + checkKubernetesEnvironment } diff --git a/src/utils/ssl-utils.js b/src/utils/ssl-utils.js index 988eca7a..6a0a1200 100644 --- a/src/utils/ssl-utils.js +++ b/src/utils/ssl-utils.js @@ -58,9 +58,14 @@ function createSSLOptions ({ key, cert, intermedKey, isBase64 = false }) { rejectUnauthorized: false } - // Only add CA if intermediate certificate is provided + // Only add CA if intermediate certificate is provided and exists if (intermedKey) { try { + // Check if file exists when not using base64 + if (!isBase64 && !fs.existsSync(intermedKey)) { + logger.warn(`Intermediate certificate file not found at path: ${intermedKey}, continuing without it`) + return sslOptions + } sslOptions.ca = loadCertificate(intermedKey, isBase64) } catch (e) { logger.warn('Intermediate certificate could not be loaded, continuing without it') diff --git a/test/backup/iofog-service.js b/test/backup/iofog-service.js new file mode 100644 index 00000000..bf90d94b --- /dev/null +++ b/test/backup/iofog-service.js @@ -0,0 +1,1250 @@ +/* + * 
******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const config = require('../config') +const fs = require('fs') +const TransactionDecorator = require('../decorators/transaction-decorator') +const AppHelper = require('../helpers/app-helper') +const FogManager = require('../data/managers/iofog-manager') +const FogProvisionKeyManager = require('../data/managers/iofog-provision-key-manager') +const FogVersionCommandManager = require('../data/managers/iofog-version-command-manager') +const ChangeTrackingService = require('./change-tracking-service') +const Errors = require('../helpers/errors') +const ErrorMessages = require('../helpers/error-messages') +const Validator = require('../schemas') +const HWInfoManager = require('../data/managers/hw-info-manager') +const USBInfoManager = require('../data/managers/usb-info-manager') +const CatalogService = require('./catalog-service') +const MicroserviceManager = require('../data/managers/microservice-manager') +const ApplicationManager = require('../data/managers/application-manager') +const TagsManager = require('../data/managers/tags-manager') +const MicroserviceService = require('./microservices-service') +const EdgeResourceService = require('./edge-resource-service') +const VolumeMountService = require('./volume-mount-service') +const RouterManager = require('../data/managers/router-manager') +const MicroserviceExtraHostManager = require('../data/managers/microservice-extra-host-manager') +const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') +const RouterConnectionManager = 
require('../data/managers/router-connection-manager') +const RouterService = require('./router-service') +const Constants = require('../helpers/constants') +const Op = require('sequelize').Op +const lget = require('lodash/get') +const CertificateService = require('./certificate-service') +const logger = require('../logger') +const ServiceManager = require('../data/managers/service-manager') + +const SITE_CA_CERT = 'pot-site-ca' +const DEFAULT_ROUTER_LOCAL_CA = 'default-router-local-ca' +const SERVICE_ANNOTATION_TAG = 'service.datasance.com/tag' + +async function checkKubernetesEnvironment () { + const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') + return controlPlane && controlPlane.toLowerCase() === 'kubernetes' +} + +async function getLocalCertificateHosts (isKubernetes, namespace) { + if (isKubernetes) { + return `router-local,router-local.${namespace},router-local.${namespace}.svc.cluster.local` + } + return '127.0.0.1,localhost,host.docker.internal,host.containers.internal' +} + +async function getSiteCertificateHosts (fogData, transaction) { + const hosts = new Set() + // Add existing hosts if isSystem + if (fogData.isSystem) { + if (fogData.host) hosts.add(fogData.host) + if (fogData.ipAddress) hosts.add(fogData.ipAddress) + if (fogData.ipAddressExternal) hosts.add(fogData.ipAddressExternal) + } + // Add default router host if not system + if (!fogData.isSystem) { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (defaultRouter.host) hosts.add(defaultRouter.host) + } + // Add upstream router hosts + // const upstreamRouters = (fogData.upstreamRouters || []).filter(uuid => uuid !== 'default-router') + // if (upstreamRouters.length) { + // for (const uuid of upstreamRouters) { + // const routerHost = await FogManager.findOne({ uuid: uuid }, transaction) + // if (routerHost.host) hosts.add(routerHost.host) + // if (routerHost.ipAddress) hosts.add(routerHost.ipAddress) + // } + // } + return 
Array.from(hosts).join(',') || 'localhost' +} + +async function _handleRouterCertificates (fogData, uuid, isRouterModeChanged, transaction) { + logger.debug('Starting _handleRouterCertificates for fog: ' + JSON.stringify({ uuid: uuid, host: fogData.host })) + + // Check if we're in Kubernetes environment + const isKubernetes = await checkKubernetesEnvironment() + const namespace = isKubernetes ? process.env.CONTROLLER_NAMESPACE : null + + // Helper to check CA existence + async function ensureCA (name, subject) { + logger.debug('Checking CA existence: ' + JSON.stringify({ name, subject })) + try { + await CertificateService.getCAEndpoint(name, transaction) + logger.debug('CA already exists: ' + name) + // CA exists + } catch (err) { + if (err.name === 'NotFoundError') { + logger.debug('CA not found, creating new CA: ' + JSON.stringify({ name, subject })) + await CertificateService.createCAEndpoint({ + name, + subject: `${subject}`, + expiration: 60, // months + type: 'self-signed' + }, transaction) + logger.debug('Successfully created CA: ' + name) + } else if (err.name === 'ConflictError') { + logger.debug('CA already exists (conflict): ' + name) + // Already exists, ignore + } else { + logger.error('Error in ensureCA - Name: ' + name + ', Subject: ' + subject + ', Error: ' + err.message + ', Type: ' + err.name + ', Code: ' + err.code) + logger.error('Stack trace: ' + err.stack) + throw err + } + } + } + + // Helper to check cert existence + async function ensureCert (name, subject, hosts, ca, shouldRecreate = false) { + logger.debug('Checking certificate existence: ' + JSON.stringify({ name, subject, hosts, ca })) + try { + const existingCert = await CertificateService.getCertificateEndpoint(name, transaction) + if (shouldRecreate && existingCert) { + logger.debug('Certificate exists and needs recreation: ' + name) + await CertificateService.deleteCertificateEndpoint(name, transaction) + logger.debug('Deleted existing certificate: ' + name) + // Create new 
certificate + await CertificateService.createCertificateEndpoint({ + name, + subject: `${subject}`, + hosts, + ca + }, transaction) + logger.debug('Successfully recreated certificate: ' + name) + } else if (!existingCert) { + logger.debug('Certificate not found, creating new certificate: ' + JSON.stringify({ name, subject, hosts, ca })) + await CertificateService.createCertificateEndpoint({ + name, + subject: `${subject}`, + hosts, + ca + }, transaction) + logger.debug('Successfully created certificate: ' + name) + } else { + logger.debug('Certificate already exists: ' + name) + } + } catch (err) { + if (err.name === 'NotFoundError') { + logger.debug('Certificate not found, creating new certificate: ' + JSON.stringify({ name, subject, hosts, ca })) + await CertificateService.createCertificateEndpoint({ + name, + subject: `${subject}`, + hosts, + ca + }, transaction) + logger.debug('Successfully created certificate: ' + name) + } else if (err.name === 'ConflictError') { + logger.debug('Certificate already exists (conflict): ' + name) + // Already exists, ignore + } else { + logger.error('Error in ensureCert - Name: ' + name + ', Subject: ' + subject + ', Hosts: ' + hosts + ', CA: ' + JSON.stringify(ca) + ', Error: ' + err.message + ', Type: ' + err.name + ', Code: ' + err.code) + logger.error('Stack trace: ' + err.stack) + throw err + } + } + } + + try { + // Always ensure SITE_CA_CERT exists + logger.debug('Ensuring SITE_CA_CERT exists') + await ensureCA(SITE_CA_CERT, SITE_CA_CERT) + + // If routerMode is 'none', only ensure DEFAULT_ROUTER_LOCAL_CA and its signed certificate + if (fogData.routerMode === 'none') { + logger.debug('Router mode is none, ensuring DEFAULT_ROUTER_LOCAL_CA exists') + await ensureCA(DEFAULT_ROUTER_LOCAL_CA, DEFAULT_ROUTER_LOCAL_CA) + logger.debug('Ensuring local-agent certificate signed by DEFAULT_ROUTER_LOCAL_CA') + const localHosts = await getLocalCertificateHosts(isKubernetes, namespace) + await ensureCert( + `${uuid}-local-agent`, + 
`${uuid}-local-agent`, + localHosts, + { type: 'direct', secretName: DEFAULT_ROUTER_LOCAL_CA }, + isRouterModeChanged + ) + logger.debug('Successfully completed _handleRouterCertificates for routerMode none') + return + } + + // For other router modes, ensure all other certificates + // Always ensure site-server cert exists + logger.debug('Ensuring site-server certificate exists') + const siteHosts = await getSiteCertificateHosts(fogData, transaction) + await ensureCert( + `${uuid}-site-server`, + `${uuid}-site-server`, + siteHosts, + { type: 'direct', secretName: SITE_CA_CERT }, + false + ) + + // Always ensure local-ca exists + logger.debug('Ensuring local-ca exists') + await ensureCA(`${uuid}-local-ca`, `${uuid}-local-ca`) + + // Always ensure local-server cert exists + logger.debug('Ensuring local-server certificate exists') + const localHosts = await getLocalCertificateHosts(isKubernetes, namespace) + await ensureCert( + `${uuid}-local-server`, + `${uuid}-local-server`, + localHosts, + { type: 'direct', secretName: `${uuid}-local-ca` }, + isRouterModeChanged + ) + + // Always ensure local-agent cert exists + logger.debug('Ensuring local-agent certificate exists') + await ensureCert( + `${uuid}-local-agent`, + `${uuid}-local-agent`, + localHosts, + { type: 'direct', secretName: `${uuid}-local-ca` }, + isRouterModeChanged + ) + + logger.debug('Successfully completed _handleRouterCertificates') + } catch (error) { + logger.error('Certificate operation failed - UUID: ' + uuid + ', RouterMode: ' + fogData.routerMode + ', Error: ' + error.message + ', Type: ' + error.name + ', Code: ' + error.code) + logger.error('Stack trace: ' + error.stack) + } +} + +async function createFogEndPoint (fogData, isCLI, transaction) { + await Validator.validate(fogData, Validator.schemas.iofogCreate) + + let createFogData = { + uuid: AppHelper.generateUUID(), + name: fogData.name, + location: fogData.location, + latitude: fogData.latitude, + longitude: fogData.longitude, + gpsMode: 
fogData.latitude || fogData.longitude ? 'manual' : undefined, + description: fogData.description, + networkInterface: fogData.networkInterface, + dockerUrl: fogData.dockerUrl, + containerEngine: fogData.containerEngine, + deploymentType: fogData.deploymentType, + diskLimit: fogData.diskLimit, + diskDirectory: fogData.diskDirectory, + memoryLimit: fogData.memoryLimit, + cpuLimit: fogData.cpuLimit, + logLimit: fogData.logLimit, + logDirectory: fogData.logDirectory, + logFileCount: fogData.logFileCount, + statusFrequency: fogData.statusFrequency, + changeFrequency: fogData.changeFrequency, + deviceScanFrequency: fogData.deviceScanFrequency, + bluetoothEnabled: fogData.bluetoothEnabled, + watchdogEnabled: fogData.watchdogEnabled, + abstractedHardwareEnabled: fogData.abstractedHardwareEnabled, + fogTypeId: fogData.fogType, + logLevel: fogData.logLevel, + dockerPruningFrequency: fogData.dockerPruningFrequency, + availableDiskThreshold: fogData.availableDiskThreshold, + isSystem: fogData.isSystem, + host: fogData.host, + routerId: null, + timeZone: fogData.timeZone + } + + createFogData = AppHelper.deleteUndefinedFields(createFogData) + + // Default router is edge + fogData.routerMode = fogData.routerMode || 'edge' + + if (fogData.isSystem && fogData.routerMode !== 'interior') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_MODE, fogData.routerMode)) + } + + if (fogData.isSystem && !!(await FogManager.findOne({ isSystem: true }, transaction))) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.DUPLICATE_SYSTEM_FOG)) + } + + const existingFog = await FogManager.findOne({ name: createFogData.name }, transaction) + if (existingFog) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.DUPLICATE_NAME, createFogData.name)) + } + + let defaultRouter, upstreamRouters + if (fogData.routerMode === 'none') { + const networkRouter = await RouterService.getNetworkRouter(fogData.networkRouter) + if 
(!networkRouter) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, !fogData.networkRouter ? Constants.DEFAULT_ROUTER_NAME : fogData.networkRouter)) + } + createFogData.routerId = networkRouter.id + } else { + defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + upstreamRouters = await RouterService.validateAndReturnUpstreamRouters(fogData.upstreamRouters, fogData.isSystem, defaultRouter) + } + + const fog = await FogManager.create(createFogData, transaction) + + // Set tags + await _setTags(fog, fogData.tags, transaction) + + // Add certificate handling + await _handleRouterCertificates(fogData, createFogData.uuid, false, transaction) + + if (fogData.routerMode !== 'none') { + if (!fogData.host && !isCLI) { + throw new Errors.ValidationError(ErrorMessages.HOST_IS_REQUIRED) + } + + await RouterService.createRouterForFog(fogData, fog.uuid, upstreamRouters) + + // --- Service Distribution Logic --- + // 1. Extract service tags + const serviceTags = await _extractServiceTags(fogData.tags) + + // 2. If service tags are not empty, find matching services + if (serviceTags.length > 0) { + const services = await _findMatchingServices(serviceTags, transaction) + + // 3. 
If services are not empty, build listeners and update router config + if (services.length > 0) { + // Get router microservice + const routerName = `router-${fog.uuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + let config = JSON.parse(routerMicroservice.config || '{}') + + // For each service, build listener and merge + for (const service of services) { + const listenerConfig = _buildTcpListenerForFog(service, fog.uuid) + config = _mergeTcpListener(config, listenerConfig) + } + + // Update router microservice config + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + + // Update change tracking + await ChangeTrackingService.update(fog.uuid, ChangeTrackingService.events.microserviceConfig, transaction) + } + } + } + + const res = { + uuid: fog.uuid + } + + await ChangeTrackingService.create(fog.uuid, transaction) + + if (fogData.abstractedHardwareEnabled) { + await _createHalMicroserviceForFog(fog, null, transaction) + } + + if (fogData.bluetoothEnabled) { + await _createBluetoothMicroserviceForFog(fog, null, transaction) + } + + await ChangeTrackingService.update(createFogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) + + return res +} + +async function _setTags (fogModel, tagsArray, transaction) { + if (tagsArray) { + let tags = [] + for (const tag of tagsArray) { + let tagModel = await TagsManager.findOne({ value: tag }, transaction) + if (!tagModel) { + tagModel = await TagsManager.create({ value: tag }, transaction) + } + tags.push(tagModel) + } + await fogModel.setTags(tags) + } +} + +async function updateFogEndPoint (fogData, isCLI, transaction) { + await Validator.validate(fogData, Validator.schemas.iofogUpdate) + + const queryFogData = { uuid: fogData.uuid } + + let 
updateFogData = { + name: fogData.name, + location: fogData.location, + latitude: fogData.latitude, + longitude: fogData.longitude, + gpsMode: fogData.latitude || fogData.longitude ? 'manual' : undefined, + description: fogData.description, + networkInterface: fogData.networkInterface, + dockerUrl: fogData.dockerUrl, + containerEngine: fogData.containerEngine, + deploymentType: fogData.deploymentType, + diskLimit: fogData.diskLimit, + diskDirectory: fogData.diskDirectory, + memoryLimit: fogData.memoryLimit, + cpuLimit: fogData.cpuLimit, + logLimit: fogData.logLimit, + logDirectory: fogData.logDirectory, + logFileCount: fogData.logFileCount, + statusFrequency: fogData.statusFrequency, + changeFrequency: fogData.changeFrequency, + deviceScanFrequency: fogData.deviceScanFrequency, + bluetoothEnabled: fogData.bluetoothEnabled, + watchdogEnabled: fogData.watchdogEnabled, + isSystem: fogData.isSystem, + abstractedHardwareEnabled: fogData.abstractedHardwareEnabled, + fogTypeId: fogData.fogType, + logLevel: fogData.logLevel, + dockerPruningFrequency: fogData.dockerPruningFrequency, + host: fogData.host, + availableDiskThreshold: fogData.availableDiskThreshold, + timeZone: fogData.timeZone + } + updateFogData = AppHelper.deleteUndefinedFields(updateFogData) + + const oldFog = await FogManager.findOne(queryFogData, transaction) + if (!oldFog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) + } + + // Update tags + await _setTags(oldFog, fogData.tags, transaction) + + if (updateFogData.name) { + const conflictQuery = isCLI + ? 
{ name: updateFogData.name, uuid: { [Op.not]: fogData.uuid } } + : { name: updateFogData.name, uuid: { [Op.not]: fogData.uuid } } + const conflict = await FogManager.findOne(conflictQuery, transaction) + if (conflict) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.DUPLICATE_NAME, updateFogData.name)) + } + } + + // Update router + // Get all router config informations + const router = await oldFog.getRouter() + const host = fogData.host || lget(router, 'host') + const upstreamRoutersConnections = router ? (await RouterConnectionManager.findAllWithRouters({ sourceRouter: router.id }, transaction) || []) : [] + const upstreamRoutersIofogUuid = fogData.upstreamRouters || await Promise.all(upstreamRoutersConnections.map(connection => connection.dest.iofogUuid)) + const routerMode = fogData.routerMode || (router ? (router.isEdge ? 'edge' : 'interior') : 'none') + const messagingPort = fogData.messagingPort || (router ? router.messagingPort : null) + const interRouterPort = fogData.interRouterPort || (router ? router.interRouterPort : null) + const edgeRouterPort = fogData.edgeRouterPort || (router ? router.edgeRouterPort : null) + let networkRouter + + const isSystem = updateFogData.isSystem === undefined ? oldFog.isSystem : updateFogData.isSystem + if (isSystem && routerMode !== 'interior') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_MODE, fogData.routerMode)) + } + + let isRouterModeChanged = false + const oldRouterMode = (router ? (router.isEdge ? 
'edge' : 'interior') : 'none') + if (fogData.routerMode && fogData.routerMode !== oldRouterMode) { + if (fogData.routerMode === 'none' || oldRouterMode === 'none') { + isRouterModeChanged = true + } + } + // Add certificate handling + await _handleRouterCertificates(fogData, fogData.uuid, isRouterModeChanged, transaction) + + if (routerMode === 'none') { + networkRouter = await RouterService.getNetworkRouter(fogData.networkRouter) + if (!networkRouter) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, !fogData.networkRouter ? Constants.DEFAULT_ROUTER_NAME : fogData.networkRouter)) + } + // Only delete previous router if there is a network router + if (router) { + // New router mode is none, delete existing router + await _deleteFogRouter(fogData, transaction) + } + } else { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + const upstreamRouters = await RouterService.validateAndReturnUpstreamRouters(upstreamRoutersIofogUuid, oldFog.isSystem, defaultRouter) + if (!router) { + // Router does not exist yet + networkRouter = await RouterService.createRouterForFog(fogData, oldFog.uuid, upstreamRouters) + // --- Service Distribution Logic --- + // 1. Extract service tags + const serviceTags = await _extractServiceTags(fogData.tags) + + // 2. If service tags are not empty, find matching services + if (serviceTags.length > 0) { + const services = await _findMatchingServices(serviceTags, transaction) + + // 3. 
If services are not empty, build listeners and update router config + if (services.length > 0) { + // Get router microservice + const routerName = `router-${fogData.uuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + let config = JSON.parse(routerMicroservice.config || '{}') + + // For each service, build listener and merge + for (const service of services) { + const listenerConfig = _buildTcpListenerForFog(service, fogData.uuid) + config = _mergeTcpListener(config, listenerConfig) + } + + // Update router microservice config + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + + // Update change tracking + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceConfig, transaction) + } + } + } else { + // Extract existing TCP connectors before updating config + const existingConnectors = await _extractExistingTcpConnectors(fogData.uuid, transaction) + // Update existing router + networkRouter = await RouterService.updateRouter(router, { + messagingPort, interRouterPort, edgeRouterPort, isEdge: routerMode === 'edge', host + }, upstreamRouters, fogData.containerEngine) + + // --- Service Distribution Logic --- + // 1. Extract service tags + const serviceTags = await _extractServiceTags(fogData.tags) + + // Get router microservice for config updates + const routerName = `router-${fogData.uuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + let config = JSON.parse(routerMicroservice.config || '{}') + + // 2. 
If service tags are not empty, find matching services and build listeners + if (serviceTags.length > 0) { + const services = await _findMatchingServices(serviceTags, transaction) + + // 3. If services are not empty, build and merge listeners + if (services.length > 0) { + // For each service, build listener and merge + for (const service of services) { + const listenerConfig = _buildTcpListenerForFog(service, fogData.uuid) + config = _mergeTcpListener(config, listenerConfig) + } + } + } + + // 4. Merge back existing connectors if any + if (existingConnectors && Object.keys(existingConnectors).length > 0) { + for (const connectorName in existingConnectors) { + const connectorObj = existingConnectors[connectorName] + config = _mergeTcpConnector(config, connectorObj) + } + } + + // Update router microservice config + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + + // Update change tracking + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceConfig, transaction) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) + } + } + updateFogData.routerId = networkRouter.id + + // If router changed, set routerChanged flag + if (updateFogData.routerId !== oldFog.routerId || updateFogData.routerMode !== oldFog.routerMode) { + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceList, transaction) + } + + await FogManager.update(queryFogData, updateFogData, transaction) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.config, transaction) + + let msChanged = false + + // Update Microservice extra hosts + if (updateFogData.host && updateFogData.host !== oldFog.host) { + await _updateMicroserviceExtraHosts(fogData.uuid, 
updateFogData.host, transaction) + } + + if (oldFog.abstractedHardwareEnabled === true && fogData.abstractedHardwareEnabled === false) { + await _deleteHalMicroserviceByFog(fogData, transaction) + msChanged = true + } + if (oldFog.abstractedHardwareEnabled === false && fogData.abstractedHardwareEnabled === true) { + await _createHalMicroserviceForFog(fogData, oldFog, transaction) + msChanged = true + } + + if (oldFog.bluetoothEnabled === true && fogData.bluetoothEnabled === false) { + await _deleteBluetoothMicroserviceByFog(fogData, transaction) + msChanged = true + } + if (oldFog.bluetoothEnabled === false && fogData.bluetoothEnabled === true) { + await _createBluetoothMicroserviceForFog(fogData, oldFog, transaction) + msChanged = true + } + + if (msChanged) { + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) + } +} + +async function _updateMicroserviceExtraHosts (fogUuid, host, transaction) { + const microserviceExtraHosts = await MicroserviceExtraHostManager.findAll({ targetFogUuid: fogUuid }, transaction) + for (const extraHost of microserviceExtraHosts) { + extraHost.value = host + await extraHost.save() + // Update tracking change for microservice + await MicroserviceExtraHostManager.updateOriginMicroserviceChangeTracking(extraHost, transaction) + } +} + +async function _updateProxyRouters (fogId, router, transaction) { + const proxyCatalog = await CatalogService.getProxyCatalogItem(transaction) + const proxyMicroservices = await MicroserviceManager.findAll({ catalogItemId: proxyCatalog.id, iofogUuid: fogId }, transaction) + for (const proxyMicroservice of proxyMicroservices) { + const config = JSON.parse(proxyMicroservice.config || '{}') + config.networkRouter = { + host: router.host, + port: router.messagingPort + } + await MicroserviceManager.updateIfChanged({ uuid: proxyMicroservice.uuid }, { config: JSON.stringify(config) }, transaction) + await ChangeTrackingService.update(fogId, 
ChangeTrackingService.events.microserviceConfig, transaction) + } +} + +async function _deleteFogRouter (fogData, transaction) { + const router = await RouterManager.findOne({ iofogUuid: fogData.uuid }, transaction) + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + + // If agent had a router, delete router and update linked routers + if (!router) { + // Router mode is none, there is nothing to do + return + } + + const routerId = router.id + const routerConnections = await RouterConnectionManager.findAllWithRouters({ [Op.or]: [{ destRouter: routerId }, { sourceRouter: routerId }] }, transaction) + // Delete all router connections, and set routerChanged flag for linked routers + if (routerConnections) { + for (const connection of routerConnections) { + const router = connection.source.id === routerId ? connection.dest : connection.source + // Delete router connection + await RouterConnectionManager.delete({ id: connection.id }, transaction) + // Update config for downstream routers + if (connection.dest.id === routerId) { + // in order to keep downstream routers in the network, we connect them to default router + if (defaultRouter) { + await RouterConnectionManager.create({ sourceRouter: router.id, destRouter: defaultRouter.id }, transaction) + } + + // Update router config + await RouterService.updateConfig(router.id, fogData.containerEngine, transaction) + // Set routerChanged flag + await ChangeTrackingService.update(router.iofogUuid, ChangeTrackingService.events.routerChanged, transaction) + } + } + } + + // Connect the agents to default router + if (defaultRouter) { + const connectedAgents = await FogManager.findAll({ routerId }, transaction) + for (const connectedAgent of connectedAgents) { + await FogManager.update({ uuid: connectedAgent.uuid }, { routerId: defaultRouter.id }, transaction) + await _updateProxyRouters(connectedAgent.uuid, defaultRouter, transaction) + await ChangeTrackingService.update(connectedAgent.uuid, 
ChangeTrackingService.events.routerChanged, transaction) + } + } + // Delete router + await RouterManager.delete({ iofogUuid: fogData.uuid }, transaction) + // Delete router msvc + const routerCatalog = await CatalogService.getRouterCatalogItem(transaction) + await MicroserviceManager.delete({ catalogItemId: routerCatalog.id, iofogUuid: fogData.uuid }, transaction) + await ApplicationManager.delete({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) +} + +async function deleteFogEndPoint (fogData, isCLI, transaction) { + await Validator.validate(fogData, Validator.schemas.iofogDelete) + + const queryFogData = { uuid: fogData.uuid } + + const fog = await FogManager.findOne(queryFogData, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) + } + + await _deleteFogRouter(fogData, transaction) + + await _processDeleteCommand(fog, transaction) +} + +function _getRouterUuid (router, defaultRouter) { + return (defaultRouter && (router.id === defaultRouter.id)) ? Constants.DEFAULT_ROUTER_NAME : router.iofogUuid +} + +async function _getFogRouterConfig (fog, transaction) { + // Get fog router config + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + const router = await fog.getRouter() + const routerConfig = { + + } + // Router mode is either interior or edge + if (router) { + routerConfig.routerMode = router.isEdge ? 'edge' : 'interior' + routerConfig.messagingPort = router.messagingPort + if (routerConfig.routerMode === 'interior') { + routerConfig.interRouterPort = router.interRouterPort + routerConfig.edgeRouterPort = router.edgeRouterPort + } + // Get upstream routers + const upstreamRoutersConnections = await RouterConnectionManager.findAllWithRouters({ sourceRouter: router.id }, transaction) + routerConfig.upstreamRouters = upstreamRoutersConnections ? 
upstreamRoutersConnections.map(r => _getRouterUuid(r.dest, defaultRouter)) : [] + } else { + routerConfig.routerMode = 'none' + const networkRouter = await RouterManager.findOne({ id: fog.routerId }, transaction) + if (networkRouter) { + routerConfig.networkRouter = _getRouterUuid(networkRouter, defaultRouter) + } + } + + return routerConfig +} + +async function _getFogEdgeResources (fog, transaction) { + const resourceAttributes = [ + 'name', + 'version', + 'description', + 'interfaceProtocol', + 'displayName', + 'displayIcon', + 'displayColor' + ] + const resources = await fog.getEdgeResources({ attributes: resourceAttributes }) + return resources.map(EdgeResourceService.buildGetObject) +} + +async function _getFogVolumeMounts (fog, transaction) { + const volumeMountAttributes = [ + 'name', + 'version', + 'configMapName', + 'secretName' + ] + const volumeMounts = await fog.getVolumeMounts({ attributes: volumeMountAttributes }) + return volumeMounts.map(vm => { + return { + name: vm.name, + version: vm.version, + configMapName: vm.configMapName, + secretName: vm.secretName + } + }) +} + +async function _getFogExtraInformation (fog, transaction) { + const routerConfig = await _getFogRouterConfig(fog, transaction) + const edgeResources = await _getFogEdgeResources(fog, transaction) + const volumeMounts = await _getFogVolumeMounts(fog, transaction) + // Transform to plain JS object + if (fog.toJSON && typeof fog.toJSON === 'function') { + fog = fog.toJSON() + } + return { ...fog, tags: _mapTags(fog), ...routerConfig, edgeResources, volumeMounts } +} + +// Map tags to string array +// Return plain JS object +function _mapTags (fog) { + return fog.tags ? 
fog.tags.map(t => t.value) : [] +} + +/** + * Extracts service-related tags from fog node tags + * @param {Array} fogTags - Array of tags from fog node + * @returns {Array} Array of service tags (e.g., ["all", "foo", "bar"]) + */ +async function _extractServiceTags (fogTags) { + if (!fogTags || !Array.isArray(fogTags)) { + return [] + } + + // Filter tags that start with SERVICE_ANNOTATION_TAG + const serviceTags = fogTags + .filter(tag => tag.startsWith(SERVICE_ANNOTATION_TAG)) + .map(tag => { + // Extract the value after the colon + const parts = tag.split(':') + return parts.length > 1 ? parts[1].trim() : '' + }) + .filter(tag => tag !== '') // Remove empty tags + + // If we have "all" tag, return just that + if (serviceTags.includes('all')) { + return ['all'] + } + + return serviceTags +} + +async function getFog (fogData, isCLI, transaction) { + await Validator.validate(fogData, Validator.schemas.iofogGet) + + const queryFogData = fogData.uuid ? { uuid: fogData.uuid } : { name: fogData.name } + + const fog = await FogManager.findOneWithTags(queryFogData, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) + } + + return _getFogExtraInformation(fog, transaction) +} + +async function getFogEndPoint (fogData, isCLI, transaction) { + return getFog(fogData, isCLI, transaction) +} + +// async function getFogListEndPoint (filters, isCLI, isSystem, transaction) { +async function getFogListEndPoint (filters, isCLI, transaction) { + await Validator.validate(filters, Validator.schemas.iofogFilters) + + // // If listing system agent through REST API, make sure user is authenticated + // if (isSystem && !isCLI && !lget('id')) { + // throw new Errors.AuthenticationError('Unauthorized') + // } + + // const queryFogData = isSystem ? { isSystem } : (isCLI ? 
{} : { isSystem: false }) + const queryFogData = {} + + let fogs = await FogManager.findAllWithTags(queryFogData, transaction) + fogs = _filterFogs(fogs, filters) + + // Map all tags + // Get router config info for all fogs + fogs = await Promise.all(fogs.map(async (fog) => _getFogExtraInformation(fog, transaction))) + return { + fogs + } +} + +async function generateProvisioningKeyEndPoint (fogData, isCLI, transaction) { + await Validator.validate(fogData, Validator.schemas.iofogGenerateProvision) + + const queryFogData = { uuid: fogData.uuid } + + const newProvision = { + iofogUuid: fogData.uuid, + provisionKey: AppHelper.generateRandomString(16), + expirationTime: new Date().getTime() + (10 * 60 * 1000) + } + + const fog = await FogManager.findOne(queryFogData, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) + } + + const provisioningKeyData = await FogProvisionKeyManager.updateOrCreate({ iofogUuid: fogData.uuid }, newProvision, transaction) + + const devMode = process.env.DEV_MODE || config.get('server.devMode') + const sslCert = process.env.SSL_CERT || config.get('server.ssl.path.cert') + const intermedKey = process.env.INTERMEDIATE_CERT || config.get('server.ssl.path.intermediateCert') + const sslCertBase64 = config.get('server.ssl.base64.cert') + const intermedKeyBase64 = config.get('server.ssl.base64.intermediateCert') + const hasFileBasedSSL = !devMode && sslCert + const hasBase64SSL = !devMode && sslCertBase64 + let caCert = '' + + if (!devMode) { + if (hasFileBasedSSL) { + try { + if (intermedKey) { + const certData = fs.readFileSync(intermedKey) + caCert = Buffer.from(certData).toString('base64') + } else { + const certData = fs.readFileSync(sslCert) + caCert = Buffer.from(certData).toString('base64') + } + } catch (error) { + throw new Errors.ValidationError('Failed to read SSL certificate file') + } + } + if (hasBase64SSL) { + if (intermedKeyBase64) { + caCert = 
intermedKeyBase64 + } else if (sslCertBase64) { + caCert = sslCertBase64 + } + } + } + return { + key: provisioningKeyData.provisionKey, + expirationTime: provisioningKeyData.expirationTime, + caCert: caCert + } +} + +async function setFogVersionCommandEndPoint (fogVersionData, isCLI, transaction) { + await Validator.validate(fogVersionData, Validator.schemas.iofogSetVersionCommand) + + const queryFogData = { uuid: fogVersionData.uuid } + + const newVersionCommand = { + iofogUuid: fogVersionData.uuid, + versionCommand: fogVersionData.versionCommand + } + + const fog = await FogManager.findOne(queryFogData, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, queryFogData.uuid)) + } + + if (!fog.isReadyToRollback && fogVersionData.versionCommand === 'rollback') { + throw new Errors.ValidationError(ErrorMessages.INVALID_VERSION_COMMAND_ROLLBACK) + } + if (!fog.isReadyToUpgrade && fogVersionData.versionCommand === 'upgrade') { + throw new Errors.ValidationError(ErrorMessages.INVALID_VERSION_COMMAND_UPGRADE) + } + + await generateProvisioningKeyEndPoint({ uuid: fogVersionData.uuid }, isCLI, transaction) + await FogVersionCommandManager.updateOrCreate({ iofogUuid: fogVersionData.uuid }, newVersionCommand, transaction) + await ChangeTrackingService.update(fogVersionData.uuid, ChangeTrackingService.events.version, transaction) +} + +async function setFogRebootCommandEndPoint (fogData, isCLI, transaction) { + await Validator.validate(fogData, Validator.schemas.iofogReboot) + + const queryFogData = { uuid: fogData.uuid } + + const fog = await FogManager.findOne(queryFogData, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) + } + + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.reboot, transaction) +} + +async function getHalHardwareInfoEndPoint (uuidObj, isCLI, transaction) { + await 
Validator.validate(uuidObj, Validator.schemas.halGet) + + const fog = await FogManager.findOne({ + uuid: uuidObj.uuid + }, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, uuidObj.uuid)) + } + + return HWInfoManager.findOne({ + iofogUuid: uuidObj.uuid + }, transaction) +} + +async function getHalUsbInfoEndPoint (uuidObj, isCLI, transaction) { + await Validator.validate(uuidObj, Validator.schemas.halGet) + + const fog = await FogManager.findOne({ + uuid: uuidObj.uuid + }, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, uuidObj.uuid)) + } + + return USBInfoManager.findOne({ + iofogUuid: uuidObj.uuid + }, transaction) +} + +function _filterFogs (fogs, filters) { + if (!filters) { + return fogs + } + + const filtered = [] + fogs.forEach((fog) => { + let isMatchFog = true + filters.some((filter) => { + const fld = filter.key + const val = filter.value + const condition = filter.condition + const isMatchField = (condition === 'equals' && fog[fld] && fog[fld] === val) || + (condition === 'has' && fog[fld] && fog[fld].includes(val)) + if (!isMatchField) { + isMatchFog = false + return false + } + }) + if (isMatchFog) { + filtered.push(fog) + } + }) + return filtered +} + +async function _processDeleteCommand (fog, transaction) { + const microservices = await MicroserviceManager.findAll({ iofogUuid: fog.uuid }, transaction) + for (const microservice of microservices) { + await MicroserviceService.deleteMicroserviceWithRoutesAndPortMappings(microservice, transaction) + } + + await ChangeTrackingService.update(fog.uuid, ChangeTrackingService.events.deleteNode, transaction) + await FogManager.delete({ uuid: fog.uuid }, transaction) +} + +async function _createHalMicroserviceForFog (fogData, oldFog, transaction) { + const halItem = await CatalogService.getHalCatalogItem(transaction) + + const halMicroserviceData = { + uuid: 
AppHelper.generateUUID(), + name: `hal-${fogData.uuid.toLowerCase()}`, + config: '{}', + catalogItemId: halItem.id, + iofogUuid: fogData.uuid, + rootHostAccess: true, + logSize: Constants.MICROSERVICE_DEFAULT_LOG_SIZE, + configLastUpdated: Date.now() + } + + const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + halMicroserviceData.applicationId = application.id + await MicroserviceManager.create(halMicroserviceData, transaction) + await MicroserviceStatusManager.create({ microserviceUuid: halMicroserviceData.uuid }, transaction) +} + +async function _deleteHalMicroserviceByFog (fogData, transaction) { + const halItem = await CatalogService.getHalCatalogItem(transaction) + const deleteHalMicroserviceData = { + iofogUuid: fogData.uuid, + catalogItemId: halItem.id + } + + const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + deleteHalMicroserviceData.applicationId = application.id + await MicroserviceManager.delete(deleteHalMicroserviceData, transaction) +} + +async function _createBluetoothMicroserviceForFog (fogData, oldFog, transaction) { + const bluetoothItem = await CatalogService.getBluetoothCatalogItem(transaction) + + const bluetoothMicroserviceData = { + uuid: AppHelper.generateUUID(), + name: `ble-${fogData.uuid.toLowerCase()}`, + config: '{}', + catalogItemId: bluetoothItem.id, + iofogUuid: fogData.uuid, + rootHostAccess: true, + logSize: Constants.MICROSERVICE_DEFAULT_LOG_SIZE, + configLastUpdated: Date.now() + } + + const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + bluetoothMicroserviceData.applicationId = application.id + await MicroserviceManager.create(bluetoothMicroserviceData, transaction) + await MicroserviceStatusManager.create({ microserviceUuid: bluetoothMicroserviceData.uuid }, transaction) +} + +async function _deleteBluetoothMicroserviceByFog 
(fogData, transaction) { + const bluetoothItem = await CatalogService.getBluetoothCatalogItem(transaction) + const deleteBluetoothMicroserviceData = { + iofogUuid: fogData.uuid, + catalogItemId: bluetoothItem.id + } + const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + deleteBluetoothMicroserviceData.applicationId = application.id + + await MicroserviceManager.delete(deleteBluetoothMicroserviceData, transaction) +} + +async function setFogPruneCommandEndPoint (fogData, isCLI, transaction) { + await Validator.validate(fogData, Validator.schemas.iofogPrune) + + const queryFogData = { uuid: fogData.uuid } + + const fog = await FogManager.findOne(queryFogData, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) + } + + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.prune, transaction) +} + +/** + * Finds services that match the fog node's service tags + * @param {Array} serviceTags - Array of service tags from fog node + * @param {Object} transaction - Database transaction + * @returns {Promise<Array>} Array of matching services + */ +async function _findMatchingServices (serviceTags, transaction) { + if (!serviceTags || serviceTags.length === 0) { + return [] + } + + // If 'all' tag is present, get all services + if (serviceTags.includes('all')) { + return ServiceManager.findAllWithTags({}, transaction) + } + + // For each service tag, find matching services + const servicesPromises = serviceTags.map(async (tag) => { + const queryData = { + '$tags.value$': `${tag}` + } + return ServiceManager.findAllWithTags(queryData, transaction) + }) + + // Wait for all queries to complete + const servicesArrays = await Promise.all(servicesPromises) + + // Flatten arrays and remove duplicates based on service name + const seen = new Set() + const uniqueServices = servicesArrays + .flat() + .filter(service => { + 
if (seen.has(service.name)) { + return false + } + seen.add(service.name) + return true + }) + + return uniqueServices +} + +/** + * Builds TCP listener configuration for a service on a specific fog node + * @param {Object} service - Service object containing name and bridgePort + * @param {string} fogNodeUuid - UUID of the fog node + * @returns {Object} TCP listener configuration + */ +function _buildTcpListenerForFog (service, fogNodeUuid) { + return { + name: `${service.name}-listener`, + port: service.bridgePort.toString(), + address: service.name, + siteId: fogNodeUuid + } +} + +/** + * Gets the router microservice configuration for a fog node + * @param {string} fogNodeUuid - UUID of the fog node + * @param {Object} transaction - Database transaction + * @returns {Promise} Router microservice configuration + */ +async function _getRouterMicroserviceConfig (fogNodeUuid, transaction) { + const routerName = `router-${fogNodeUuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + const routerConfig = JSON.parse(routerMicroservice.config || '{}') + return routerConfig +} + +/** + * Extracts existing TCP connectors from router configuration + * @param {string} fogNodeUuid - UUID of the fog node + * @param {Object} transaction - Database transaction + * @returns {Promise} Object containing TCP connectors + */ +async function _extractExistingTcpConnectors (fogNodeUuid, transaction) { + const routerConfig = await _getRouterMicroserviceConfig(fogNodeUuid, transaction) + // Return empty object if no bridges or tcpConnectors exist + if (!routerConfig.bridges || !routerConfig.bridges.tcpConnectors) { + return {} + } + + return routerConfig.bridges.tcpConnectors +} + +/** + * Merges a single TCP connector into router configuration + * @param {Object} routerConfig - Base router configuration + * 
@param {Object} connectorObj - TCP connector object (must have 'name' property) + * @returns {Object} Updated router configuration + */ +function _mergeTcpConnector (routerConfig, connectorObj) { + if (!connectorObj || !connectorObj.name) { + throw new Error('Connector object must have a name property') + } + if (!routerConfig.bridges) { + routerConfig.bridges = {} + } + if (!routerConfig.bridges.tcpConnectors) { + routerConfig.bridges.tcpConnectors = {} + } + routerConfig.bridges.tcpConnectors[connectorObj.name] = connectorObj + return routerConfig +} + +/** + * Merges a single TCP listener into router configuration + * @param {Object} routerConfig - Base router configuration + * @param {Object} listenerObj - TCP listener object (must have 'name' property) + * @returns {Object} Updated router configuration + */ +function _mergeTcpListener (routerConfig, listenerObj) { + if (!listenerObj || !listenerObj.name) { + throw new Error('Listener object must have a name property') + } + if (!routerConfig.bridges) { + routerConfig.bridges = {} + } + if (!routerConfig.bridges.tcpListeners) { + routerConfig.bridges.tcpListeners = {} + } + routerConfig.bridges.tcpListeners[listenerObj.name] = listenerObj + return routerConfig +} + +module.exports = { + createFogEndPoint: TransactionDecorator.generateTransaction(createFogEndPoint), + updateFogEndPoint: TransactionDecorator.generateTransaction(updateFogEndPoint), + deleteFogEndPoint: TransactionDecorator.generateTransaction(deleteFogEndPoint), + getFogEndPoint: TransactionDecorator.generateTransaction(getFogEndPoint), + getFogListEndPoint: TransactionDecorator.generateTransaction(getFogListEndPoint), + generateProvisioningKeyEndPoint: TransactionDecorator.generateTransaction(generateProvisioningKeyEndPoint), + setFogVersionCommandEndPoint: TransactionDecorator.generateTransaction(setFogVersionCommandEndPoint), + setFogRebootCommandEndPoint: TransactionDecorator.generateTransaction(setFogRebootCommandEndPoint), + 
getHalHardwareInfoEndPoint: TransactionDecorator.generateTransaction(getHalHardwareInfoEndPoint), + getHalUsbInfoEndPoint: TransactionDecorator.generateTransaction(getHalUsbInfoEndPoint), + getFog: getFog, + setFogPruneCommandEndPoint: TransactionDecorator.generateTransaction(setFogPruneCommandEndPoint), + _extractServiceTags, + _findMatchingServices: TransactionDecorator.generateTransaction(_findMatchingServices), + _buildTcpListenerForFog, + _getRouterMicroserviceConfig: TransactionDecorator.generateTransaction(_getRouterMicroserviceConfig), + _extractExistingTcpConnectors: TransactionDecorator.generateTransaction(_extractExistingTcpConnectors), + _mergeTcpConnector, + _mergeTcpListener, + checkKubernetesEnvironment, + _handleRouterCertificates: TransactionDecorator.generateTransaction(_handleRouterCertificates) +} diff --git a/test/backup/services-service.js b/test/backup/services-service.js new file mode 100644 index 00000000..f2dd81ca --- /dev/null +++ b/test/backup/services-service.js @@ -0,0 +1,1261 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const TransactionDecorator = require('../decorators/transaction-decorator') +const ServiceManager = require('../data/managers/service-manager') +const MicroserviceManager = require('../data/managers/microservice-manager') +const RouterManager = require('../data/managers/router-manager') +const RouterConnectionManager = require('../data/managers/router-connection-manager') +const K8sClient = require('../utils/k8s-client') +const AppHelper = require('../helpers/app-helper') +const config = require('../config') +const Errors = require('../helpers/errors') +const ErrorMessages = require('../helpers/error-messages') +const Validator = require('../schemas') +const logger = require('../logger') +const FogManager = require('../data/managers/iofog-manager') +const TagsManager = require('../data/managers/tags-manager') +const ChangeTrackingService = require('./change-tracking-service') +const ApplicationManager = require('../data/managers/application-manager') +// const { Op } = require('sequelize') + +const K8S_ROUTER_CONFIG_MAP = 'pot-router' +const SERVICE_ANNOTATION_TAG = 'service.datasance.com/tag' + +// Map service tags to string array +// Return plain JS object +function _mapTags (service) { + return service.tags ? 
service.tags.map(t => t.value) : [] +} + +async function _setTags (serviceModel, tagsArray, transaction) { + if (tagsArray) { + let tags = [] + for (const tag of tagsArray) { + let tagModel = await TagsManager.findOne({ value: tag }, transaction) + if (!tagModel) { + tagModel = await TagsManager.create({ value: tag }, transaction) + } + tags.push(tagModel) + } + await serviceModel.setTags(tags) + } +} + +async function handleServiceDistribution (serviceTags, transaction) { + // Always find fog nodes with 'all' tag + const allTaggedFogNodes = await FogManager.findAllWithTags({ + '$tags.value$': `${SERVICE_ANNOTATION_TAG}: all` + }, transaction) + + // If serviceTags is null or empty, return only fog nodes with 'all' tag + if (!serviceTags || serviceTags.length === 0) { + const uuids = allTaggedFogNodes.map(fog => fog.uuid) + return uuids + } + + // Filter tags that don't contain ':' or '=' + const filteredServiceTags = serviceTags + .filter(tag => tag != null) + .map(tag => String(tag)) + .filter(tag => !tag.includes(':') && !tag.includes('=')) + .filter(tag => tag.length > 0) + + if (filteredServiceTags.length === 0) { + const uuids = allTaggedFogNodes.map(fog => fog.uuid) + return uuids + } + + // Find fog nodes for each filtered tag + const specificTaggedFogNodes = new Set() + for (const tag of filteredServiceTags) { + const fogNodes = await FogManager.findAllWithTags({ + '$tags.value$': `${SERVICE_ANNOTATION_TAG}: ${tag}` + }, transaction) + fogNodes.forEach(fog => specificTaggedFogNodes.add(fog.uuid)) + } + + // Get all tag fog node UUIDs + const allTagUuids = allTaggedFogNodes.map(fog => fog.uuid) + + // Combine both sets of fog nodes and remove duplicates + const allFogUuids = new Set([...allTagUuids, ...Array.from(specificTaggedFogNodes)]) + + return Array.from(allFogUuids) +} + +async function checkKubernetesEnvironment () { + const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') + return controlPlane && controlPlane.toLowerCase() 
=== 'kubernetes' +} + +async function validateNonK8sType (serviceConfig) { + const isK8s = await checkKubernetesEnvironment() + if (serviceConfig.type.toLowerCase() !== 'k8s' && isK8s) { + if (!serviceConfig.k8sType || !serviceConfig.servicePort) { + throw new Errors.ValidationError('Kubernetes environment is required for k8s service type(LoadBalancer or ClusterIP or NodePort) and service port') + } + } +} + +async function _validateServiceName (serviceConfig) { + if (serviceConfig.name.toLowerCase() === 'controller' || serviceConfig.name.toLowerCase() === 'router' || serviceConfig.name.toLowerCase() === 'router-internal' || serviceConfig.name.toLowerCase() === 'docker' || serviceConfig.name.toLowerCase() === 'podman' || serviceConfig.name.toLowerCase() === 'kubernetes') { + throw new Errors.ValidationError('Service name cannot be "controller" or "router" or "router-internal" or "docker"') + } +} + +async function validateMicroserviceType (serviceConfig, transaction) { + if (serviceConfig.type.toLowerCase() !== 'microservice') { + return + } + + let microserviceUuid = serviceConfig.resource + + // If resource contains "/", it means user provided "/" + if (serviceConfig.resource.includes('/')) { + const [appName, microserviceName] = serviceConfig.resource.split('/') + const app = await ApplicationManager.findOne({ name: appName }, transaction) + if (!app) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_APPLICATION_NAME, appName)) + } + const microservice = await MicroserviceManager.findOne({ + name: microserviceName, + applicationId: app.id + }, transaction) + + if (!microservice) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_NAME, serviceConfig.resource)) + } + + microserviceUuid = microservice.uuid + } else { + // User provided UUID directly, validate if microservice exists + const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) + if 
(!microservice) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, serviceConfig.resource)) + } + } + + // Update resource to be the microservice UUID + serviceConfig.resource = microserviceUuid +} + +async function validateFogServiceType (serviceConfig, transaction) { + if (serviceConfig.type.toLowerCase() !== 'agent') { + return + } + + // First try to find fog node by name + let fogNode = await FogManager.findOne({ name: serviceConfig.resource }, transaction) + + // If not found by name, try to find by UUID + if (!fogNode) { + fogNode = await FogManager.findOne({ uuid: serviceConfig.resource }, transaction) + } + + // If still not found, throw error + if (!fogNode) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, serviceConfig.resource)) + } + + // Always set resource to be the fog node UUID + serviceConfig.resource = fogNode.uuid +} + +async function validateDefaultBridge (serviceConfig, transaction) { + // If defaultBridge is empty, set it to 'default-router' + if (!serviceConfig.defaultBridge) { + logger.debug('Setting default bridge to default-router') + serviceConfig.defaultBridge = 'default-router' + return + } + + // If service type is not microservice or agent, defaultBridge must be 'default-router' + if (serviceConfig.type.toLowerCase() !== 'microservice' && serviceConfig.type.toLowerCase() !== 'agent') { + if (serviceConfig.defaultBridge !== 'default-router') { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_DEFAULT_BRIDGE, serviceConfig.defaultBridge)) + } + return + } + + // For microservice or agent type, if user provided a UUID instead of 'default-router' + if (serviceConfig.defaultBridge !== 'default-router') { + let iofogUuid + + if (serviceConfig.type.toLowerCase() === 'microservice') { + // Get the microservice to find its iofog node + const microservice = await MicroserviceManager.findOne({ uuid: 
serviceConfig.resource }, transaction) + if (!microservice) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, serviceConfig.resource)) + } + iofogUuid = microservice.iofogUuid + } else if (serviceConfig.type.toLowerCase() === 'agent') { + // For agent type, the resource is the agent UUID + iofogUuid = serviceConfig.resource + } + + // Get the router for the iofog node + const router = await RouterManager.findOne({ iofogUuid: iofogUuid }, transaction) + if (!router) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, iofogUuid)) + } + + // Check if the router has a connection to the specified upstream router + const upstreamRouter = await RouterManager.findOne({ iofogUuid: serviceConfig.defaultBridge }, transaction) + if (!upstreamRouter) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, serviceConfig.defaultBridge)) + } + + const routerConnection = await RouterConnectionManager.findOne({ + sourceRouter: router.id, + destRouter: upstreamRouter.id + }, transaction) + + if (!routerConnection) { + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_CONNECTION, serviceConfig.defaultBridge, router.id)) + } + } +} + +async function defineBridgePort (serviceConfig, transaction) { + // Get bridge port range from environment or config + const bridgePortRangeStr = process.env.BRIDGE_PORTS_RANGE || config.get('bridgePorts.range') || '10024-65535' + const [startStr, endStr] = bridgePortRangeStr.split('-') + const start = parseInt(startStr) + const end = parseInt(endStr) + + // Get all existing services to check used ports + const existingServices = await ServiceManager.findAll({}, transaction) + const usedPorts = new Set(existingServices.map(service => service.bridgePort)) + + // Find the first available port in the range + let bridgePort = start + while (bridgePort <= end) { + if (!usedPorts.has(bridgePort)) { + 
serviceConfig.bridgePort = bridgePort + return + } + bridgePort++ + } + + // If we get here, no ports are available + throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.NO_AVAILABLE_BRIDGE_PORT, bridgePortRangeStr)) +} + +// Helper function to determine host based on service type +async function _determineConnectorHost (serviceConfig, transaction) { + switch (serviceConfig.type.toLowerCase()) { + case 'microservice': + const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) + if (microservice.rootHostAccess) { + return 'iofog' + } else { + return `iofog_${serviceConfig.resource}` + } + case 'agent': + return 'iofog' + case 'k8s': + case 'external': + return serviceConfig.resource + default: + throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) + } +} + +// Helper function to determine siteId for connector +async function _determineConnectorSiteId (serviceConfig, transaction) { + switch (serviceConfig.type.toLowerCase()) { + case 'microservice': { + const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) + if (!microservice) { + throw new Errors.NotFoundError(`Microservice not found: ${serviceConfig.resource}`) + } + return microservice.iofogUuid + } + case 'agent': + return serviceConfig.resource + case 'k8s': + case 'external': + return 'default-router' + default: + throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) + } +} + +// Helper function to determine processId for connector +async function _determineConnectorProcessId (serviceConfig) { + switch (serviceConfig.type.toLowerCase()) { + case 'microservice': + return serviceConfig.resource + case 'agent': + return `${serviceConfig.resource}-local-${serviceConfig.targetPort}` + case 'k8s': + return `${serviceConfig.resource}-k8s-${serviceConfig.targetPort}` + case 'external': + return 
`${serviceConfig.resource}-external-${serviceConfig.targetPort}` + default: + throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) + } +} + +// Helper function to build tcpConnector configuration +async function _buildTcpConnector (serviceConfig, transaction) { + const host = await _determineConnectorHost(serviceConfig, transaction) + const siteId = await _determineConnectorSiteId(serviceConfig, transaction) + const processId = await _determineConnectorProcessId(serviceConfig) + + return { + name: `${serviceConfig.name}-connector`, + host, + port: serviceConfig.targetPort.toString(), + address: serviceConfig.name, + siteId, + processId + } +} + +// Helper function to build tcpListener configuration +async function _buildTcpListener (serviceConfig, fogNodeUuid = null) { + const listener = { + name: `${serviceConfig.name}-listener`, + port: serviceConfig.bridgePort.toString(), + address: serviceConfig.name, + siteId: fogNodeUuid || serviceConfig.defaultBridge + } + return listener +} + +// Helper function to get router microservice by fog node UUID +async function _getRouterMicroservice (fogNodeUuid, transaction) { + const routerName = `router-${fogNodeUuid.toLowerCase()}` + const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) + if (!routerMicroservice) { + throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) + } + return routerMicroservice +} + +// Helper function to update router config in Kubernetes environment +async function _updateK8sRouterConfig (config) { + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const patchData = { + data: { + 'skrouterd.json': JSON.stringify(config) + } + } + + await K8sClient.patchConfigMap(K8S_ROUTER_CONFIG_MAP, patchData) +} + +// Helper function to update router microservice config +async function 
_updateRouterMicroserviceConfig (fogNodeUuid, config, transaction) { + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + + // Update microservice with the provided config + await MicroserviceManager.update( + { uuid: routerMicroservice.uuid }, + { config: JSON.stringify(config) }, + transaction + ) + + // Update change tracking + await ChangeTrackingService.update(fogNodeUuid, ChangeTrackingService.events.microserviceConfig, transaction) +} + +// Helper function to add tcpConnector to router config +async function _addTcpConnector (serviceConfig, transaction) { + const isK8s = await checkKubernetesEnvironment() + const connector = await _buildTcpConnector(serviceConfig, transaction) + const siteId = connector.siteId + + if (siteId === 'default-router') { + if (isK8s) { + // Update K8s router config + logger.debug('Updating K8s router config') + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + logger.error('ConfigMap not found:' + K8S_ROUTER_CONFIG_MAP) + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Add new connector to the array + routerConfig.push(['tcpConnector', connector]) + + await _updateK8sRouterConfig(routerConfig) + } else { + // Update default router microservice config + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + logger.error('Default router not found') + throw new Errors.NotFoundError('Default router not found') + } + const fogNodeUuid = defaultRouter.iofogUuid + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + 
currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } + } else { + // Update specific router microservice config + const fogNodeUuid = siteId + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } +} + +// Helper function to add tcpListener to router config +async function _addTcpListener (serviceConfig, transaction) { + const isK8s = await checkKubernetesEnvironment() + + // First handle K8s case if we're in K8s environment + if (isK8s) { + const k8sListener = await _buildTcpListener(serviceConfig, null) // null for K8s case + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + logger.error('ConfigMap not found:' + K8S_ROUTER_CONFIG_MAP) + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Add new listener to the array + routerConfig.push(['tcpListener', k8sListener]) + + await _updateK8sRouterConfig(routerConfig) + } + + // Handle distributed router microservice cases + // Get list of fog nodes that need this listener + const fogNodeUuids = await handleServiceDistribution(serviceConfig.tags, transaction) + + // If not in K8s environment, always include default router + if (!isK8s) { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + logger.error('Default router not found') + throw new Errors.NotFoundError('Default router not found') + } + // Add default 
router if not already in the list + if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { + fogNodeUuids.push(defaultRouter.iofogUuid) + } + } + // else if (!fogNodeUuids || fogNodeUuids.length === 0) { + // // If in K8s and no fog nodes found, add default router + // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + // if (!defaultRouter) { + // logger.error('Default router not found') + // throw new Errors.NotFoundError('Default router not found') + // } + // fogNodeUuids.push(defaultRouter.iofogUuid) + // } + + // Add listener to each router microservice + for (const fogNodeUuid of fogNodeUuids) { + try { + const listener = await _buildTcpListener(serviceConfig, fogNodeUuid) + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + if (!currentConfig.bridges) currentConfig.bridges = {} + if (!currentConfig.bridges.tcpListeners) currentConfig.bridges.tcpListeners = {} + currentConfig.bridges.tcpListeners[listener.name] = listener + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } catch (err) { + if (err instanceof Errors.NotFoundError) { + logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) + continue + } + throw err + } + } +} + +// Helper function to update tcpConnector in router config +async function _updateTcpConnector (serviceConfig, transaction) { + const isK8s = await checkKubernetesEnvironment() + const connector = await _buildTcpConnector(serviceConfig, transaction) + const siteId = connector.siteId + + if (siteId === 'default-router') { + if (isK8s) { + // Update K8s router config + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Find and update the 
existing connector + const connectorIndex = routerConfig.findIndex(item => + item[0] === 'tcpConnector' && item[1].name === connector.name + ) + if (connectorIndex !== -1) { + routerConfig[connectorIndex] = ['tcpConnector', connector] + } + + await _updateK8sRouterConfig(routerConfig) + } else { + // Update default router microservice config + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + throw new Errors.NotFoundError('Default router not found') + } + const fogNodeUuid = defaultRouter.iofogUuid + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + // Update the connector with the same name + currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } + } else { + // Update specific router microservice config + const fogNodeUuid = siteId + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + if (!currentConfig.bridges.tcpConnectors) { + currentConfig.bridges.tcpConnectors = {} + } + // Update the connector with the same name + currentConfig.bridges.tcpConnectors[connector.name] = connector + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } +} + +// Helper function to update tcpListener in router config +async function _updateTcpListener (serviceConfig, transaction) { + const isK8s = await checkKubernetesEnvironment() + + // First handle K8s case if we're in K8s environment + if (isK8s) { + const k8sListener = await _buildTcpListener(serviceConfig, null) // 
null for K8s case + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Update the listener in the array + const listenerIndex = routerConfig.findIndex(item => + item[0] === 'tcpListener' && item[1].name === k8sListener.name + ) + if (listenerIndex !== -1) { + routerConfig[listenerIndex] = ['tcpListener', k8sListener] + } else { + routerConfig.push(['tcpListener', k8sListener]) + } + + await _updateK8sRouterConfig(routerConfig) + } + + // Handle distributed router microservice cases + // Get list of fog nodes that need this listener + const fogNodeUuids = await handleServiceDistribution(serviceConfig.tags, transaction) + // If not in K8s environment, always include default router + if (!isK8s) { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + throw new Errors.NotFoundError('Default router not found') + } + // Add default router if not already in the list + if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { + fogNodeUuids.push(defaultRouter.iofogUuid) + } + } + // else if (!fogNodeUuids || fogNodeUuids.length === 0) { + // // If in K8s and no fog nodes found, add default router + // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + // if (!defaultRouter) { + // throw new Errors.NotFoundError('Default router not found') + // } + // fogNodeUuids.push(defaultRouter.iofogUuid) + // } + + // Update listener in each router microservice + for (const fogNodeUuid of fogNodeUuids) { + try { + const listener = await _buildTcpListener(serviceConfig, fogNodeUuid) + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (!currentConfig.bridges) { + currentConfig.bridges = {} + } + 
if (!currentConfig.bridges.tcpListeners) { + currentConfig.bridges.tcpListeners = {} + } + // Update listener with its name as key + currentConfig.bridges.tcpListeners[listener.name] = listener + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } catch (err) { + if (err instanceof Errors.NotFoundError) { + logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) + continue + } + throw err + } + } +} + +// Helper function to delete tcpConnector from router config +async function _deleteTcpConnector (serviceName, transaction) { + const isK8s = await checkKubernetesEnvironment() + const connectorName = `${serviceName}-connector` + + // Get service to determine if it's using default router + const service = await ServiceManager.findOne({ name: serviceName }, transaction) + if (!service) { + throw new Errors.NotFoundError(`Service not found: ${serviceName}`) + } + + const isDefaultRouter = service.defaultBridge === 'default-router' + let microserviceSource = null + if (service.type === 'microservice') { + microserviceSource = await MicroserviceManager.findOne({ uuid: service.resource }, transaction) + } + + if (isDefaultRouter && !microserviceSource) { + if (isK8s) { + // Update K8s router config + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Remove the connector from the array + const updatedConfig = routerConfig.filter(item => + !(item[0] === 'tcpConnector' && item[1].name === connectorName) + ) + + await _updateK8sRouterConfig(updatedConfig) + } else { + // Update default router microservice config + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + throw new Errors.NotFoundError('Default router not found') + } + const fogNodeUuid = 
defaultRouter.iofogUuid + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { + delete currentConfig.bridges.tcpConnectors[connectorName] + } + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } + } else { + let fogNodeUuid = null + if (microserviceSource) { + fogNodeUuid = microserviceSource.iofogUuid + } else { + fogNodeUuid = service.defaultBridge // This is the actual fogNodeUuid for non-default router + } + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + + if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { + delete currentConfig.bridges.tcpConnectors[connectorName] + } + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } +} + +// Helper function to delete tcpListener from router config +async function _deleteTcpListener (serviceName, transaction) { + const isK8s = await checkKubernetesEnvironment() + const listenerName = `${serviceName}-listener` + + // First handle K8s case if we're in K8s environment + if (isK8s) { + const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) + if (!configMap) { + throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) + } + + const routerConfig = JSON.parse(configMap.data['skrouterd.json']) + // Remove the listener from the array + const updatedConfig = routerConfig.filter(item => + !(item[0] === 'tcpListener' && item[1].name === listenerName) + ) + + await _updateK8sRouterConfig(updatedConfig) + } + + // Get service to determine its tags for distribution + const service = await ServiceManager.findOneWithTags({ name: serviceName }, transaction) + if (!service) { + throw new Errors.NotFoundError(`Service not found: ${serviceName}`) + } 
+ + let microserviceSource = null + if (service.type === 'microservice') { + microserviceSource = await MicroserviceManager.findOne({ uuid: service.resource }, transaction) + } + // Handle distributed router microservice cases + // Get list of fog nodes that need this listener removed + const serviceTags = service.tags.map(tag => tag.value) + const fogNodeUuids = await handleServiceDistribution(serviceTags, transaction) + + if (microserviceSource) { + if (!fogNodeUuids.includes(microserviceSource.iofogUuid)) { + fogNodeUuids.push(microserviceSource.iofogUuid) + } + } + // If not in K8s environment, always include default router + if (!isK8s) { + const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + if (!defaultRouter) { + throw new Errors.NotFoundError('Default router not found') + } + // Add default router if not already in the list + if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { + fogNodeUuids.push(defaultRouter.iofogUuid) + } + } + // else if (!fogNodeUuids || fogNodeUuids.length === 0) { + // // If in K8s and no fog nodes found, add default router + // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) + // if (!defaultRouter) { + // throw new Errors.NotFoundError('Default router not found') + // } + // fogNodeUuids.push(defaultRouter.iofogUuid) + // } + + // Remove listener from each router microservice + for (const fogNodeUuid of fogNodeUuids) { + try { + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') + if (currentConfig.bridges && currentConfig.bridges.tcpListeners) { + delete currentConfig.bridges.tcpListeners[listenerName] + } + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + } catch (err) { + if (err instanceof Errors.NotFoundError) { + logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) + continue + } + throw 
err + } + } +} + +// Helper function to create Kubernetes service +async function _createK8sService (serviceConfig, transaction) { + const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? tag : `${tag}:`) + const serviceSpec = { + apiVersion: 'v1', + kind: 'Service', + metadata: { + name: serviceConfig.name, + annotations: normalizedTags.reduce((acc, tag) => { + const [key, value] = tag.split(':') + acc[key] = value || '' + return acc + }, {}) + }, + spec: { + type: serviceConfig.k8sType, + selector: { + application: 'interior-router', + name: 'router', + 'skupper.io/component': 'router' + }, + ports: [{ + port: parseInt(serviceConfig.bridgePort), + targetPort: parseInt(serviceConfig.servicePort), + protocol: 'TCP' + }] + } + } + + const service = await K8sClient.createService(serviceSpec) + + // If LoadBalancer type, wait for and set the external IP + if (serviceConfig.k8sType === 'LoadBalancer') { + const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) + if (loadBalancerIP) { + await ServiceManager.update( + { name: serviceConfig.name }, + { serviceEndpoint: loadBalancerIP }, + transaction + ) + } + } + + return service +} + +// Helper function to update Kubernetes service +async function _updateK8sService (serviceConfig, transaction) { + const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? 
tag : `${tag}:`) + const patchData = { + metadata: { + annotations: normalizedTags.reduce((acc, tag) => { + const [key, value] = tag.split(':') + acc[key] = value || '' + return acc + }, {}) + }, + spec: { + type: serviceConfig.k8sType, + selector: { + application: 'interior-router', + name: 'router', + 'skupper.io/component': 'router' + }, + ports: [{ + port: parseInt(serviceConfig.bridgePort), + targetPort: parseInt(serviceConfig.servicePort), + protocol: 'TCP' + }] + } + } + + logger.debug(`Updating service: ${serviceConfig.name}`) + const service = await K8sClient.updateService(serviceConfig.name, patchData) + + // If LoadBalancer type, wait for and set the external IP + if (serviceConfig.k8sType === 'LoadBalancer') { + const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) + if (loadBalancerIP) { + await ServiceManager.update( + { name: serviceConfig.name }, + { serviceEndpoint: loadBalancerIP }, + transaction + ) + } + } + + return service +} + +// Helper function to delete Kubernetes service +async function _deleteK8sService (serviceName) { + await K8sClient.deleteService(serviceName) +} + +// Create service endpoint +async function createServiceEndpoint (serviceData, transaction) { + logger.debug('Creating service with data:' + JSON.stringify(serviceData)) + + // 1. Validate from schemas validator + await Validator.validate(serviceData, Validator.schemas.serviceCreate) + await _validateServiceName(serviceData) + + // 2. Check K8s environment if type is k8s + const isK8s = await checkKubernetesEnvironment() + if (serviceData.type === 'k8s' && !isK8s) { + throw new Errors.ValidationError('Kubernetes environment is required for k8s service type') + } + + if (serviceData.type !== 'k8s' && isK8s) { + logger.debug('Validating non k8s service type') + await validateNonK8sType(serviceData) + } + + // 3. 
Validate microservice type + if (serviceData.type === 'microservice') { + await validateMicroserviceType(serviceData, transaction) + } + + // 4. Validate agent type + if (serviceData.type === 'agent') { + logger.debug('Validating agent service type') + await validateFogServiceType(serviceData, transaction) + } + + // 5. Validate default bridge + logger.debug('Validating default bridge') + await validateDefaultBridge(serviceData, transaction) + + logger.debug('Defining bridge port') + // 6. Define bridge port + await defineBridgePort(serviceData, transaction) + + let service + try { + // Create service in database first + logger.debug('Creating service in database') + service = await ServiceManager.create(serviceData, transaction) + + // Set tags if provided + logger.debug('Setting tags') + if (serviceData.tags && serviceData.tags.length > 0) { + await _setTags(service, serviceData.tags, transaction) + } + + // 7. Add TCP connector + logger.debug('Adding TCP connector') + await _addTcpConnector(serviceData, transaction) + + // 8. Add TCP listener + logger.debug('Adding TCP listener') + try { + await _addTcpListener(serviceData, transaction) + } catch (error) { + logger.error('Error adding TCP listener:' + error.message + ' ' + error.stack + ' ' + serviceData.name) + throw error + } + + // 9. 
Create K8s service if needed + if ((serviceData.type === 'microservice' || serviceData.type === 'agent' || serviceData.type === 'external') && isK8s) { + logger.debug('Creating K8s service') + try { + await _createK8sService(serviceData, transaction) + } catch (error) { + logger.error('Error creating K8s service:' + error.message + ' ' + error.stack + ' ' + serviceData.name) + throw error + } + } + + return service + } catch (error) { + logger.error('Error creating service:' + error.message + ' ' + error.stack + ' ' + serviceData.name + ' ' + serviceData.type + ' ' + error.validationStep) + + // If any error occurs after service creation, clean up + if (service) { + try { + // Delete K8s service if it was created + if ((serviceData.type === 'microservice' || serviceData.type === 'agent' || serviceData.type === 'external') && isK8s) { + await _deleteK8sService(serviceData.name) + } + // Delete TCP listener if it was added + await _deleteTcpListener(serviceData.name, transaction) + // Delete TCP connector if it was added + await _deleteTcpConnector(serviceData.name, transaction) + // Finally delete the service from database + await ServiceManager.delete({ id: service.id }, transaction) + } catch (cleanupError) { + logger.error('Error during service creation cleanup:', { + error: cleanupError.message, + stack: cleanupError.stack, + serviceName: serviceData.name + }) + } + } + + // Wrap the error in a proper error type if it's not already + if (!(error instanceof Errors.ValidationError) && + !(error instanceof Errors.NotFoundError) && + !(error instanceof Errors.TransactionError) && + !(error instanceof Errors.DuplicatePropertyError)) { + throw new Errors.ValidationError(`Failed to create service: ${error.message}`) + } + throw error + } +} + +// Update service endpoint +async function updateServiceEndpoint (serviceName, serviceData, transaction) { + // 1. 
Validate from schemas validator + await Validator.validate(serviceData, Validator.schemas.serviceUpdate) + await _validateServiceName(serviceData) + + // 2. Get existing service + const existingService = await ServiceManager.findOneWithTags({ name: serviceName }, transaction) + if (!existingService) { + throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) + } + + // 3. Check if service type is being changed + if (serviceData.type && serviceData.type !== existingService.type) { + throw new Errors.ValidationError('Changing service type is not allowed. Please delete the service and create a new one with the desired type.') + } + + // 4. Check K8s environment if type is k8s + const isK8s = await checkKubernetesEnvironment() + if (existingService.type === 'k8s' && !isK8s) { + throw new Errors.ValidationError('Kubernetes environment is required for k8s service type') + } + + if (serviceData.type !== 'k8s' && isK8s) { + logger.debug('Validating non k8s service type') + await validateNonK8sType(serviceData) + } + + // 5. Validate microservice type if needed + if (existingService.type === 'microservice') { + await validateMicroserviceType(serviceData, transaction) + } + + // 6. Validate agent type if needed + if (existingService.type === 'agent') { + await validateFogServiceType(serviceData, transaction) + } + + // 7. 
Validate default bridge if needed + if (serviceData.defaultBridge) { + await validateDefaultBridge(serviceData, transaction) + } + + serviceData.bridgePort = existingService.bridgePort + + let updatedService + try { + // Update service in database + updatedService = await ServiceManager.update( + { name: serviceName }, + serviceData, + transaction + ) + + // Update tags if provided + if (serviceData.tags) { + await _setTags(existingService, serviceData.tags, transaction) + } + + // Handle resource changes + if (serviceData.resource && + JSON.stringify(serviceData.resource) !== JSON.stringify(existingService.resource)) { + // If resource changed, delete and recreate connector + await _deleteTcpConnector(serviceName, transaction) + await _addTcpConnector(serviceData, transaction) + } else { + // If resource didn't change, just update connector and listener + await _updateTcpConnector(serviceData, transaction) + // await _updateTcpListener(serviceData, transaction) + } + + // Update K8s service if needed + if ((existingService.type === 'microservice' || existingService.type === 'agent' || existingService.type === 'external') && isK8s) { + await _updateK8sService(serviceData, transaction) + } + + return updatedService + } catch (error) { + logger.error('Error updating service:', { + error: error.message, + stack: error.stack, + serviceName: serviceName, + serviceType: existingService.type + }) + + // If any error occurs after service update, attempt to rollback + if (updatedService) { + try { + // Rollback K8s service if it was updated + if ((existingService.type === 'microservice' || existingService.type === 'agent' || existingService.type === 'external') && isK8s) { + await _updateK8sService(existingService, transaction) + } + // Rollback TCP connector and listener + if (serviceData.resource && + JSON.stringify(serviceData.resource) !== JSON.stringify(existingService.resource)) { + await _deleteTcpConnector(serviceName, transaction) + await 
_addTcpConnector(existingService, transaction) + } else { + await _updateTcpConnector(existingService, transaction) + await _updateTcpListener(existingService, transaction) + } + // Rollback service in database + await ServiceManager.update( + { name: serviceName }, + existingService, + transaction + ) + } catch (rollbackError) { + logger.error('Error during service update rollback:', { + error: rollbackError.message, + stack: rollbackError.stack, + serviceName: serviceName + }) + } + } + + // Wrap the error in a proper error type if it's not already + if (!(error instanceof Errors.ValidationError) && + !(error instanceof Errors.NotFoundError) && + !(error instanceof Errors.TransactionError) && + !(error instanceof Errors.DuplicatePropertyError)) { + throw new Errors.ValidationError(`Failed to update service: ${error.message}`) + } + throw error + } +} + +// Delete service endpoint +async function deleteServiceEndpoint (serviceName, transaction) { + // Get existing service + const existingService = await ServiceManager.findOne({ name: serviceName }, transaction) + if (!existingService) { + throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) + } + + const isK8s = await checkKubernetesEnvironment() + + try { + // Delete TCP connector + await _deleteTcpConnector(serviceName, transaction) + + // Delete TCP listener + await _deleteTcpListener(serviceName, transaction) + + // Delete K8s service if needed + if (isK8s && existingService.type !== 'k8s') { + await _deleteK8sService(serviceName) + } + + // Finally delete the service from database + await ServiceManager.delete({ name: serviceName }, transaction) + + return { message: `Service ${serviceName} deleted successfully` } + } catch (error) { + logger.error('Error deleting service:', { + error: error.message, + stack: error.stack, + serviceName: serviceName, + serviceType: existingService.type + }) + + // Wrap the error in a proper error type if it's not already + if (!(error instanceof 
Errors.ValidationError) && + !(error instanceof Errors.NotFoundError) && + !(error instanceof Errors.TransactionError) && + !(error instanceof Errors.DuplicatePropertyError)) { + throw new Errors.ValidationError(`Failed to delete service: ${error.message}`) + } + throw error + } +} + +// List services endpoint +async function getServicesListEndpoint (transaction) { + const queryFogData = {} + const services = await ServiceManager.findAllWithTags(queryFogData, transaction) + return services.map(service => ({ + name: service.name, + type: service.type, + resource: service.resource, + defaultBridge: service.defaultBridge, + bridgePort: service.bridgePort, + targetPort: service.targetPort, + servicePort: service.servicePort, + k8sType: service.k8sType, + serviceEndpoint: service.serviceEndpoint, + tags: _mapTags(service) + })) +} + +// Get service endpoint +async function getServiceEndpoint (serviceName, transaction) { + const queryFogData = { name: serviceName } + const service = await ServiceManager.findOneWithTags(queryFogData, transaction) + if (!service) { + throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) + } + return { + name: service.name, + type: service.type, + resource: service.resource, + defaultBridge: service.defaultBridge, + bridgePort: service.bridgePort, + targetPort: service.targetPort, + servicePort: service.servicePort, + k8sType: service.k8sType, + serviceEndpoint: service.serviceEndpoint, + tags: _mapTags(service) + } +} + +async function moveMicroserviceTcpBridgeToNewFog (service, newFogUuid, oldFogUuid, transaction) { + const listenerName = `${service.name}-listener` + const connectorName = `${service.name}-connector` + + const oldRouterMicroservice = await _getRouterMicroservice(oldFogUuid, transaction) + const oldRouterConfig = JSON.parse(oldRouterMicroservice.config || '{}') + const newRouterMicroservice = await _getRouterMicroservice(newFogUuid, transaction) + const newRouterConfig = 
JSON.parse(newRouterMicroservice.config || '{}') + + const connector = oldRouterConfig.bridges.tcpConnectors[connectorName] + const listener = oldRouterConfig.bridges.tcpListeners[listenerName] + + if (oldRouterConfig.bridges.tcpConnectors[connectorName]) { + delete oldRouterConfig.bridges.tcpConnectors[connectorName] + } + if (oldRouterConfig.bridges.tcpListeners[listenerName]) { + delete oldRouterConfig.bridges.tcpListeners[listenerName] + } + + if (!newRouterConfig.bridges) { + newRouterConfig.bridges = {} + } + if (!newRouterConfig.bridges.tcpConnectors) { + newRouterConfig.bridges.tcpConnectors = {} + } + + newRouterConfig.bridges.tcpConnectors[connectorName] = connector + newRouterConfig.bridges.tcpListeners[listenerName] = listener + + await _updateRouterMicroserviceConfig(oldFogUuid, oldRouterConfig, transaction) + await _updateRouterMicroserviceConfig(newFogUuid, newRouterConfig, transaction) +} + +module.exports = { + checkKubernetesEnvironment, + validateMicroserviceType: TransactionDecorator.generateTransaction(validateMicroserviceType), + validateNonK8sType, + _validateServiceName, + validateFogServiceType: TransactionDecorator.generateTransaction(validateFogServiceType), + validateDefaultBridge: TransactionDecorator.generateTransaction(validateDefaultBridge), + defineBridgePort: TransactionDecorator.generateTransaction(defineBridgePort), + handleServiceDistribution: TransactionDecorator.generateTransaction(handleServiceDistribution), + _mapTags, + _setTags: TransactionDecorator.generateTransaction(_setTags), + _createK8sService, + _updateK8sService, + _deleteK8sService, + createServiceEndpoint: TransactionDecorator.generateTransaction(createServiceEndpoint), + updateServiceEndpoint: TransactionDecorator.generateTransaction(updateServiceEndpoint), + deleteServiceEndpoint: TransactionDecorator.generateTransaction(deleteServiceEndpoint), + getServicesListEndpoint: TransactionDecorator.generateTransaction(getServicesListEndpoint), + getServiceEndpoint: 
TransactionDecorator.generateTransaction(getServiceEndpoint), + moveMicroserviceTcpBridgeToNewFog: TransactionDecorator.generateTransaction(moveMicroserviceTcpBridgeToNewFog) +} From 05ffaa607af282d8a711552354add53b4c8ce3ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Fri, 20 Jun 2025 01:01:37 +0300 Subject: [PATCH 09/25] websocket api server for exec session. ws message forward between user and agent microservice session. system application handling. Container scheduler, node debugger catalog item, microservice exec status --- docs/swagger-test.yaml | 7638 +++++++++++++++++ generate-swagger.sh | 3 +- package-lock.json | 5661 ++++++------ package.json | 32 +- scripts/generate-swagger.js | 778 ++ src/config/controller.yaml | 21 +- src/config/env-mapping.js | 12 + src/controllers/application-controller.js | 7 + src/controllers/iofog-controller.js | 21 +- src/controllers/microservices-controller.js | 24 + src/data/constants.js | 3 +- .../microservice-exec-status-manager.js | 37 + src/data/managers/microservice-manager.js | 30 + .../mysql/db_migration_mysql_v1.0.2.sql | 15 +- .../postgres/db_migration_pg_v1.0.2.sql | 16 +- .../sqlite/db_migration_sqlite_v1.0.2.sql | 16 +- src/data/models/index.js | 1 + src/data/models/microservice.js | 10 + src/data/models/microserviceExecStatus.js | 36 + src/data/models/microservicestatus.js | 17 +- .../seeders/mysql/db_seeder_mysql_v1.0.2.sql | 7 +- .../seeders/postgres/db_seeder_pg_v1.0.2.sql | 7 +- .../sqlite/db_seeder_sqlite_v1.0.2.sql | 7 +- src/enums/fog-state.js | 1 + src/enums/microservice-state.js | 8 +- src/jobs/fog-status-job.js | 9 +- src/jobs/stopped-app-status-job.js | 19 +- src/routes/agent.js | 45 +- src/routes/application.js | 33 + src/routes/iofog.js | 70 + src/routes/microservices.js | 139 + src/schemas/agent.js | 2 +- src/schemas/iofog.js | 37 +- src/schemas/microservice.js | 10 + src/server.js | 35 +- src/services/agent-service.js | 16 +- src/services/application-service.js | 22 +- 
src/services/catalog-service.js | 26 +- src/services/iofog-service.js | 189 +- src/services/microservices-service.js | 100 +- src/services/router-service.js | 3 + src/services/yaml-parser-service.js | 9 +- src/websocket/error-handler.js | 54 + src/websocket/server.js | 1022 +++ src/websocket/session-manager.js | 495 ++ swagger.js | 29 +- 46 files changed, 13855 insertions(+), 2917 deletions(-) create mode 100644 docs/swagger-test.yaml create mode 100644 scripts/generate-swagger.js create mode 100644 src/data/managers/microservice-exec-status-manager.js create mode 100644 src/data/models/microserviceExecStatus.js create mode 100644 src/websocket/error-handler.js create mode 100644 src/websocket/server.js create mode 100644 src/websocket/session-manager.js diff --git a/docs/swagger-test.yaml b/docs/swagger-test.yaml new file mode 100644 index 00000000..f9dc8841 --- /dev/null +++ b/docs/swagger-test.yaml @@ -0,0 +1,7638 @@ +openapi: 3.0.0 +info: + title: Datasance PoT Controller + version: 3.5.0 + description: Datasance PoT Controller REST API Documentation +servers: + - url: http://localhost:51121/api/v3 +tags: + - name: Controller + description: Manage your controller + - name: ioFog + description: Manage your agents + - name: Application + description: Manage your applications + - name: Application Template + description: Manage your application templates + - name: Catalog + description: Manage your catalog + - name: Registries + description: Manage your registries + - name: Microservices + description: Manage your microservices + - name: Routing + description: Manage your routes + - name: Router + description: Manage your Default Router + - name: Edge Resource + description: Manage your Edge Resources + - name: Diagnostics + description: Diagnostic your microservices + - name: Tunnel + description: Manage ssh tunnels + - name: Agent + description: Used by your agents to communicate with your controller + - name: User + description: Manage your users + - name: 
Secrets + description: Manage your secrets + - name: Certificates + description: Manage your certificates + - name: Services + description: Manage your services + - name: VolumeMounts + description: Manage your volume mounts + - name: ConfigMap + description: Manage your config maps +components: + securitySchemes: + authToken: + type: http + scheme: bearer + bearerFormat: JWT + description: JWT token for authentication (user or agent) + schemas: + image: + type: object + properties: + containerImage: + type: string + fogTypeId: + type: integer + minimum: 1 + maximum: 2 + required: + - containerImage + - fogTypeId + volumeMappings: + type: object + properties: + hostDestination: + type: string + containerDestination: + type: string + accessMode: + type: string + type: + enum: + - volume + - bind + required: + - hostDestination + - containerDestination + - accessMode + ports: + type: object + properties: + internal: + type: integer + external: + type: integer + protocol: + enum: + - tcp + - udp + required: + - internal + - external + extraHosts: + type: object + properties: + name: + type: string + address: + type: string + required: + - name + - address + env: + type: object + properties: + key: + type: string + value: + type: string + valueFromSecret: + type: string + valueFromConfigMap: + type: string + required: + - key + oneOf: + - required: + - value + - required: + - valueFromSecret + - required: + - valueFromConfigMap + straceData: + type: object + properties: + microserviceUuid: + type: string + buffer: + type: string + required: + - microserviceUuid + - buffer + microserviceStatus: + type: object + properties: + id: + type: string + containerId: + type: string + status: + type: string + startTime: + type: integer + operatingDuration: + type: integer + cpuUsage: + type: number + memoryUsage: + type: number + ipAddress: + type: string + ipAddressExternal: + type: string + execSessionIds: + type: array + items: + type: string + required: + - id + 
agentProvision: + type: object + properties: + type: + type: integer + minimum: 0 + maximum: 2 + key: + type: string + required: + - type + - key + agentDeprovision: + type: object + properties: + microserviceUuids: + type: array + items: + type: string + required: + - microserviceUuids + updateAgentConfig: + type: object + properties: + networkInterface: + type: string + dockerUrl: + type: string + diskLimit: + type: integer + minimum: 0 + diskDirectory: + type: string + memoryLimit: + type: integer + minimum: 0 + cpuLimit: + type: integer + minimum: 0 + logLimit: + type: integer + minimum: 0 + logDirectory: + type: string + logFileCount: + type: integer + minimum: 0 + statusFrequency: + type: integer + minimum: 0 + changeFrequency: + type: integer + minimum: 0 + deviceScanFrequency: + type: integer + minimum: 0 + watchdogEnabled: + type: boolean + latitude: + type: number + minimum: -90 + maximum: 90 + longitude: + type: number + minimum: -180 + maximum: 180 + gpsMode: + type: string + gpsDevice: + type: string + gpsScanFrequency: + type: integer + minimum: 0 + edgeGuardFrequency: + type: integer + minimum: 0 + dockerPruningFrequency: + type: integer + minimum: 0 + availableDiskThreshold: + type: integer + minimum: 0 + logLevel: + type: string + timeZone: + type: string + updateAgentStatus: + type: object + properties: + daemonStatus: + type: string + warningMessage: + type: string + daemonOperatingDuration: + type: integer + minimum: 0 + daemonLastStart: + type: integer + minimum: 0 + memoryUsage: + type: number + minimum: 0 + diskUsage: + type: number + minimum: 0 + cpuUsage: + type: number + minimum: 0 + memoryViolation: + type: boolean + diskViolation: + type: boolean + cpuViolation: + type: boolean + systemAvailableDisk: + type: integer + systemAvailableMemory: + type: integer + systemTotalCpu: + type: number + securityStatus: + type: string + securityViolationInfo: + type: string + microserviceStatus: + type: string + repositoryCount: + type: integer + 
minimum: 0 + repositoryStatus: + type: string + systemTime: + type: integer + minimum: 0 + lastStatusTime: + type: integer + minimum: 0 + ipAddress: + type: string + ipAddressExternal: + type: string + processedMessages: + type: integer + minimum: 0 + microserviceMessageCounts: + type: string + messageSpeed: + type: number + minimum: 0 + lastCommandTime: + type: integer + minimum: 0 + gpsMode: + type: string + gpsDevice: + type: string + gpsScanFrequency: + type: integer + minimum: 0 + edgeGuardFrequency: + type: integer + minimum: 0 + tunnelStatus: + type: string + version: + type: string + isReadyToUpgrade: + type: boolean + isReadyToRollback: + type: boolean + updateAgentStrace: + type: object + properties: + straceData: + type: array + items: + $ref: '#/components/schemas/straceData' + updateHardwareInfo: + type: object + properties: + info: + type: string + required: + - info + updateUsbInfo: + type: object + properties: + info: + type: string + required: + - info + applicationTemplateVariable: + type: object + properties: + key: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + description: + type: string + required: + - key + applicationTemplateCreate: + type: object + properties: + name: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + description: + type: string + variables: + type: array + items: + $ref: '#/components/schemas/applicationTemplateVariable' + required: + - name + applicationTemplateUpdate: + type: object + properties: + name: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + description: + type: string + applicationJSON: + $ref: '#/components/schemas/applicationCreate' + variables: + type: array + items: + $ref: '#/components/schemas/applicationTemplateVariable' + applicationTemplatePatch: + type: object + properties: + name: + type: string + minLength: 1 + pattern: 
^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + description: + type: string + applicationTemplateDeploy: + type: object + properties: + name: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + description: + type: string + isActivated: + type: boolean + isSystem: + type: boolean + variables: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + required: + - name + applicationCreate: + type: object + properties: + name: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + routes: + type: array + items: + $ref: '#/components/schemas/routingCreate' + microservices: + type: array + items: + $ref: '#/components/schemas/microserviceCreate' + description: + type: string + isActivated: + type: boolean + isSystem: + type: boolean + required: + - name + applicationUpdate: + type: object + properties: + name: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + microservices: + type: array + items: + $ref: '#/components/schemas/microserviceCreate' + routes: + type: array + items: + $ref: '#/components/schemas/routingCreate' + description: + type: string + isActivated: + type: boolean + isSystem: + type: boolean + applicationPatch: + type: object + properties: + name: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + description: + type: string + isActivated: + type: boolean + isSystem: + type: boolean + type: + type: object + properties: + infoType: + type: string + infoFormat: + type: string + catalogItemCreate: + type: object + properties: + name: + type: string + minLength: 1 + description: + type: string + category: + type: string + publisher: + type: string + diskRequired: + type: integer + ramRequired: + type: integer + picture: + type: string + isPublic: + type: boolean + 
registryId: + type: integer + configExample: + type: string + images: + type: array + minItems: 1 + maxItems: 2 + items: + $ref: '#/components/schemas/image' + inputType: + $ref: '#/components/schemas/type' + outputType: + $ref: '#/components/schemas/type' + required: + - name + - registryId + - images + catalogItemUpdate: + type: object + properties: + name: + type: string + minLength: 1 + description: + type: string + category: + type: string + publisher: + type: string + diskRequired: + type: integer + ramRequired: + type: integer + picture: + type: string + isPublic: + type: boolean + registryId: + type: integer + configExample: + type: string + images: + type: array + maxItems: 2 + items: + $ref: '#/components/schemas/image' + inputType: + $ref: '#/components/schemas/type' + outputType: + $ref: '#/components/schemas/type' + caCreate: + type: object + properties: + name: + type: string + minLength: 1 + maxLength: 255 + subject: + type: string + minLength: 1 + expiration: + type: integer + minimum: 0 + type: + type: string + enum: + - k8s-secret + - direct + - self-signed + secretName: + type: string + required: + - type + - name + allOf: + - {} + - {} + certificateCreate: + type: object + properties: + name: + type: string + minLength: 1 + maxLength: 255 + subject: + type: string + minLength: 1 + hosts: + type: string + minLength: 1 + expiration: + type: integer + minimum: 0 + ca: + type: object + properties: + type: + type: string + enum: + - k8s-secret + - direct + - self-signed + secretName: + type: string + required: + - type + required: + - name + - subject + - hosts + caResponse: + type: object + properties: + name: + type: string + subject: + type: string + type: + type: string + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - name + - subject + - type + - created_at + - updated_at + certificateResponse: + type: object + properties: + name: + type: string + subject: + type: string + hosts: + 
type: string + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - name + - subject + - hosts + - created_at + - updated_at + caListResponse: + type: object + properties: + cas: + type: array + items: + type: object + properties: + name: + type: string + subject: + type: string + type: + type: string + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - name + - subject + - type + - created_at + - updated_at + required: + - cas + certificateListResponse: + type: object + properties: + certificates: + type: array + items: + type: object + properties: + name: + type: string + subject: + type: string + hosts: + type: string + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - name + - subject + - hosts + - created_at + - updated_at + required: + - certificates + configMapCreate: + type: object + properties: + name: + type: string + minLength: 1 + maxLength: 255 + immutable: + type: boolean + data: + type: object + required: + - name + - data + configMapUpdate: + type: object + properties: + immutable: + type: boolean + data: + type: object + required: + - data + configMapResponse: + type: object + properties: + id: + type: integer + name: + type: string + immutable: + type: boolean + data: + type: object + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - id + - name + - data + - created_at + - updated_at + configMapListResponse: + type: object + properties: + configMaps: + type: array + items: + type: object + properties: + id: + type: integer + name: + type: string + immutable: + type: boolean + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - id + - name + - created_at + - updated_at + required: + - configMaps + configUpdate: + type: object + properties: + port: + 
type: integer + minimum: 0 + maximum: 65535 + sslCert: + type: string + sslKey: + type: string + intermediateCert: + type: string + logDir: + type: string + logSize: + type: integer + configElement: + type: object + properties: + key: + type: string + minLength: 1 + value: + type: string + required: + - key + - value + profile: + type: object + properties: {} + straceStateUpdate: + type: object + properties: + enable: + type: boolean + required: + - enable + straceGetData: + type: object + properties: + format: + enum: + - string + - file + required: + - format + stracePostToFtp: + type: object + properties: + ftpHost: + type: string + ftpPort: + type: integer + minimum: 0 + ftpUser: + type: string + ftpPass: + type: string + ftpDestDir: + type: string + required: + - ftpHost + - ftpPort + - ftpUser + - ftpPass + - ftpDestDir + edgeResourceDisplay: + type: object + properties: + name: + type: string + color: + type: string + pattern: ^(#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{8}))|(rgb(s*(?:(d{1,3})s*,?){3}))|(rgba(s*(?:(d{1,3})s*,?){4}))|$ + icon: + type: string + edgeResourceHTTPEndpoint: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + description: + type: string + method: + enum: + - GET + - POST + - PUT + - PATCH + - DELETE + - HEAD + - OPTIONS + url: + type: string + requestType: + type: string + responseType: + type: string + requestPayloadExample: + type: string + responsePayloadExample: + type: string + edgeResource: + type: object + properties: + display: + $ref: '#/components/schemas/edgeResourceDisplay' + name: + type: string + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + version: + type: string + minLength: 1 + pattern: ^v?(0|[1-9]d*).(0|[1-9]d*).(0|[1-9]d*)(?:-((?:0|[1-9]d*|d*[a-zA-Z-][0-9a-zA-Z-]*)(?:.(?:0|[1-9]d*|d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:[+]([0-9a-zA-Z-]+(?:.[0-9a-zA-Z-]+)*))?$ + description: + type: string + 
orchestrationTags: + type: array + items: + type: string + interfaceProtocol: + enum: + - http + - https + - ws + - wss + oneOf: + - properties: + interfaceProtocol: + enum: + - http + - https + - ws + - wss + interface: + type: object + properties: + endpoints: + type: array + items: + $ref: /edgeResourceHTTPEndpoint + edgeResourceCreate: + type: object + allOf: + - $ref: '#/components/schemas/edgeResource' + required: + - name + - version + edgeResourceUpdate: + type: object + allOf: + - $ref: '#/components/schemas/edgeResource' + filter: + type: object + properties: + key: + type: string + value: + type: string + condition: + enum: + - has + - equals + required: + - key + - value + - condition + iofogTag: + type: string + iofogCreate: + type: object + properties: + name: + type: string + minLength: 1 + location: + type: string + latitude: + type: number + minimum: -90 + maximum: 90 + longitude: + type: number + minimum: -180 + maximum: 180 + description: + type: string + networkInterface: + type: string + dockerUrl: + type: string + containerEngine: + type: string + enum: + - docker + - podman + deploymentType: + type: string + enum: + - native + - container + diskLimit: + type: integer + minimum: 0 + diskDirectory: + type: string + memoryLimit: + type: integer + minimum: 0 + cpuLimit: + type: integer + minimum: 0 + logLimit: + type: integer + minimum: 0 + logDirectory: + type: string + logFileCount: + type: integer + minimum: 0 + statusFrequency: + type: integer + minimum: 0 + changeFrequency: + type: integer + minimum: 0 + deviceScanFrequency: + type: integer + minimum: 0 + bluetoothEnabled: + type: boolean + watchdogEnabled: + type: boolean + abstractedHardwareEnabled: + type: boolean + fogType: + type: integer + minimum: 0 + maximum: 2 + dockerPruningFrequency: + type: integer + minimum: 0 + availableDiskThreshold: + type: integer + minimum: 0 + logLevel: + type: string + isSystem: + type: boolean + routerMode: + enum: + - none + - edge + - interior + 
default: edge + messagingPort: + type: integer + minimum: 1 + maximum: 65535 + interRouterPort: + type: integer + minimum: 1 + maximum: 65535 + edgeRouterPort: + type: integer + minimum: 1 + maximum: 65535 + host: + type: string + tags: + type: array + items: + $ref: '#/components/schemas/iofogTag' + upstreamRouters: + type: array + items: + type: string + minLength: 1 + networkRouter: + type: string + timeZone: + type: string + anyOf: + - properties: + routerMode: {} + required: + - interRouterPort + - edgeRouterPort + - host + - properties: + routerMode: {} + required: + - host + - properties: + routerMode: {} + required: + - name + - fogType + iofogUpdate: + type: object + properties: + uuid: + type: string + name: + type: string + minLength: 1 + location: + type: string + latitude: + type: number + minimum: -90 + maximum: 90 + longitude: + type: number + minimum: -180 + maximum: 180 + description: + type: string + networkInterface: + type: string + dockerUrl: + type: string + containerEngine: + type: string + enum: + - docker + - podman + deploymentType: + type: string + enum: + - native + - container + diskLimit: + type: integer + minimum: 0 + diskDirectory: + type: string + memoryLimit: + type: integer + minimum: 0 + cpuLimit: + type: integer + minimum: 0 + logLimit: + type: integer + minimum: 0 + logDirectory: + type: string + logFileCount: + type: integer + minimum: 0 + statusFrequency: + type: integer + minimum: 0 + changeFrequency: + type: integer + minimum: 0 + deviceScanFrequency: + type: integer + minimum: 0 + bluetoothEnabled: + type: boolean + watchdogEnabled: + type: boolean + abstractedHardwareEnabled: + type: boolean + fogType: + type: integer + minimum: 0 + maximum: 2 + dockerPruningFrequency: + type: integer + minimum: 0 + availableDiskThreshold: + type: integer + minimum: 0 + logLevel: + type: string + isSystem: + type: boolean + routerMode: + enum: + - none + - edge + - interior + messagingPort: + type: integer + minimum: 1 + maximum: 65535 + 
interRouterPort: + type: integer + minimum: 1 + maximum: 65535 + edgeRouterPort: + type: integer + minimum: 1 + maximum: 65535 + host: + type: string + upstreamRouters: + type: array + items: + type: string + minLength: 1 + tags: + type: array + items: + $ref: '#/components/schemas/iofogTag' + networkRouter: + type: string + minLength: 1 + timeZone: + type: string + anyOf: + - properties: + routerMode: {} + required: + - interRouterPort + - edgeRouterPort + - host + - properties: + routerMode: {} + - properties: + routerMode: {} + required: + - uuid + iofogDelete: + type: object + properties: + uuid: + type: string + required: + - uuid + iofogGet: + type: object + properties: + uuid: + type: string + name: + type: string + oneOf: + - required: + - uuid + - required: + - name + iofogGenerateProvision: + type: object + properties: + uuid: + type: string + required: + - uuid + iofogSetVersionCommand: + type: object + properties: + uuid: + type: string + versionCommand: + enum: + - upgrade + - rollback + required: + - uuid + - versionCommand + iofogReboot: + type: object + properties: + uuid: + type: string + required: + - uuid + iofogFilters: + type: array + items: + $ref: '#/components/schemas/filter' + halGet: + type: object + properties: + uuid: + type: string + required: + - uuid + iofogPrune: + type: object + properties: + uuid: + type: string + required: + - uuid + defaultRouterCreate: + type: object + properties: + messagingPort: + type: integer + minimum: 1 + maximum: 65535 + interRouterPort: + type: integer + minimum: 1 + maximum: 65535 + edgeRouterPort: + type: integer + minimum: 1 + maximum: 65535 + requireSsl: + type: string + sslProfile: + type: string + saslMechanisms: + type: string + authenticatePeer: + type: string + caCert: + type: string + tlsCert: + type: string + tlsKey: + type: string + host: + type: string + required: + - host + microserviceCreate: + type: object + properties: + name: + type: string + pattern: 
^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + config: + type: string + annotations: + type: string + catalogItemId: + type: integer + minimum: 4 + images: + type: array + maxItems: 2 + items: + $ref: '#/components/schemas/image' + registryId: + type: integer + application: + anyOf: + - type: string + - type: number + iofogUuid: + type: string + agentName: + type: string + rootHostAccess: + type: boolean + schedule: + type: integer + minimum: 0 + maximum: 100 + logSize: + type: integer + imageSnapshot: + type: string + volumeMappings: + type: array + items: + $ref: '#/components/schemas/volumeMappings' + ports: + type: array + items: + $ref: '#/components/schemas/ports' + extraHosts: + type: array + items: + $ref: '#/components/schemas/extraHosts' + routes: + type: array + items: + type: string + env: + type: array + items: + $ref: '#/components/schemas/env' + cmd: + type: array + items: + type: string + cdiDevices: + type: array + items: + type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string + runAsUser: + type: string + platform: + type: string + runtime: + type: string + pubTags: + type: array + items: + type: string + subTags: + type: array + items: + type: string + required: + - name + microserviceUpdate: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + config: + type: string + annotations: + type: string + rebuild: + type: boolean + iofogUuid: + type: string + agentName: + type: string + rootHostAccess: + type: boolean + logSize: + type: integer + minimum: 0 + schedule: + type: integer + minimum: 0 + maximum: 100 + volumeMappings: + type: array + items: + $ref: '#/components/schemas/volumeMappings' + images: + type: array + maxItems: 2 + minItems: 1 + items: + $ref: '#/components/schemas/image' + ports: + type: array + items: + $ref: '#/components/schemas/ports' + extraHosts: + type: array + items: + 
$ref: '#/components/schemas/extraHosts' + env: + type: array + items: + $ref: '#/components/schemas/env' + cmd: + type: array + items: + type: string + cdiDevices: + type: array + items: + type: string + capAdd: + type: array + items: + type: string + capDrop: + type: array + items: + type: string + runAsUser: + type: string + platform: + type: string + runtime: + type: string + pubTags: + type: array + items: + type: string + subTags: + type: array + items: + type: string + portsCreate: + type: object + properties: + internal: + type: integer + external: + type: integer + protocol: + enum: + - tcp + - udp + required: + - internal + - external + microserviceDelete: + type: object + properties: + withCleanup: + type: boolean + additionalProperties: + type: object + additionalProperties: true + registryCreate: + type: object + properties: + url: + type: string + minLength: 1 + isPublic: + type: boolean + username: + type: string + minLength: 1 + password: + type: string + email: + type: string + pattern: ^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$ + requiresCert: + type: boolean + certificate: + type: string + required: + - url + - isPublic + - username + - password + - email + registryDelete: + type: object + properties: + id: + type: integer + required: + - id + registryUpdate: + type: object + properties: + url: + type: string + minLength: 1 + isPublic: + type: boolean + username: + type: string + minLength: 1 + password: + type: string + email: + type: string + pattern: ^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$ + requiresCert: + type: boolean + certificate: + type: string + routingCreate: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + oneOf: + - properties: + from: + 
type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + to: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + application: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + required: + - name + - from + - to + - properties: + sourceMicroserviceUuid: + type: string + destMicroserviceUuid: + type: string + required: + - name + - sourceMicroserviceUuid + - destMicroserviceUuid + routingUpdate: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + oneOf: + - properties: + from: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + to: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + application: + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + required: + - from + - to + - application + - properties: + sourceMicroserviceUuid: + type: string + destMicroserviceUuid: + type: string + required: + - sourceMicroserviceUuid + - destMicroserviceUuid + secretCreate: + type: object + properties: + name: + type: string + minLength: 1 + maxLength: 255 + type: + type: string + enum: + - opaque + - tls + data: + type: object + required: + - name + - type + - data + secretUpdate: + type: object + properties: + data: + type: object + required: + - data + secretResponse: + type: object + properties: + id: + type: integer + name: + type: string + type: + type: string + enum: + - opaque + - tls + data: + type: object + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - id + - name + - type + - data + - created_at + - updated_at + secretListResponse: + type: object + properties: + secrets: + type: array + items: + type: object + properties: + id: + type: integer + name: + type: string + type: + type: 
string + enum: + - opaque + - tls + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + required: + - id + - name + - type + - created_at + - updated_at + required: + - secrets + serviceTag: + type: string + serviceCreate: + type: object + required: + - name + - type + - resource + - targetPort + properties: + name: + type: string + pattern: ^[a-z0-9]([a-z0-9-]*[a-z0-9])?$ + type: + type: string + enum: + - microservice + - k8s + - agent + - external + resource: + type: string + required: + - cpu + - memory + targetPort: + type: integer + defaultBridge: + type: string + servicePort: + type: integer + k8sType: + type: string + enum: + - LoadBalancer + - ClusterIP + - NodePort + tags: + type: array + items: + $ref: '#/components/schemas/serviceTag' + serviceUpdate: + type: object + required: + - name + properties: + name: + type: string + pattern: ^[a-z0-9]([a-z0-9-]*[a-z0-9])?$ + type: + type: string + enum: + - microservice + - k8s + - agent + - external + resource: + type: string + targetPort: + type: integer + defaultBridge: + type: string + servicePort: + type: integer + k8sType: + type: string + enum: + - LoadBalancer + - ClusterIP + - NodePort + tags: + type: array + items: + $ref: '#/components/schemas/serviceTag' + tunnelCreate: + type: object + properties: + iofogUuid: + type: string + username: + type: string + minLength: 1 + password: + type: string + rsakey: + type: string + lport: + type: integer + minimum: 0 + maximum: 65535 + rport: + type: integer + minimum: 0 + maximum: 65535 + required: + - iofogUuid + - username + - password + - lport + - rport + login: + type: object + properties: + email: + type: string + pattern: ^(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$ + password: + type: string + totp: + type: string + required: + - email + - password + refresh: + type: object + properties: + refreshToken: 
+ type: string + required: + - refreshToken + volumeMountCreate: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9]([a-z0-9-]*[a-z0-9])?$ + secretName: + type: string + configMapName: + type: string + required: + - name + oneOf: + - required: + - secretName + - required: + - configMapName + volumeMountUpdate: + type: object + properties: + name: + type: string + pattern: ^[a-z0-9]([a-z0-9-]*[a-z0-9])?$ + secretName: + type: string + configMapName: + type: string + oneOf: + - required: + - secretName + - required: + - configMapName + volumeMountLink: + type: object + properties: + fogUuids: + type: array + items: + type: string + minItems: 1 + required: + - fogUuids + volumeMountUnlink: + type: object + properties: + fogUuids: + type: array + items: + type: string + minItems: 1 + required: + - fogUuids +security: + - authToken: [] +paths: + /agent/provision: + post: + tags: + - Agent + summary: POST /api/v3/agent/provision + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/agentProvision' + /agent/deprovision: + post: + tags: + - Agent + summary: POST /api/v3/agent/deprovision + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + 
required: true + content: + application/json: + schema: + $ref: '#/components/schemas/agentProvision' + /agent/config: + get: + tags: + - Agent + summary: GET /api/v3/agent/config + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Agent + summary: PATCH /api/v3/agent/config + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /agent/config/changes: + get: + tags: + - Agent + summary: GET /api/v3/agent/config/changes + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Agent + summary: PATCH /api/v3/agent/config/changes + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: 
+ application/json: + schema: + type: object + /agent/status: + put: + tags: + - Agent + summary: PUT /api/v3/agent/status + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/updateAgentConfig' + /agent/edgeResources: + get: + tags: + - Agent + summary: GET /api/v3/agent/edgeResources + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/volumeMounts: + get: + tags: + - Agent + summary: GET /api/v3/agent/volumeMounts + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/microservices: + get: + tags: + - Agent + summary: GET /api/v3/agent/microservices + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + 
name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/microservices/{microserviceUuid}: + get: + tags: + - Agent + summary: GET /api/v3/agent/microservices/:microserviceUuid + security: + - authToken: [] + parameters: + - name: microserviceUuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/registries: + get: + tags: + - Agent + summary: GET /api/v3/agent/registries + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/tunnel: + get: + tags: + - Agent + summary: GET /api/v3/agent/tunnel + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/strace: + get: + tags: + - Agent + summary: GET /api/v3/agent/strace + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: 
FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + put: + tags: + - Agent + summary: PUT /api/v3/agent/strace + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/updateAgentConfig' + /agent/version: + get: + tags: + - Agent + summary: GET /api/v3/agent/version + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/hal/hw: + put: + tags: + - Agent + summary: PUT /api/v3/agent/hal/hw + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/updateAgentConfig' + /agent/hal/usb: + put: + tags: + - Agent + summary: PUT /api/v3/agent/hal/usb + security: + - authToken: [] + parameters: [] + responses: + 
'200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/updateAgentConfig' + /agent/delete-node: + delete: + tags: + - Agent + summary: DELETE /api/v3/agent/delete-node + security: + - authToken: [] + parameters: [] + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /agent/image-snapshot: + get: + tags: + - Agent + summary: GET /api/v3/agent/image-snapshot + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + put: + tags: + - Agent + summary: PUT /api/v3/agent/image-snapshot + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/updateAgentConfig' + /agent/cert: + get: + tags: + - Agent + summary: GET /api/v3/agent/cert + security: + 
- authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /application: + get: + tags: + - Application + summary: GET /api/v3/application + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + applications: + type: array + items: + type: object + properties: + name: + type: string + description: + type: string + version: + type: string + microservices: + type: array + items: + type: object + properties: + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + post: + tags: + - Application + summary: POST /api/v3/application + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationCreate' + /application/system: + get: + tags: + - Application + summary: GET /api/v3/application/system + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + 
type: object + properties: + applications: + type: array + items: + type: object + properties: + name: + type: string + description: + type: string + version: + type: string + microservices: + type: array + items: + type: object + properties: + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /application/yaml: + post: + tags: + - Application + summary: POST /api/v3/application/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + application: + type: string + format: binary + /application/{name}: + get: + tags: + - Application + summary: GET /api/v3/application/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + applications: + type: array + items: + type: object + properties: + name: + type: string + description: + type: string + version: + type: string + microservices: + type: array + items: + type: object + properties: + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Application + summary: PATCH /api/v3/application/:name + security: + - authToken: [] + parameters: + - name: name + 
in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationPatch' + put: + tags: + - Application + summary: PUT /api/v3/application/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationUpdate' + delete: + tags: + - Application + summary: DELETE /api/v3/application/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /application/system/{name}: + get: + tags: + - Application + summary: GET /api/v3/application/system/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + 
schema: + type: object + properties: + applications: + type: array + items: + type: object + properties: + name: + type: string + description: + type: string + version: + type: string + microservices: + type: array + items: + type: object + properties: + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + delete: + tags: + - Application + summary: DELETE /api/v3/application/system/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /application/yaml/{name}: + put: + tags: + - Application + summary: PUT /api/v3/application/yaml/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationUpdate' + /applicationTemplates: + get: + tags: + - Application + summary: GET /api/v3/applicationTemplates + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + applications: + type: array + items: + type: object + properties: + 
name: + type: string + description: + type: string + version: + type: string + microservices: + type: array + items: + type: object + properties: + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /applicationTemplate: + post: + tags: + - Application + summary: POST /api/v3/applicationTemplate + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationCreate' + /applicationTemplate/yaml: + post: + tags: + - Application + summary: POST /api/v3/applicationTemplate/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + application: + type: string + format: binary + /applicationTemplate/{name}: + get: + tags: + - Application + summary: GET /api/v3/applicationTemplate/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + 
application/json: + schema: + type: object + properties: + applications: + type: array + items: + type: object + properties: + name: + type: string + description: + type: string + version: + type: string + microservices: + type: array + items: + type: object + properties: + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Application + summary: PATCH /api/v3/applicationTemplate/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationPatch' + put: + tags: + - Application + summary: PUT /api/v3/applicationTemplate/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationUpdate' + delete: + tags: + - Application + summary: DELETE /api/v3/applicationTemplate/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + 
X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /applicationTemplate/yaml/{name}: + put: + tags: + - Application + summary: PUT /api/v3/applicationTemplate/yaml/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/applicationUpdate' + /capabilities/edgeResources: + head: + tags: + - Controller + summary: HEAD /api/v3/capabilities/edgeResources + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /capabilities/applicationTemplates: + head: + tags: + - Controller + summary: HEAD /api/v3/capabilities/applicationTemplates + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /catalog/microservices: + get: + tags: + - Catalog + summary: GET /api/v3/catalog/microservices + security: + - 
authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + post: + tags: + - Catalog + summary: POST /api/v3/catalog/microservices + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + /catalog/microservices/{id}: + get: + tags: + - Catalog + summary: GET /api/v3/catalog/microservices/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Catalog + summary: PATCH /api/v3/catalog/microservices/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + 
'200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + delete: + tags: + - Catalog + summary: DELETE /api/v3/catalog/microservices/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /certificates/ca: + post: + tags: + - Certificates + summary: POST /api/v3/certificates/ca + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/certificateCreate' + get: + tags: + - Certificates + summary: GET /api/v3/certificates/ca + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /certificates/ca/{name}: + get: + tags: + - Certificates + summary: GET 
/api/v3/certificates/ca/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + delete: + tags: + - Certificates + summary: DELETE /api/v3/certificates/ca/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /certificates: + post: + tags: + - Certificates + summary: POST /api/v3/certificates + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/certificateCreate' + get: + tags: + - Certificates + summary: GET /api/v3/certificates + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /certificates/expiring: + get: + 
tags: + - Certificates + summary: GET /api/v3/certificates/expiring + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /certificates/{name}: + get: + tags: + - Certificates + summary: GET /api/v3/certificates/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + delete: + tags: + - Certificates + summary: DELETE /api/v3/certificates/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /certificates/{name}/renew: + post: + tags: + - Certificates + summary: POST /api/v3/certificates/:name/renew + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + 
requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/certificateCreate' + /certificates/yaml: + post: + tags: + - Certificates + summary: POST /api/v3/certificates/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + application: + type: string + format: binary + /config: + get: + tags: + - ConfigMap + summary: GET /api/v3/config + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + put: + tags: + - ConfigMap + summary: PUT /api/v3/config + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/configUpdate' + /config/{key}: + get: + tags: + - ConfigMap + summary: GET /api/v3/config/:key + security: + - authToken: [] + parameters: + - name: key + in: path + required: true + schema: + type: string + responses: + '200': + 
description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /configmaps: + post: + tags: + - ConfigMap + summary: POST /api/v3/configmaps + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + get: + tags: + - ConfigMap + summary: GET /api/v3/configmaps + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /configmaps/yaml: + post: + tags: + - ConfigMap + summary: POST /api/v3/configmaps/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + application: + type: string + format: binary + /configmaps/{name}: + patch: + tags: + - ConfigMap + 
summary: PATCH /api/v3/configmaps/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + get: + tags: + - ConfigMap + summary: GET /api/v3/configmaps/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + delete: + tags: + - ConfigMap + summary: DELETE /api/v3/configmaps/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /configmaps/yaml/{name}: + patch: + tags: + - ConfigMap + summary: PATCH /api/v3/configmaps/yaml/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': 
+ description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /status: + get: + tags: + - Controller + summary: GET /api/v3/status + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /fog-types/: + get: + tags: + - Controller + summary: GET /api/v3/fog-types/ + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/{uuid}/image-snapshot: + post: + tags: + - Microservices + summary: POST /api/v3/microservices/:uuid/image-snapshot + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + get: + tags: + - Microservices + summary: GET /api/v3/microservices/:uuid/image-snapshot + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + 
X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/{uuid}/strace: + patch: + tags: + - Microservices + summary: PATCH /api/v3/microservices/:uuid/strace + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + get: + tags: + - Microservices + summary: GET /api/v3/microservices/:uuid/strace + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + put: + tags: + - Microservices + summary: PUT /api/v3/microservices/:uuid/strace + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: 
FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceUpdate' + /edgeResources: + get: + tags: + - Edge Resource + summary: GET /api/v3/edgeResources + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /edgeResource/{name}/{version}: + get: + tags: + - Edge Resource + summary: GET /api/v3/edgeResource/:name/:version + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + - name: version + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + put: + tags: + - Edge Resource + summary: PUT /api/v3/edgeResource/:name/:version + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + - name: version + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized 
+ '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/edgeResourceUpdate' + delete: + tags: + - Edge Resource + summary: DELETE /api/v3/edgeResource/:name/:version + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + - name: version + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /edgeResource/{name}: + get: + tags: + - Edge Resource + summary: GET /api/v3/edgeResource/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /edgeResource: + post: + tags: + - Edge Resource + summary: POST /api/v3/edgeResource + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/edgeResourceCreate' + /edgeResource/{name}/{version}/link: + post: + tags: + - Edge Resource + summary: POST 
/api/v3/edgeResource/:name/:version/link + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + - name: version + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/edgeResourceCreate' + delete: + tags: + - Edge Resource + summary: DELETE /api/v3/edgeResource/:name/:version/link + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + - name: version + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /flow: + get: + tags: + - Application + summary: GET /api/v3/flow + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + post: + tags: + - Application + summary: POST /api/v3/flow + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + 
description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /flow/{id}: + get: + tags: + - Application + summary: GET /api/v3/flow/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Application + summary: PATCH /api/v3/flow/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + delete: + tags: + - Application + summary: DELETE /api/v3/flow/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /iofog-list: + get: + tags: + - ioFog + summary: GET /api/v3/iofog-list + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server 
timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /iofog: + post: + tags: + - ioFog + summary: POST /api/v3/iofog + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/iofogCreate' + /iofog/{uuid}: + patch: + tags: + - ioFog + summary: PATCH /api/v3/iofog/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + delete: + tags: + - ioFog + summary: DELETE /api/v3/iofog/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + get: + tags: + - ioFog + summary: GET /api/v3/iofog/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: 
string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /iofog/{uuid}/provisioning-key: + get: + tags: + - ioFog + summary: GET /api/v3/iofog/:uuid/provisioning-key + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /iofog/{uuid}/version/{versionCommand}: + post: + tags: + - ioFog + summary: POST /api/v3/iofog/:uuid/version/:versionCommand + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + - name: versionCommand + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/iofogCreate' + /iofog/{uuid}/reboot: + post: + tags: + - ioFog + summary: POST /api/v3/iofog/:uuid/reboot + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: 
number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/iofogCreate' + /iofog/{uuid}/hal/hw: + get: + tags: + - ioFog + summary: GET /api/v3/iofog/:uuid/hal/hw + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /iofog/{uuid}/hal/usb: + get: + tags: + - ioFog + summary: GET /api/v3/iofog/:uuid/hal/usb + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /iofog/{uuid}/prune: + post: + tags: + - ioFog + summary: POST /api/v3/iofog/:uuid/prune + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + 
application/json: + schema: + $ref: '#/components/schemas/iofogCreate' + /microservices/: + get: + tags: + - Microservices + summary: GET /api/v3/microservices/ + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/system: + get: + tags: + - Microservices + summary: GET /api/v3/microservices/system + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices: + post: + tags: + - Microservices + summary: POST /api/v3/microservices + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + /microservices/yaml: + post: + tags: + - Microservices + summary: POST 
/api/v3/microservices/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + application: + type: string + format: binary + /microservices/{uuid}: + get: + tags: + - Microservices + summary: GET /api/v3/microservices/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Microservices + summary: PATCH /api/v3/microservices/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/:uuid + security: + - authToken: [] + parameters: + - 
name: uuid + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/system/{uuid}: + get: + tags: + - Microservices + summary: GET /api/v3/microservices/system/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Microservices + summary: PATCH /api/v3/microservices/system/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /microservices/pub/{tag}: + get: + tags: + - Microservices + summary: GET /api/v3/microservices/pub/:tag + security: + - authToken: [] + parameters: + - name: tag + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + 
application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/sub/{tag}: + get: + tags: + - Microservices + summary: GET /api/v3/microservices/sub/:tag + security: + - authToken: [] + parameters: + - name: tag + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/{uuid}/rebuild: + patch: + tags: + - Microservices + summary: PATCH /api/v3/microservices/:uuid/rebuild + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /microservices/system/{uuid}/rebuild: + patch: + tags: + - Microservices + summary: PATCH /api/v3/microservices/system/:uuid/rebuild + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server 
timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /microservices/yaml/{uuid}: + patch: + tags: + - Microservices + summary: PATCH /api/v3/microservices/yaml/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /microservices/system/yaml/{uuid}: + patch: + tags: + - Microservices + summary: PATCH /api/v3/microservices/system/yaml/:uuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /microservices/{uuid}/routes/{receiverUuid}: + post: + tags: + - Microservices + summary: POST /api/v3/microservices/:uuid/routes/:receiverUuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + - name: receiverUuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: 
FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/:uuid/routes/:receiverUuid + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + - name: receiverUuid + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/{uuid}/port-mapping: + post: + tags: + - Microservices + summary: POST /api/v3/microservices/:uuid/port-mapping + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + get: + tags: + - Microservices + summary: GET /api/v3/microservices/:uuid/port-mapping + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + 
type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/system/{uuid}/port-mapping: + post: + tags: + - Microservices + summary: POST /api/v3/microservices/system/:uuid/port-mapping + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + /microservices/{uuid}/port-mapping/{internalPort}: + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/:uuid/port-mapping/:internalPort + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + - name: internalPort + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/system/{uuid}/port-mapping/{internalPort}: + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/system/:uuid/port-mapping/:internalPort + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + - name: internalPort 
+ in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/{uuid}/volume-mapping: + get: + tags: + - Microservices + summary: GET /api/v3/microservices/:uuid/volume-mapping + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + post: + tags: + - Microservices + summary: POST /api/v3/microservices/:uuid/volume-mapping + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + /microservices/system/{uuid}/volume-mapping: + post: + tags: + - Microservices + summary: POST /api/v3/microservices/system/:uuid/volume-mapping + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + 
description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + /microservices/{uuid}/volume-mapping/{id}: + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/:uuid/volume-mapping/:id + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + - name: id + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/system/{uuid}/volume-mapping/{id}: + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/system/:uuid/volume-mapping/:id + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + - name: id + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/{uuid}/exec: + post: + tags: + - Microservices + summary: POST /api/v3/microservices/:uuid/exec + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + 
type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/:uuid/exec + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /microservices/system/{uuid}/exec: + post: + tags: + - Microservices + summary: POST /api/v3/microservices/system/:uuid/exec + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/microserviceCreate' + delete: + tags: + - Microservices + summary: DELETE /api/v3/microservices/system/:uuid/exec + security: + - authToken: [] + parameters: + - name: uuid + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error 
+ /registries: + post: + tags: + - Registries + summary: POST /api/v3/registries + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + get: + tags: + - Registries + summary: GET /api/v3/registries + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /registries/{id}: + delete: + tags: + - Registries + summary: DELETE /api/v3/registries/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Registries + summary: PATCH /api/v3/registries/:id + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + 
content: + application/json: + schema: + type: object + /router: + get: + tags: + - Router + summary: GET /api/v3/router + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + put: + tags: + - Router + summary: PUT /api/v3/router + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /routes: + get: + tags: + - Routing + summary: GET /api/v3/routes + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + post: + tags: + - Routing + summary: POST /api/v3/routes + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + 
type: object + /routes/{appName}/{name}: + get: + tags: + - Routing + summary: GET /api/v3/routes/:appName/:name + security: + - authToken: [] + parameters: + - name: appName + in: path + required: true + schema: + type: string + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Routing + summary: PATCH /api/v3/routes/:appName/:name + security: + - authToken: [] + parameters: + - name: appName + in: path + required: true + schema: + type: string + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + delete: + tags: + - Routing + summary: DELETE /api/v3/routes/:appName/:name + security: + - authToken: [] + parameters: + - name: appName + in: path + required: true + schema: + type: string + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /secrets: + post: + tags: + - Secrets + summary: POST /api/v3/secrets + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + 
description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/secretCreate' + get: + tags: + - Secrets + summary: GET /api/v3/secrets + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /secrets/yaml: + post: + tags: + - Secrets + summary: POST /api/v3/secrets/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + application: + type: string + format: binary + /secrets/{name}: + patch: + tags: + - Secrets + summary: PATCH /api/v3/secrets/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + 
required: true + content: + application/json: + schema: + type: object + get: + tags: + - Secrets + summary: GET /api/v3/secrets/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + delete: + tags: + - Secrets + summary: DELETE /api/v3/secrets/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /secrets/yaml/{name}: + patch: + tags: + - Secrets + summary: PATCH /api/v3/secrets/yaml/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /services: + get: + tags: + - Services + summary: GET /api/v3/services + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + 
description: Not Found + '500': + description: Internal Server Error + post: + tags: + - Services + summary: POST /api/v3/services + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/serviceCreate' + /services/{name}: + get: + tags: + - Services + summary: GET /api/v3/services/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - Services + summary: PATCH /api/v3/services/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + delete: + tags: + - Services + summary: DELETE /api/v3/services/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + 
X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /services/yaml: + post: + tags: + - Services + summary: POST /api/v3/services/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + properties: + application: + type: string + format: binary + /services/yaml/{name}: + patch: + tags: + - Services + summary: PATCH /api/v3/services/yaml/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /iofog/{id}/tunnel: + patch: + tags: + - ioFog + summary: PATCH /api/v3/iofog/:id/tunnel + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + 
required: true + content: + application/json: + schema: + type: object + get: + tags: + - ioFog + summary: GET /api/v3/iofog/:id/tunnel + security: + - authToken: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /user/login: + post: + tags: + - User + summary: POST /api/v3/user/login + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/login' + /user/refresh: + post: + tags: + - User + summary: POST /api/v3/user/refresh + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/login' + /user/profile: + get: + tags: + - User + summary: GET /api/v3/user/profile + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + 
type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /user/logout: + post: + tags: + - User + summary: POST /api/v3/user/logout + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/login' + /volumeMounts: + get: + tags: + - VolumeMounts + summary: GET /api/v3/volumeMounts + security: + - authToken: [] + parameters: [] + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + post: + tags: + - VolumeMounts + summary: POST /api/v3/volumeMounts + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/volumeMountCreate' + /volumeMounts/{name}: + get: + tags: + - VolumeMounts + summary: GET /api/v3/volumeMounts/:name + security: + - authToken: [] + parameters: + - name: name + in: path + 
required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + patch: + tags: + - VolumeMounts + summary: PATCH /api/v3/volumeMounts/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + delete: + tags: + - VolumeMounts + summary: DELETE /api/v3/volumeMounts/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + /volumeMounts/yaml: + post: + tags: + - VolumeMounts + summary: POST /api/v3/volumeMounts/yaml + security: + - authToken: [] + parameters: [] + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + 
properties: + application: + type: string + format: binary + /volumeMounts/yaml/{name}: + patch: + tags: + - VolumeMounts + summary: PATCH /api/v3/volumeMounts/yaml/:name + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + type: object + /volumeMounts/{name}/link: + post: + tags: + - VolumeMounts + summary: POST /api/v3/volumeMounts/:name/link + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '201': + description: Created + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + '400': + description: Bad Request + '401': + description: Not Authorized + '409': + description: Duplicate Name + '500': + description: Internal Server Error + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/volumeMountCreate' + delete: + tags: + - VolumeMounts + summary: DELETE /api/v3/volumeMounts/:name/link + security: + - authToken: [] + parameters: + - name: name + in: path + required: true + schema: + type: string + responses: + '204': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error diff --git a/generate-swagger.sh b/generate-swagger.sh index 77bc013b..3b458750 100755 --- a/generate-swagger.sh +++ b/generate-swagger.sh @@ -1 +1,2 
@@ -docker run -v ./docs:/docs mitjaziv/swagger-codegen-cli generate -i /docs/swagger.yaml -l swagger -o /docs \ No newline at end of file +#!/bin/bash +node scripts/generate-swagger.js \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index f6878851..49756055 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,6 +12,7 @@ "dependencies": { "@datasance/ecn-viewer": "0.5.4", "@kubernetes/client-node": "^0.22.3", + "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-trace-otlp-http": "^0.200.0", "@opentelemetry/instrumentation-express": "^0.48.1", @@ -66,6 +67,7 @@ "umzug": "^3.7.0", "underscore": "1.13.6", "uuid": "11.1.0", + "ws": "^8.18.0", "xss-clean": "0.1.1" }, "bin": { @@ -77,11 +79,12 @@ "chai": "5.1.1", "chai-as-promised": "7.1.2", "chai-http": "4.4.0", - "eslint": "9.16.0", + "eslint": "9.28.0", "eslint-config-google": "0.14.0", + "js-yaml": "^4.1.0", "mocha": "10.6.0", "mocha-junit-reporter": "2.2.1", - "newman": "^6.2.0", + "newman": "^6.2.1", "newman-reporter-junitfull": "1.1.1", "nyc": "15.1.0", "sequelize-cli": "6.6.2", @@ -105,44 +108,44 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.26.2", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", - "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", "dev": true, "dependencies": { - "@babel/helper-validator-identifier": "^7.25.9", + "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", - "picocolors": "^1.0.0" + "picocolors": "^1.1.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/compat-data": { - "version": "7.24.4", - "resolved": 
"https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.4.tgz", - "integrity": "sha512-vg8Gih2MLK+kOkHJp4gBEIkyaIi00jgWot2D9QOmmfLC8jINSOzmCLta6Bvz/JSBCqnegV0L80jhxkol5GWNfQ==", + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.27.5.tgz", + "integrity": "sha512-KiRAp/VoJaWkkte84TvUd9qjdbZAdiqyvMxrGl1N6vzFogKmaLgoM3L1kgtLicp2HP5fBJS8JrZKLVIZGVJAVg==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.5.tgz", - "integrity": "sha512-tVQRucExLQ02Boi4vdPp49svNGcfL2GhdTCT9aldhXgCJVAI21EtRfBettiuLUwce/7r6bFdgs6JFkcdTiFttA==", + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.27.4.tgz", + "integrity": "sha512-bXYxrXFubeYdvB0NhD/NBB3Qi6aZeV20GOWVI47t2dkecCEoneR4NPVcb7abpXDEvejgrUfFtG6vG/zxAKmg+g==", "dev": true, "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.24.2", - "@babel/generator": "^7.24.5", - "@babel/helper-compilation-targets": "^7.23.6", - "@babel/helper-module-transforms": "^7.24.5", - "@babel/helpers": "^7.24.5", - "@babel/parser": "^7.24.5", - "@babel/template": "^7.24.0", - "@babel/traverse": "^7.24.5", - "@babel/types": "^7.24.5", + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.27.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.27.3", + "@babel/helpers": "^7.27.4", + "@babel/parser": "^7.27.4", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.27.4", + "@babel/types": "^7.27.3", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -164,12 +167,12 @@ "dev": true }, "node_modules/@babel/core/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": 
"4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -181,9 +184,9 @@ } }, "node_modules/@babel/core/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/@babel/core/node_modules/semver": { @@ -196,29 +199,30 @@ } }, "node_modules/@babel/generator": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.5.tgz", - "integrity": "sha512-x32i4hEXvr+iI0NEoEfDKzlemF8AmtOP8CcrRaEcpzysWuoEb1KknpcvMsHKPONoKZiDuItklgWhB18xEhr9PA==", + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.27.5.tgz", + "integrity": "sha512-ZGhA37l0e/g2s1Cnzdix0O3aLYm66eF8aufiVteOgnwxgnRP8GoyMj7VWsgWnQbVKXyge7hqrFh2K2TQM6t1Hw==", "dev": true, "dependencies": { - "@babel/types": "^7.24.5", + "@babel/parser": "^7.27.5", + "@babel/types": "^7.27.3", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", - "jsesc": "^2.5.1" + "jsesc": "^3.0.2" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", - "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", + "version": "7.27.2", + "resolved": 
"https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", "dev": true, "dependencies": { - "@babel/compat-data": "^7.23.5", - "@babel/helper-validator-option": "^7.23.5", - "browserslist": "^4.22.2", + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" }, @@ -250,63 +254,28 @@ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", "dev": true }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", - "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", - "dev": true, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", - "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", - "dev": true, - "dependencies": { - "@babel/template": "^7.22.15", - "@babel/types": "^7.23.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", - "dev": true, - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-module-imports": { - "version": "7.24.3", - "resolved": 
"https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.3.tgz", - "integrity": "sha512-viKb0F9f2s0BCS22QSF308z/+1YWKV/76mwt61NBzS5izMzDPwdq1pTrzf+Li3npBWX9KdQbkeCt1jSAM7lZqg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", "dev": true, "dependencies": { - "@babel/types": "^7.24.0" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.5.tgz", - "integrity": "sha512-9GxeY8c2d2mdQUP1Dye0ks3VDyIMS98kt/llQ2nUId8IsWqTF0l1LkSX0/uP7l7MCDrzXS009Hyhe2gzTiGW8A==", + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.27.3.tgz", + "integrity": "sha512-dSOvYwvyLsWBeIRyOeHXp5vPj5l1I011r52FM1+r1jCERv+aFXYk4whgQccYEGYxK2H3ZAIA8nuPkQ0HaUo3qg==", "dev": true, "dependencies": { - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-module-imports": "^7.24.3", - "@babel/helper-simple-access": "^7.24.5", - "@babel/helper-split-export-declaration": "^7.24.5", - "@babel/helper-validator-identifier": "^7.24.5" + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.27.3" }, "engines": { "node": ">=6.9.0" @@ -315,77 +284,53 @@ "@babel/core": "^7.0.0" } }, - "node_modules/@babel/helper-simple-access": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.5.tgz", - "integrity": "sha512-uH3Hmf5q5n7n8mz7arjUlDOCbttY/DW4DYhE6FUsjKJ/oYC1kQQUvwEQWxRwUpX9qQKRXeqLwWxrqilMrf32sQ==", - "dev": true, - "dependencies": { - "@babel/types": 
"^7.24.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.5.tgz", - "integrity": "sha512-5CHncttXohrHk8GWOFCcCl4oRD9fKosWlIRgWm4ql9VYioKm52Mk2xsmoohvm7f3JoiLSM5ZgJuRaf5QZZYd3Q==", - "dev": true, - "dependencies": { - "@babel/types": "^7.24.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-string-parser": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", - "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.25.9", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", - "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", - "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", + 
"version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.0.tgz", - "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==", + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.6.tgz", + "integrity": "sha512-muE8Tt8M22638HU31A3CgfSUciwz1fhATfoVai05aPXGor//CdWDCbnlY1yvBPo07njuVOCNGCSp/GTt12lIug==", "dev": true, "dependencies": { - "@babel/template": "^7.27.0", - "@babel/types": "^7.27.0" + "@babel/template": "^7.27.2", + "@babel/types": "^7.27.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.0.tgz", - "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==", + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.5.tgz", + "integrity": "sha512-OsQd175SxWkGlzbny8J3K8TnnDD0N3lrIUtB92xwyRpzaenGZhxDvxN/JgU00U3CDZNj9tPuDJ5H0WS4Nt3vKg==", "dev": true, "dependencies": { - "@babel/types": "^7.27.0" + "@babel/types": "^7.27.3" }, "bin": { "parser": "bin/babel-parser.js" @@ -395,33 +340,30 @@ } }, "node_modules/@babel/template": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.0.tgz", - "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": 
"sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.26.2", - "@babel/parser": "^7.27.0", - "@babel/types": "^7.27.0" + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.5.tgz", - "integrity": "sha512-7aaBLeDQ4zYcUFDUD41lJc1fG8+5IU9DaNSJAgal866FGvmD5EbWQgnEC6kO1gGLsX0esNkfnJSndbTXA3r7UA==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.24.2", - "@babel/generator": "^7.24.5", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.24.5", - "@babel/parser": "^7.24.5", - "@babel/types": "^7.24.5", + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.27.4.tgz", + "integrity": "sha512-oNcu2QbHqts9BtOWJosOVJapWjBDSxGCpFvikNR5TGDYDQf3JwpIoMzIKrvfoti93cLfPJEG4tH9SPVeyCGgdA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.27.3", + "@babel/parser": "^7.27.4", + "@babel/template": "^7.27.2", + "@babel/types": "^7.27.3", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -430,12 +372,12 @@ } }, "node_modules/@babel/traverse/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": 
">=6.0" @@ -456,19 +398,19 @@ } }, "node_modules/@babel/traverse/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/@babel/types": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.0.tgz", - "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==", + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.6.tgz", + "integrity": "sha512-ETyHEk2VHHvl9b9jZP5IHPavHYk57EhanlRRuae9XCpb/j5bDCbPPMOBfCWhnl/7EDJz0jEMCi/RhccCE8r1+Q==", "dev": true, "dependencies": { - "@babel/helper-string-parser": "^7.25.9", - "@babel/helper-validator-identifier": "^7.25.9" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -490,16 +432,19 @@ "integrity": "sha512-Eu8BhBAhHyU6S3RdOPyiKpq3DhRUcEQQlU02BBWTdI5e6j5Iqv6Q72AFBw+AaE0NeO7PSNz8x7jQj77OX7jU5g==" }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", - "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.7.0.tgz", + "integrity": "sha512-dyybb3AcajC7uha6CvhdVRJqaKyn7w2YKqKyAN37NKYgZT36w+iRb0Dymmc5qEJ549c/S31cMMSFd75bteCpCw==", "dev": true, "dependencies": { - "eslint-visitor-keys": "^3.3.0" + "eslint-visitor-keys": "^3.4.3" }, "engines": { "node": "^12.22.0 || 
^14.17.0 || >=16.0.0" }, + "funding": { + "url": "https://opencollective.com/eslint" + }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } @@ -526,12 +471,12 @@ } }, "node_modules/@eslint/config-array": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.19.0.tgz", - "integrity": "sha512-zdHg2FPIFNKPdcHWtiNT+jEFCHYVplAXRDlQDyqy0zGx/q2parwh7brGJSiTxRk/TSMkbM//zt/f5CHgyTyaSQ==", + "version": "0.20.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.20.1.tgz", + "integrity": "sha512-OL0RJzC/CBzli0DrrR31qzj6d6i6Mm3HByuhflhl4LOBiWxN+3i6/t/ZQQNii4tjksXi8r2CRW1wMpWA2ULUEw==", "dev": true, "dependencies": { - "@eslint/object-schema": "^2.1.4", + "@eslint/object-schema": "^2.1.6", "debug": "^4.3.1", "minimatch": "^3.1.2" }, @@ -540,9 +485,9 @@ } }, "node_modules/@eslint/config-array/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -550,9 +495,9 @@ } }, "node_modules/@eslint/config-array/node_modules/debug": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", - "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { "ms": "^2.1.3" @@ -584,19 +529,31 @@ 
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, + "node_modules/@eslint/config-helpers": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.2.3.tgz", + "integrity": "sha512-u180qk2Um1le4yf0ruXH3PYFeEZeYC3p/4wCTKrr2U1CmGdzGi3KtY0nuPDH48UJxlKCC5RDzbcbh4X0XlqgHg==", + "dev": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@eslint/core": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.9.0.tgz", - "integrity": "sha512-7ATR9F0e4W85D/0w7cU0SNj7qkAexMG+bAHEZOjo9akvGuhHE2m7umzWzfnpa0XAg5Kxc1BWmtPMV67jJ+9VUg==", + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.14.0.tgz", + "integrity": "sha512-qIbV0/JZr7iSDjqAc60IqbLdsj9GDt16xQtWD+B78d/HAlvysGdZZ6rpJHGAc2T0FQx1X6thsSPdnoiGKdNtdg==", "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.15" + }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/eslintrc": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.2.0.tgz", - "integrity": "sha512-grOjVNN8P3hjJn/eIETF1wwd12DdnwFDoyceUJLYYdkpbwq3nLi+4fqrTAONx7XDALqlL220wC/RHSC/QTI/0w==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", + "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", "dev": true, "dependencies": { "ajv": "^6.12.4", @@ -617,9 +574,9 @@ } }, "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -627,9 +584,9 @@ } }, "node_modules/@eslint/eslintrc/node_modules/debug": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz", - "integrity": "sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { "ms": "^2.1.3" @@ -662,39 +619,56 @@ "dev": true }, "node_modules/@eslint/js": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.16.0.tgz", - "integrity": "sha512-tw2HxzQkrbeuvyj1tG2Yqq+0H9wGoI2IMk4EOsQeX+vmd75FtJAzf+gTA69WF+baUKRYQ3x2kbLE08js5OsTVg==", + "version": "9.28.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.28.0.tgz", + "integrity": "sha512-fnqSjGWd/CoIp4EXIxWVK/sHA6DOHN4+8Ix2cX5ycOY7LG0UY8nHCU5pIp2eaE1Mc7Qd8kHspYNzYXT2ojPLzg==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" } }, "node_modules/@eslint/object-schema": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.4.tgz", - "integrity": "sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==", + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.6.tgz", + "integrity": "sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, "node_modules/@eslint/plugin-kit": { 
- "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.3.tgz", - "integrity": "sha512-2b/g5hRmpbb1o4GnTZax9N9m0FXzz9OV42ZzI4rDDMDuHUqigAiQCEWChBWCY4ztAGVRjoWT19v0yMmc5/L5kA==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.2.tgz", + "integrity": "sha512-4SaFZCNfJqvk/kenHpI8xvN42DMaoycy4PzKc5otHxRswww1kAt82OlBuwRVLofCACCTZEcla2Ydxv8scMXaTg==", "dev": true, "dependencies": { + "@eslint/core": "^0.15.0", "levn": "^0.4.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, + "node_modules/@eslint/plugin-kit/node_modules/@eslint/core": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.0.tgz", + "integrity": "sha512-b7ePw78tEWWkpgZCDYkbqDOP8dmM6qe+AOC6iuJqlq1R/0ahMAeH3qynpnqKFGkMltrp44ohV4ubGyvLX28tzw==", + "dev": true, + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, "node_modules/@faker-js/faker": { "version": "5.5.3", "resolved": "https://registry.npmjs.org/@faker-js/faker/-/faker-5.5.3.tgz", "integrity": "sha512-R11tGE6yIFwqpaIqcfkcg7AICXzFg14+5h5v0TfF/9+RMDL6jhzCy/pxHVOfbALGdtVYdt6JdR21tuxEgl34dw==", + "deprecated": "Please update to a newer version.", "dev": true }, "node_modules/@gar/promisify": { @@ -704,9 +678,9 @@ "optional": true }, "node_modules/@grpc/grpc-js": { - "version": "1.13.3", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.13.3.tgz", - "integrity": "sha512-FTXHdOoPbZrBjlVLHuKbDZnsTxXv2BlHF57xw6LuThXacXvtkahEPED0CKMk6obZDf65Hv4k3z62eyPNpvinIg==", + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.13.4.tgz", + "integrity": "sha512-GsFaMXCkMqkKIvwCQjCrwH+GHbPKBjhwo/8ZuUkWHqbI73Kky9I+pQltrlT0+MWpedCoosda53lgjYfyEPgxBg==", "dependencies": { "@grpc/proto-loader": "^0.7.13", "@js-sdsl/ordered-map": "^4.4.2" @@ -716,9 +690,9 @@ } }, "node_modules/@grpc/proto-loader": { - 
"version": "0.7.14", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.14.tgz", - "integrity": "sha512-oS0FyK8eGNBJC6aB/qsS4LOxCYQlBniNzp6W8IdjlRVRGs0FOK9dS84OV+kXGaZf8Ozeos8fbUMJUGGzSpOCzQ==", + "version": "0.7.15", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.15.tgz", + "integrity": "sha512-tMXdRCfYVixjuFK+Hk0Q1s38gV9zDiDJfWL3h1rv4Qc39oILCu1TRTDt7+fGUI8K4G1Fj125Hx/ru3azECWTyQ==", "dependencies": { "lodash.camelcase": "^4.3.0", "long": "^5.0.0", @@ -732,60 +706,6 @@ "node": ">=6" } }, - "node_modules/@grpc/proto-loader/node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@grpc/proto-loader/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/@grpc/proto-loader/node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - 
"node_modules/@grpc/proto-loader/node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "engines": { - "node": ">=12" - } - }, "node_modules/@humanfs/core": { "version": "0.19.1", "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", @@ -835,9 +755,9 @@ } }, "node_modules/@humanwhocodes/retry": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.1.tgz", - "integrity": "sha512-c7hNEllBlenFTHBky65mhq8WD2kbN9Q6gk0bTk8lSBvc554jpXSkST1iePudpt7+A/AQvuHs9EMqjHDXMY1lrA==", + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", "dev": true, "engines": { "node": ">=18.18" @@ -865,9 +785,9 @@ } }, "node_modules/@isaacs/cliui/node_modules/ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", "dev": true, "engines": { "node": ">=12" @@ -876,6 +796,18 @@ "url": "https://github.com/chalk/ansi-regex?sponsor=1" } }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/chalk/ansi-styles?sponsor=1" + } + }, "node_modules/@isaacs/cliui/node_modules/emoji-regex": { "version": "9.2.2", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", @@ -914,6 +846,23 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/@isaacs/fs-minipass": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", @@ -1048,9 +997,9 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", - "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", "dev": true, "dependencies": { "@jridgewell/set-array": "^1.2.1", @@ -1080,9 +1029,9 @@ } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": 
"sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", "dev": true }, "node_modules/@jridgewell/trace-mapping": { @@ -1147,66 +1096,91 @@ } }, "node_modules/@kubernetes/client-node/@cypress/request@3.0.8": {}, - "node_modules/@kubernetes/client-node/node_modules/chownr": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", - "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "node_modules/@msgpack/msgpack": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@msgpack/msgpack/-/msgpack-3.1.2.tgz", + "integrity": "sha512-JEW4DEtBzfe8HvUYecLU9e6+XJnKDlUAIve8FvPzF3Kzs6Xo/KuZkZJsDH0wJXl/qEZbeeE7edxDNY3kMs39hQ==", "engines": { - "node": ">=18" + "node": ">= 18" } }, - "node_modules/@kubernetes/client-node/node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "node_modules/@noble/hashes": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", + "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", "engines": { - "node": ">=16 || 14 >=14.17" + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" } }, - "node_modules/@kubernetes/client-node/node_modules/minizlib": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", - "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": 
"sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", "dependencies": { - "minipass": "^7.1.2" + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" }, "engines": { - "node": ">= 18" + "node": ">= 8" } }, - "node_modules/@kubernetes/client-node/node_modules/tar": { - "version": "7.4.3", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", - "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", "dependencies": { - "@isaacs/fs-minipass": "^4.0.0", - "chownr": "^3.0.0", - "minipass": "^7.1.2", - "minizlib": "^3.0.1", - "mkdirp": "^3.0.1", - "yallist": "^5.0.0" + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" }, "engines": { - "node": ">=18" + "node": ">= 8" } }, - "node_modules/@kubernetes/client-node/node_modules/yallist": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", - "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", - "engines": { - "node": ">=18" + "node_modules/@npmcli/fs": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-1.1.1.tgz", + "integrity": "sha512-8KG5RD0GVP4ydEzRn/I4BNDuxDtqVbOdm8675T49OIG/NGhaK0pjPX7ZcDlvKYbA+ulvVK3ztfcF4uBdOxuJbQ==", + "optional": true, + "dependencies": { + "@gar/promisify": "^1.0.1", + "semver": "^7.3.5" } }, - 
"node_modules/@noble/hashes": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", - "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + "node_modules/@npmcli/move-file": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-1.1.2.tgz", + "integrity": "sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg==", + "deprecated": "This functionality has been moved to @npmcli/fs", + "optional": true, + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, "engines": { - "node": "^14.21.3 || >=16" + "node": ">=10" + } + }, + "node_modules/@npmcli/move-file/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "optional": true, + "bin": { + "mkdirp": "bin/cmd.js" }, - "funding": { - "url": "https://paulmillr.com/funding/" + "engines": { + "node": ">=10" } }, "node_modules/@one-ini/wasm": { @@ -1223,39 +1197,42 @@ "node": ">=8.0.0" } }, - "node_modules/@opentelemetry/context-async-hooks": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-2.0.0.tgz", - "integrity": "sha512-IEkJGzK1A9v3/EHjXh3s2IiFc6L4jfK+lNgKVgUjeUJQRRhnVFMIO3TAvKwonm9O1HebCuoOt98v8bZW7oVQHA==", - "engines": { - "node": "^18.19.0 || >=20.6.0" + "node_modules/@opentelemetry/api-logs": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", + "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "dependencies": { + "@opentelemetry/api": "^1.3.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/context-async-hooks": 
{ + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-2.0.0.tgz", + "integrity": "sha512-IEkJGzK1A9v3/EHjXh3s2IiFc6L4jfK+lNgKVgUjeUJQRRhnVFMIO3TAvKwonm9O1HebCuoOt98v8bZW7oVQHA==", + "engines": { + "node": "^18.19.0 || >=20.6.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, "node_modules/@opentelemetry/core": { - "version": "1.30.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.30.1.tgz", - "integrity": "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", + "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", "dependencies": { - "@opentelemetry/semantic-conventions": "1.28.0" + "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { - "node": ">=14" + "node": "^18.19.0 || >=20.6.0" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@opentelemetry/core/node_modules/@opentelemetry/semantic-conventions": { - "version": "1.28.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz", - "integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==", - "engines": { - "node": ">=14" - } - }, "node_modules/@opentelemetry/exporter-logs-otlp-grpc": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-grpc/-/exporter-logs-otlp-grpc-0.200.0.tgz", @@ -1275,20 +1252,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-logs-otlp-grpc/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": 
"sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-logs-otlp-http": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-http/-/exporter-logs-otlp-http-0.200.0.tgz", @@ -1307,31 +1270,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-logs-otlp-http/node_modules/@opentelemetry/api-logs": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", - "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", - "dependencies": { - "@opentelemetry/api": "^1.3.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@opentelemetry/exporter-logs-otlp-http/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-logs-otlp-proto": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-proto/-/exporter-logs-otlp-proto-0.200.0.tgz", @@ -1352,31 +1290,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-logs-otlp-proto/node_modules/@opentelemetry/api-logs": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", - "integrity": 
"sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", - "dependencies": { - "@opentelemetry/api": "^1.3.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@opentelemetry/exporter-logs-otlp-proto/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-logs-otlp-proto/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -1413,21 +1326,22 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/core": { + "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/resources": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", "dependencies": { + "@opentelemetry/core": "2.0.0", "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { "node": "^18.19.0 || >=20.6.0" }, "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" + "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/exporter-metrics-otlp-http": { + 
"node_modules/@opentelemetry/exporter-metrics-otlp-http": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.200.0.tgz", "integrity": "sha512-5BiR6i8yHc9+qW7F6LqkuUnIzVNA7lt0qRxIKcKT+gq3eGUPHZ3DY29sfxI3tkvnwMgtnHDMNze5DdxW39HsAw==", @@ -1445,7 +1359,7 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/resources": { + "node_modules/@opentelemetry/exporter-metrics-otlp-http/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", @@ -1460,21 +1374,6 @@ "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/exporter-metrics-otlp-grpc/node_modules/@opentelemetry/sdk-metrics": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", - "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", - "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/resources": "2.0.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.9.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-metrics-otlp-proto": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-proto/-/exporter-metrics-otlp-proto-0.200.0.tgz", @@ -1494,38 +1393,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": 
"sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/exporter-metrics-otlp-http": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.200.0.tgz", - "integrity": "sha512-5BiR6i8yHc9+qW7F6LqkuUnIzVNA7lt0qRxIKcKT+gq3eGUPHZ3DY29sfxI3tkvnwMgtnHDMNze5DdxW39HsAw==", - "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/otlp-exporter-base": "0.200.0", - "@opentelemetry/otlp-transformer": "0.200.0", - "@opentelemetry/resources": "2.0.0", - "@opentelemetry/sdk-metrics": "2.0.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -1541,21 +1408,6 @@ "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/exporter-metrics-otlp-proto/node_modules/@opentelemetry/sdk-metrics": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", - "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", - "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/resources": "2.0.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.9.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-prometheus": { "version": "0.200.0", "resolved": 
"https://registry.npmjs.org/@opentelemetry/exporter-prometheus/-/exporter-prometheus-0.200.0.tgz", @@ -1572,20 +1424,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-prometheus/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-prometheus/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -1601,21 +1439,6 @@ "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/exporter-prometheus/node_modules/@opentelemetry/sdk-metrics": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", - "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", - "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/resources": "2.0.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.9.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-trace-otlp-grpc": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-grpc/-/exporter-trace-otlp-grpc-0.200.0.tgz", @@ -1636,20 +1459,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-trace-otlp-grpc/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": 
"sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-trace-otlp-grpc/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -1683,20 +1492,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-trace-otlp-http/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-trace-otlp-http/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -1730,20 +1525,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/exporter-trace-otlp-proto/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/exporter-trace-otlp-proto/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": 
"https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -1776,33 +1557,37 @@ "@opentelemetry/api": "^1.0.0" } }, - "node_modules/@opentelemetry/exporter-zipkin/node_modules/@opentelemetry/core": { + "node_modules/@opentelemetry/exporter-zipkin/node_modules/@opentelemetry/resources": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", "dependencies": { + "@opentelemetry/core": "2.0.0", "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { "node": "^18.19.0 || >=20.6.0" }, "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" + "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/exporter-zipkin/node_modules/@opentelemetry/resources": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", - "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", + "node_modules/@opentelemetry/instrumentation": { + "version": "0.200.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.200.0.tgz", + "integrity": "sha512-pmPlzfJd+vvgaZd/reMsC8RWgTXn2WY1OWT5RT42m3aOn5532TozwXNDhg1vzqJ+jnvmkREcdLr27ebJEQt0Jg==", "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/semantic-conventions": "^1.29.0" + "@opentelemetry/api-logs": "0.200.0", + "@types/shimmer": "^1.2.0", + "import-in-the-middle": "^1.8.1", + "require-in-the-middle": "^7.1.1", + "shimmer": "^1.2.1" }, "engines": { "node": "^18.19.0 || >=20.6.0" }, "peerDependencies": { - "@opentelemetry/api": ">=1.3.0 <1.10.0" + 
"@opentelemetry/api": "^1.3.0" } }, "node_modules/@opentelemetry/instrumentation-express": { @@ -1821,75 +1606,10 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/instrumentation-express/node_modules/@opentelemetry/api-logs": { + "node_modules/@opentelemetry/instrumentation-http": { "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", - "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", - "dependencies": { - "@opentelemetry/api": "^1.3.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@opentelemetry/instrumentation-express/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/instrumentation-express/node_modules/@opentelemetry/instrumentation": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.200.0.tgz", - "integrity": "sha512-pmPlzfJd+vvgaZd/reMsC8RWgTXn2WY1OWT5RT42m3aOn5532TozwXNDhg1vzqJ+jnvmkREcdLr27ebJEQt0Jg==", - "dependencies": { - "@opentelemetry/api-logs": "0.200.0", - "@types/shimmer": "^1.2.0", - "import-in-the-middle": "^1.8.1", - "require-in-the-middle": "^7.1.1", - "shimmer": "^1.2.1" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/instrumentation-express/node_modules/acorn": { - "version": "8.14.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", - "integrity": 
"sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/@opentelemetry/instrumentation-express/node_modules/import-in-the-middle": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.13.1.tgz", - "integrity": "sha512-k2V9wNm9B+ysuelDTHjI9d5KPc4l8zAZTGqj+pcynvWkypZd857ryzN8jNC7Pg2YZXNMJcHRPpaDyCBbNyVRpA==", - "dependencies": { - "acorn": "^8.14.0", - "acorn-import-attributes": "^1.9.5", - "cjs-module-lexer": "^1.2.2", - "module-details-from-path": "^1.0.3" - } - }, - "node_modules/@opentelemetry/instrumentation-http": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.200.0.tgz", - "integrity": "sha512-9tqGbCJikhYU68y3k9mi6yWsMyMeCcwoQuHvIXan5VvvPPQ5WIZaV6Mxu/MCVe4swRNoFs8Th+qyj0TZV5ELvw==", + "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.200.0.tgz", + "integrity": "sha512-9tqGbCJikhYU68y3k9mi6yWsMyMeCcwoQuHvIXan5VvvPPQ5WIZaV6Mxu/MCVe4swRNoFs8Th+qyj0TZV5ELvw==", "dependencies": { "@opentelemetry/core": "2.0.0", "@opentelemetry/instrumentation": "0.200.0", @@ -1903,71 +1623,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/api-logs": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", - "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", - "dependencies": { - "@opentelemetry/api": "^1.3.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": 
"sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/instrumentation-http/node_modules/@opentelemetry/instrumentation": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.200.0.tgz", - "integrity": "sha512-pmPlzfJd+vvgaZd/reMsC8RWgTXn2WY1OWT5RT42m3aOn5532TozwXNDhg1vzqJ+jnvmkREcdLr27ebJEQt0Jg==", - "dependencies": { - "@opentelemetry/api-logs": "0.200.0", - "@types/shimmer": "^1.2.0", - "import-in-the-middle": "^1.8.1", - "require-in-the-middle": "^7.1.1", - "shimmer": "^1.2.1" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/instrumentation-http/node_modules/acorn": { - "version": "8.14.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", - "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/@opentelemetry/instrumentation-http/node_modules/import-in-the-middle": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.13.1.tgz", - "integrity": "sha512-k2V9wNm9B+ysuelDTHjI9d5KPc4l8zAZTGqj+pcynvWkypZd857ryzN8jNC7Pg2YZXNMJcHRPpaDyCBbNyVRpA==", - "dependencies": { - "acorn": "^8.14.0", - "acorn-import-attributes": "^1.9.5", - "cjs-module-lexer": "^1.2.2", - "module-details-from-path": "^1.0.3" - } - }, "node_modules/@opentelemetry/otlp-exporter-base": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.200.0.tgz", @@ 
-1983,20 +1638,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/otlp-exporter-base/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/otlp-grpc-exporter-base": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-grpc-exporter-base/-/otlp-grpc-exporter-base-0.200.0.tgz", @@ -2014,20 +1655,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/otlp-grpc-exporter-base/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/otlp-transformer": { "version": "0.200.0", "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.200.0.tgz", @@ -2048,31 +1675,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/api-logs": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", - "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", - "dependencies": { - "@opentelemetry/api": "^1.3.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - 
"node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -2088,21 +1690,6 @@ "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/otlp-transformer/node_modules/@opentelemetry/sdk-metrics": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", - "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", - "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/resources": "2.0.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.9.0 <1.10.0" - } - }, "node_modules/@opentelemetry/propagator-b3": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-2.0.0.tgz", @@ -2117,20 +1704,6 @@ "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@opentelemetry/propagator-b3/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": 
{ - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/propagator-jaeger": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-2.0.0.tgz", @@ -2145,26 +1718,26 @@ "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@opentelemetry/propagator-jaeger/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "node_modules/@opentelemetry/resources": { + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.30.1.tgz", + "integrity": "sha512-5UxZqiAgLYGFjS4s9qm5mBVo433u+dSPUFWVWXmLAD4wB65oMCoXaJP1KJa9DIYYMeHu3z4BZcStG3LC593cWA==", "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" + "@opentelemetry/core": "1.30.1", + "@opentelemetry/semantic-conventions": "1.28.0" }, "engines": { - "node": "^18.19.0 || >=20.6.0" + "node": ">=14" }, "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@opentelemetry/resources": { + "node_modules/@opentelemetry/resources/node_modules/@opentelemetry/core": { "version": "1.30.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.30.1.tgz", - "integrity": "sha512-5UxZqiAgLYGFjS4s9qm5mBVo433u+dSPUFWVWXmLAD4wB65oMCoXaJP1KJa9DIYYMeHu3z4BZcStG3LC593cWA==", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.30.1.tgz", + "integrity": "sha512-OOCM2C/QIURhJMuKaekP3TRBxBKxG/TWWA0TL2J6nXUtDnuCtccy49LUJF8xPFXMX+0LMcxFpCo8M9cGY1W6rQ==", "dependencies": { - "@opentelemetry/core": "1.30.1", "@opentelemetry/semantic-conventions": "1.28.0" }, "engines": { @@ -2198,32 +1771,37 @@ "@opentelemetry/api": ">=1.4.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/api-logs": { - "version": 
"0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", - "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", + "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/resources": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", "dependencies": { - "@opentelemetry/api": "^1.3.0" + "@opentelemetry/core": "2.0.0", + "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { - "node": ">=8.0.0" + "node": "^18.19.0 || >=20.6.0" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/core": { + "node_modules/@opentelemetry/sdk-metrics": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", + "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" + "@opentelemetry/core": "2.0.0", + "@opentelemetry/resources": "2.0.0" }, "engines": { "node": "^18.19.0 || >=20.6.0" }, "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" + "@opentelemetry/api": ">=1.9.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-logs/node_modules/@opentelemetry/resources": { + "node_modules/@opentelemetry/sdk-metrics/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", "integrity": 
"sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", @@ -2273,127 +1851,29 @@ "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/api-logs": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.200.0.tgz", - "integrity": "sha512-IKJBQxh91qJ+3ssRly5hYEJ8NDHu9oY/B1PXVSCWf7zytmYO9RNLB0Ox9XQ/fJ8m6gY6Q6NtBWlmXfaXt5Uc4Q==", - "dependencies": { - "@opentelemetry/api": "^1.3.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/core": { + "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/resources": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", + "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", "dependencies": { + "@opentelemetry/core": "2.0.0", "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { "node": "^18.19.0 || >=20.6.0" }, "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" + "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/exporter-metrics-otlp-http": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.200.0.tgz", - "integrity": "sha512-5BiR6i8yHc9+qW7F6LqkuUnIzVNA7lt0qRxIKcKT+gq3eGUPHZ3DY29sfxI3tkvnwMgtnHDMNze5DdxW39HsAw==", + "node_modules/@opentelemetry/sdk-trace-base": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.0.0.tgz", + "integrity": 
"sha512-qQnYdX+ZCkonM7tA5iU4fSRsVxbFGml8jbxOgipRGMFHKaXKHQ30js03rTobYjKjIfnOsZSbHKWF0/0v0OQGfw==", "dependencies": { "@opentelemetry/core": "2.0.0", - "@opentelemetry/otlp-exporter-base": "0.200.0", - "@opentelemetry/otlp-transformer": "0.200.0", "@opentelemetry/resources": "2.0.0", - "@opentelemetry/sdk-metrics": "2.0.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/instrumentation": { - "version": "0.200.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.200.0.tgz", - "integrity": "sha512-pmPlzfJd+vvgaZd/reMsC8RWgTXn2WY1OWT5RT42m3aOn5532TozwXNDhg1vzqJ+jnvmkREcdLr27ebJEQt0Jg==", - "dependencies": { - "@opentelemetry/api-logs": "0.200.0", - "@types/shimmer": "^1.2.0", - "import-in-the-middle": "^1.8.1", - "require-in-the-middle": "^7.1.1", - "shimmer": "^1.2.1" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/resources": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", - "integrity": "sha512-rnZr6dML2z4IARI4zPGQV4arDikF/9OXZQzrC01dLmn0CZxU5U5OLd/m1T7YkGRj5UitjeoCtg/zorlgMQcdTg==", - "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.3.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/sdk-node/node_modules/@opentelemetry/sdk-metrics": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-2.0.0.tgz", - "integrity": "sha512-Bvy8QDjO05umd0+j+gDeWcTaVa1/R2lDj/eOvjzpm8VQj1K1vVZJuyjThpV5/lSHyYW2JaHF2IQ7Z8twJFAhjA==", - "dependencies": { - "@opentelemetry/core": 
"2.0.0", - "@opentelemetry/resources": "2.0.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.9.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/sdk-node/node_modules/acorn": { - "version": "8.14.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", - "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/@opentelemetry/sdk-node/node_modules/import-in-the-middle": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.13.1.tgz", - "integrity": "sha512-k2V9wNm9B+ysuelDTHjI9d5KPc4l8zAZTGqj+pcynvWkypZd857ryzN8jNC7Pg2YZXNMJcHRPpaDyCBbNyVRpA==", - "dependencies": { - "acorn": "^8.14.0", - "acorn-import-attributes": "^1.9.5", - "cjs-module-lexer": "^1.2.2", - "module-details-from-path": "^1.0.3" - } - }, - "node_modules/@opentelemetry/sdk-trace-base": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-2.0.0.tgz", - "integrity": "sha512-qQnYdX+ZCkonM7tA5iU4fSRsVxbFGml8jbxOgipRGMFHKaXKHQ30js03rTobYjKjIfnOsZSbHKWF0/0v0OQGfw==", - "dependencies": { - "@opentelemetry/core": "2.0.0", - "@opentelemetry/resources": "2.0.0", - "@opentelemetry/semantic-conventions": "^1.29.0" + "@opentelemetry/semantic-conventions": "^1.29.0" }, "engines": { "node": "^18.19.0 || >=20.6.0" @@ -2402,20 +1882,6 @@ "@opentelemetry/api": ">=1.3.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-trace-base/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - 
"node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/sdk-trace-base/node_modules/@opentelemetry/resources": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-2.0.0.tgz", @@ -2447,24 +1913,10 @@ "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-trace-node/node_modules/@opentelemetry/core": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-2.0.0.tgz", - "integrity": "sha512-SLX36allrcnVaPYG3R78F/UZZsBsvbc7lMCLx37LyH5MJ1KAAZ2E3mW9OAD3zGz0G8q/BtoS5VUrjzDydhD6LQ==", - "dependencies": { - "@opentelemetry/semantic-conventions": "^1.29.0" - }, - "engines": { - "node": "^18.19.0 || >=20.6.0" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, "node_modules/@opentelemetry/semantic-conventions": { - "version": "1.32.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.32.0.tgz", - "integrity": "sha512-s0OpmpQFSfMrmedAn9Lhg4KWJELHCU6uU9dtIJ28N8UGhf9Y55im5X8fEzwhwDwiSqN+ZPSNrDJF7ivf/AuRPQ==", + "version": "1.34.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.34.0.tgz", + "integrity": "sha512-aKcOkyrorBGlajjRdVoJWHTxfxO1vCNHLJVlSDaRHDIdjU+pX8IYQPvPDkYiujKLbRnWU+1TBwEt0QRgSm4SGA==", "engines": { "node": ">=14" } @@ -2583,16 +2035,18 @@ "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" }, "node_modules/@rushstack/node-core-library": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/@rushstack/node-core-library/-/node-core-library-4.3.0.tgz", - "integrity": "sha512-JuNZ7lwaYQ4R1TugpryyWBn4lIxK+L7fF+muibFp0by5WklG22nsvH868fuBoZMLo5FqAs6WFOifNos4PJjWSA==", - "dependencies": { - "fs-extra": "~7.0.1", + "version": "5.13.0", + "resolved": 
"https://registry.npmjs.org/@rushstack/node-core-library/-/node-core-library-5.13.0.tgz", + "integrity": "sha512-IGVhy+JgUacAdCGXKUrRhwHMTzqhWwZUI+qEPcdzsb80heOw0QPbhhoVsoiMF7Klp8eYsp7hzpScMXmOa3Uhfg==", + "dependencies": { + "ajv": "~8.13.0", + "ajv-draft-04": "~1.0.0", + "ajv-formats": "~3.0.1", + "fs-extra": "~11.3.0", "import-lazy": "~4.0.0", "jju": "~1.4.0", "resolve": "~1.22.1", - "semver": "~7.5.4", - "z-schema": "~5.0.2" + "semver": "~7.5.4" }, "peerDependencies": { "@types/node": "*" @@ -2603,26 +2057,51 @@ } } }, + "node_modules/@rushstack/node-core-library/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@rushstack/node-core-library/node_modules/ajv-draft-04": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/ajv-draft-04/-/ajv-draft-04-1.0.0.tgz", + "integrity": "sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==", + "peerDependencies": { + "ajv": "^8.5.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, "node_modules/@rushstack/node-core-library/node_modules/fs-extra": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz", - "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==", + "version": "11.3.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.0.tgz", + "integrity": "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew==", "dependencies": { - "graceful-fs": "^4.1.2", - 
"jsonfile": "^4.0.0", - "universalify": "^0.1.0" + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, "engines": { - "node": ">=6 <7 || >=8" + "node": ">=14.14" } }, - "node_modules/@rushstack/node-core-library/node_modules/jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==", - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } + "node_modules/@rushstack/node-core-library/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" }, "node_modules/@rushstack/node-core-library/node_modules/lru-cache": { "version": "6.0.0", @@ -2650,19 +2129,19 @@ } }, "node_modules/@rushstack/node-core-library/node_modules/universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", "engines": { - "node": ">= 4.0.0" + "node": ">= 10.0.0" } }, "node_modules/@rushstack/terminal": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@rushstack/terminal/-/terminal-0.11.0.tgz", - "integrity": "sha512-LKz7pv0G9Py5uULahNSixK1pTqIIKd103pAGhDW51YfzPojvmO5wfITe0PEUNAJZjuufN/KgeRW83dJo1gL2rQ==", + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/@rushstack/terminal/-/terminal-0.15.2.tgz", + "integrity": 
"sha512-7Hmc0ysK5077R/IkLS9hYu0QuNafm+TbZbtYVzCMbeOdMjaRboLKrhryjwZSRJGJzu+TV1ON7qZHeqf58XfLpA==", "dependencies": { - "@rushstack/node-core-library": "4.3.0", + "@rushstack/node-core-library": "5.13.0", "supports-color": "~8.1.1" }, "peerDependencies": { @@ -2689,11 +2168,11 @@ } }, "node_modules/@rushstack/ts-command-line": { - "version": "4.21.0", - "resolved": "https://registry.npmjs.org/@rushstack/ts-command-line/-/ts-command-line-4.21.0.tgz", - "integrity": "sha512-z38FLUCn8M9FQf19gJ9eltdwkvc47PxvJmVZS6aKwbBAa3Pis3r3A+ZcBCVPNb9h/Tbga+i0tHdzoSGUoji9GQ==", + "version": "4.23.7", + "resolved": "https://registry.npmjs.org/@rushstack/ts-command-line/-/ts-command-line-4.23.7.tgz", + "integrity": "sha512-Gr9cB7DGe6uz5vq2wdr89WbVDKz0UeuFEn5H2CfWDe7JvjFFaiV15gi6mqDBTbHhHCWS7w8mF1h3BnIfUndqdA==", "dependencies": { - "@rushstack/terminal": "0.11.0", + "@rushstack/terminal": "0.15.2", "@types/argparse": "1.0.38", "argparse": "~1.0.9", "string-argv": "~0.3.1" @@ -2713,41 +2192,41 @@ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" }, "node_modules/@sentry-internal/tracing": { - "version": "7.115.0", - "resolved": "https://registry.npmjs.org/@sentry-internal/tracing/-/tracing-7.115.0.tgz", - "integrity": "sha512-n1w3eJadvzkL4HebjtJGHre8Z9WbYpPw5GxSNI8ZFM+OTnhzzCAEcNL2C4tr7ssD2Lkao4+N3KaigJi54geOmg==", + "version": "7.120.3", + "resolved": "https://registry.npmjs.org/@sentry-internal/tracing/-/tracing-7.120.3.tgz", + "integrity": "sha512-Ausx+Jw1pAMbIBHStoQ6ZqDZR60PsCByvHdw/jdH9AqPrNE9xlBSf9EwcycvmrzwyKspSLaB52grlje2cRIUMg==", "dev": true, "dependencies": { - "@sentry/core": "7.115.0", - "@sentry/types": "7.115.0", - "@sentry/utils": "7.115.0" + "@sentry/core": "7.120.3", + "@sentry/types": "7.120.3", + "@sentry/utils": "7.120.3" }, "engines": { "node": ">=8" } }, "node_modules/@sentry/core": { - "version": "7.115.0", - "resolved": "https://registry.npmjs.org/@sentry/core/-/core-7.115.0.tgz", - "integrity": 
"sha512-LSacE6rY/pJY4esXdLex5qVjo82DX6sQuvDLcEcR00bvRWGWMxSi2SipeW4RLbKmYyi0Ub+T+tUJxIOViyqyXw==", + "version": "7.120.3", + "resolved": "https://registry.npmjs.org/@sentry/core/-/core-7.120.3.tgz", + "integrity": "sha512-vyy11fCGpkGK3qI5DSXOjgIboBZTriw0YDx/0KyX5CjIjDDNgp5AGgpgFkfZyiYiaU2Ww3iFuKo4wHmBusz1uA==", "dev": true, "dependencies": { - "@sentry/types": "7.115.0", - "@sentry/utils": "7.115.0" + "@sentry/types": "7.120.3", + "@sentry/utils": "7.120.3" }, "engines": { "node": ">=8" } }, "node_modules/@sentry/integrations": { - "version": "7.115.0", - "resolved": "https://registry.npmjs.org/@sentry/integrations/-/integrations-7.115.0.tgz", - "integrity": "sha512-0a75FIfG2mLPTmQ2QYFYoh3yvHqGT+D4SBAcsWVZEG24lNCiofSHnjffzIOXZX+2Spi1nY+cxIt9ItSyS2Z8VQ==", + "version": "7.120.3", + "resolved": "https://registry.npmjs.org/@sentry/integrations/-/integrations-7.120.3.tgz", + "integrity": "sha512-6i/lYp0BubHPDTg91/uxHvNui427df9r17SsIEXa2eKDwQ9gW2qRx5IWgvnxs2GV/GfSbwcx4swUB3RfEWrXrQ==", "dev": true, "dependencies": { - "@sentry/core": "7.115.0", - "@sentry/types": "7.115.0", - "@sentry/utils": "7.115.0", + "@sentry/core": "7.120.3", + "@sentry/types": "7.120.3", + "@sentry/utils": "7.120.3", "localforage": "^1.8.1" }, "engines": { @@ -2755,37 +2234,37 @@ } }, "node_modules/@sentry/node": { - "version": "7.115.0", - "resolved": "https://registry.npmjs.org/@sentry/node/-/node-7.115.0.tgz", - "integrity": "sha512-Y8kiwHqiICLkraSTsm7O/MWkfakRXOjhwpv4f3f+5CmPIigW0YCMTQZ3sSX+NhnvDhdkmakWy3tH9CX8+T2Ykg==", + "version": "7.120.3", + "resolved": "https://registry.npmjs.org/@sentry/node/-/node-7.120.3.tgz", + "integrity": "sha512-t+QtekZedEfiZjbkRAk1QWJPnJlFBH/ti96tQhEq7wmlk3VszDXraZvLWZA0P2vXyglKzbWRGkT31aD3/kX+5Q==", "dev": true, "dependencies": { - "@sentry-internal/tracing": "7.115.0", - "@sentry/core": "7.115.0", - "@sentry/integrations": "7.115.0", - "@sentry/types": "7.115.0", - "@sentry/utils": "7.115.0" + "@sentry-internal/tracing": "7.120.3", + "@sentry/core": "7.120.3", 
+ "@sentry/integrations": "7.120.3", + "@sentry/types": "7.120.3", + "@sentry/utils": "7.120.3" }, "engines": { "node": ">=8" } }, "node_modules/@sentry/types": { - "version": "7.115.0", - "resolved": "https://registry.npmjs.org/@sentry/types/-/types-7.115.0.tgz", - "integrity": "sha512-KbhDS0DX+lk9VFCCR4AwPdiU9KUAH+vI+5HBLlgCNMY7KRGxRLnpXi3VyGi80iRdt2gi8sg2ncsVhc+SunBx7w==", + "version": "7.120.3", + "resolved": "https://registry.npmjs.org/@sentry/types/-/types-7.120.3.tgz", + "integrity": "sha512-C4z+3kGWNFJ303FC+FxAd4KkHvxpNFYAFN8iMIgBwJdpIl25KZ8Q/VdGn0MLLUEHNLvjob0+wvwlcRBBNLXOow==", "dev": true, "engines": { "node": ">=8" } }, "node_modules/@sentry/utils": { - "version": "7.115.0", - "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-7.115.0.tgz", - "integrity": "sha512-MhrpHOMPwsjlXE3vnfFFyexneozPluaMCgL7MzH2iL0m7FxXG8A9CEe7W9sVG8hh1kw8ksYz1ryb2Mx2L+UTJA==", + "version": "7.120.3", + "resolved": "https://registry.npmjs.org/@sentry/utils/-/utils-7.120.3.tgz", + "integrity": "sha512-UDAOQJtJDxZHQ5Nm1olycBIsz2wdGX8SdzyGVHmD8EOQYAeDZQyIlQYohDe9nazdIOQLZCIc3fU0G9gqVLkaGQ==", "dev": true, "dependencies": { - "@sentry/types": "7.115.0" + "@sentry/types": "7.120.3" }, "engines": { "node": ">=8" @@ -2801,38 +2280,38 @@ } }, "node_modules/@sinonjs/fake-timers": { - "version": "11.2.2", - "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-11.2.2.tgz", - "integrity": "sha512-G2piCSxQ7oWOxwGSAyFHfPIsyeJGXYtc6mFbnFA+kRXkiEnTl8c/8jul2S329iFBnDI9HGoeWWAZvuvOkZccgw==", + "version": "11.3.1", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-11.3.1.tgz", + "integrity": "sha512-EVJO7nW5M/F5Tur0Rf2z/QoMo+1Ia963RiMtapiQrEWvY0iBUvADo8Beegwjpnle5BHkyHuoxSTW3jF43H1XRA==", "dev": true, "dependencies": { - "@sinonjs/commons": "^3.0.0" + "@sinonjs/commons": "^3.0.1" } }, "node_modules/@sinonjs/samsam": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.0.tgz", - "integrity": 
"sha512-Bp8KUVlLp8ibJZrnvq2foVhP0IVX2CIprMJPK0vqGqgrDa0OHVKeZyBykqskkrdxV6yKBPmGasO8LVjAKR3Gew==", + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@sinonjs/samsam/-/samsam-8.0.2.tgz", + "integrity": "sha512-v46t/fwnhejRSFTGqbpn9u+LQ9xJDse10gNnPgAcxgdoCDMXj/G2asWAC/8Qs+BAZDicX+MNZouXT1A7c83kVw==", "dev": true, "dependencies": { - "@sinonjs/commons": "^2.0.0", + "@sinonjs/commons": "^3.0.1", "lodash.get": "^4.4.2", - "type-detect": "^4.0.8" + "type-detect": "^4.1.0" } }, - "node_modules/@sinonjs/samsam/node_modules/@sinonjs/commons": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-2.0.0.tgz", - "integrity": "sha512-uLa0j859mMrg2slwQYdO/AkrOfmH+X6LTVmNTS9CqexuE2IvVORIkSpJLqePAbEnKJ77aMmCwr1NUZ57120Xcg==", + "node_modules/@sinonjs/samsam/node_modules/type-detect": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz", + "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==", "dev": true, - "dependencies": { - "type-detect": "4.0.8" + "engines": { + "node": ">=4" } }, "node_modules/@sinonjs/text-encoding": { - "version": "0.7.2", - "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.2.tgz", - "integrity": "sha512-sXXKG+uL9IrKqViTtao2Ws6dy0znu9sOaP1di/jKGW1M6VssO8vlpXCQcpZ+jisQ1tTFAC5Jo/EOzFbggBagFQ==", + "version": "0.7.3", + "resolved": "https://registry.npmjs.org/@sinonjs/text-encoding/-/text-encoding-0.7.3.tgz", + "integrity": "sha512-DE427ROAphMQzU4ENbliGYrBSYPXF+TtLg9S8vzeA+OF4ZKzoDdzfL8sxuMUGS/lgRhM6j1URSk9ghf7Xo1tyA==", "dev": true }, "node_modules/@testim/chrome-version": { @@ -2841,6 +2320,15 @@ "integrity": "sha512-kIhULpw9TrGYnHp/8VfdcneIcxKnLixmADtukQRtJUmsVlMg0niMkwV0xZmi8hqa57xqilIHjWFA0GKvEjVU5g==", "optional": true }, + "node_modules/@tootallnate/once": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", + 
"integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", + "optional": true, + "engines": { + "node": ">= 6" + } + }, "node_modules/@tootallnate/quickjs-emscripten": { "version": "0.23.0", "resolved": "https://registry.npmjs.org/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz", @@ -2853,9 +2341,9 @@ "integrity": "sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==" }, "node_modules/@types/chai": { - "version": "4.3.16", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.16.tgz", - "integrity": "sha512-PatH4iOdyh3MyWtmHVFXLWCCIhUbopaltqddG9BzB+gMIzee2MJrvd+jouii9Z3wzQJruGWAm7WOMjgfG8hQlQ==", + "version": "4.3.20", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.20.tgz", + "integrity": "sha512-/pC9HAB5I/xMlc5FP77qjCnI16ChlJfW0tGa0IUcFn38VJrTV6DeZ60NU5KZBtaOZqjdpwTWohz5HU1RrhiYxQ==", "dev": true }, "node_modules/@types/cookiejar": { @@ -2873,9 +2361,9 @@ } }, "node_modules/@types/estree": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", - "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", "dev": true }, "node_modules/@types/json-schema": { @@ -2885,16 +2373,16 @@ "dev": true }, "node_modules/@types/ms": { - "version": "0.7.34", - "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", - "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": 
"sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==" }, "node_modules/@types/node": { - "version": "22.15.17", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.15.17.tgz", - "integrity": "sha512-wIX2aSZL5FE+MR0JlvF87BNVrtFWf6AE6rxSE9X7OwnVvoyCQjpzSRJ+M87se/4QCkCiebQAqrJ0y6fwIyi7nw==", + "version": "24.0.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.0.1.tgz", + "integrity": "sha512-MX4Zioh39chHlDJbKmEgydJDS3tspMP/lnQC67G3SWsTnb9NeYVWOjkxpOSy4oMfPs4StcWHwBrvUb4ybfnuaw==", "dependencies": { - "undici-types": "~6.21.0" + "undici-types": "~7.8.0" } }, "node_modules/@types/shimmer": { @@ -2913,9 +2401,9 @@ } }, "node_modules/@types/validator": { - "version": "13.11.10", - "resolved": "https://registry.npmjs.org/@types/validator/-/validator-13.11.10.tgz", - "integrity": "sha512-e2PNXoXLr6Z+dbfx5zSh9TRlXJrELycxiaXznp4S5+D2M3b9bqJEitNHA5923jhnB2zzFiZHa2f0SI1HoIahpg==" + "version": "13.15.1", + "resolved": "https://registry.npmjs.org/@types/validator/-/validator-13.15.1.tgz", + "integrity": "sha512-9gG6ogYcoI2mCMLdcO0NYI0AYrbxIjv0MDmy/5Ywo6CpWWrqYayc+mmgxRsCgtcGJm9BSbXkMsmxGah1iGHAAQ==" }, "node_modules/@types/yauzl": { "version": "2.10.3", @@ -2927,10 +2415,13 @@ } }, "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "optional": true + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-2.0.0.tgz", + "integrity": "sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==", + "dev": true, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } }, "node_modules/abort-controller": { "version": "3.0.0", @@ -2992,44 +2483,18 @@ } }, "node_modules/agent-base": { - "version": "6.0.2", - "resolved": 
"https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "optional": true, - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/agent-base/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", "optional": true, - "dependencies": { - "ms": "2.1.2" - }, "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } + "node": ">= 14" } }, - "node_modules/agent-base/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "optional": true - }, "node_modules/agentkeepalive": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", - "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz", + "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", "optional": true, "dependencies": { "humanize-ms": "^1.2.1" @@ -3067,6 +2532,42 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ajv-formats": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz", + "integrity": 
"sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-formats/node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, "node_modules/ajv-keywords": { "version": "3.5.2", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", @@ -3162,6 +2663,7 @@ "version": "3.0.1", "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.1.tgz", "integrity": "sha512-QZW4EDmGwlYur0Yyf/b2uGucHQMa8aFUP7eu9ddR73vvhFyt4V0Vl3QHPcTNJ8l6qYOBdxgXdnBXQrHilfRQBg==", + "deprecated": "This package is no longer supported.", "optional": true, "dependencies": { "delegates": "^1.0.0", @@ -3208,13 +2710,13 @@ } }, "node_modules/array-buffer-byte-length": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz", - "integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", + 
"integrity": "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", "dev": true, "dependencies": { - "call-bind": "^1.0.5", - "is-array-buffer": "^3.0.4" + "call-bound": "^1.0.3", + "is-array-buffer": "^3.0.5" }, "engines": { "node": ">= 0.4" @@ -3229,17 +2731,19 @@ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" }, "node_modules/array-includes": { - "version": "3.1.8", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz", - "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==", + "version": "3.1.9", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz", + "integrity": "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", "define-properties": "^1.2.1", - "es-abstract": "^1.23.2", - "es-object-atoms": "^1.0.0", - "get-intrinsic": "^1.2.4", - "is-string": "^1.0.7" + "es-abstract": "^1.24.0", + "es-object-atoms": "^1.1.1", + "get-intrinsic": "^1.3.0", + "is-string": "^1.1.1", + "math-intrinsics": "^1.1.0" }, "engines": { "node": ">= 0.4" @@ -3249,19 +2753,18 @@ } }, "node_modules/arraybuffer.prototype.slice": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz", - "integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.4.tgz", + "integrity": "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", "dev": true, "dependencies": { "array-buffer-byte-length": "^1.0.1", - "call-bind": "^1.0.5", + 
"call-bind": "^1.0.8", "define-properties": "^1.2.1", - "es-abstract": "^1.22.3", - "es-errors": "^1.2.1", - "get-intrinsic": "^1.2.3", - "is-array-buffer": "^3.0.4", - "is-shared-array-buffer": "^1.0.2" + "es-abstract": "^1.23.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "is-array-buffer": "^3.0.4" }, "engines": { "node": ">= 0.4" @@ -3326,9 +2829,18 @@ } }, "node_modules/async": { - "version": "3.2.5", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.5.tgz", - "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==" + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==" + }, + "node_modules/async-function": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", + "integrity": "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + "dev": true, + "engines": { + "node": ">= 0.4" + } }, "node_modules/asynckit": { "version": "0.4.0", @@ -3377,9 +2889,9 @@ } }, "node_modules/aws4": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.1.tgz", - "integrity": "sha512-u5w79Rd7SU4JaIlA/zFqG+gOiuq25q5VLyZ8E+ijJeILuTxVzZgp2CaGw/UTw6pXYN9XMO9yiqj/nEHmhTG5CA==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz", + "integrity": "sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==", "dev": true }, "node_modules/axios": { @@ -3623,9 +3135,9 @@ "dev": true }, "node_modules/bn.js": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.12.0.tgz", - "integrity": "sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA==" + "version": "4.12.2", + "resolved": 
"https://registry.npmjs.org/bn.js/-/bn.js-4.12.2.tgz", + "integrity": "sha512-n4DSx829VRTRByMRGdjQ9iqsN0Bh4OolPsFnaZBLcbi8iXcB+kJ9s7EnRt4wILZNV3kPLHkRVfOc/HvhC3ovDw==" }, "node_modules/body-parser": { "version": "1.20.3", @@ -3668,12 +3180,13 @@ "version": "3.2.0", "resolved": "https://registry.npmjs.org/boolean/-/boolean-3.2.0.tgz", "integrity": "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==", + "deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.", "dev": true }, "node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "dependencies": { "balanced-match": "^1.0.0" } @@ -3682,7 +3195,6 @@ "version": "3.0.3", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, "dependencies": { "fill-range": "^7.1.1" }, @@ -3711,9 +3223,9 @@ "dev": true }, "node_modules/browserslist": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", - "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "version": "4.25.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.25.0.tgz", + "integrity": "sha512-PJ8gYKeS5e/whHBh8xrwYK+dAvEj7JXtz6uTucnMRB8OiGTsKccFekoRrjajPBHV8oOY+2tI4uxeceSimKwMFA==", "dev": true, "funding": [ { @@ -3730,10 +3242,10 @@ } ], "dependencies": { - "caniuse-lite": 
"^1.0.30001587", - "electron-to-chromium": "^1.4.668", - "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "caniuse-lite": "^1.0.30001718", + "electron-to-chromium": "^1.5.160", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.3" }, "bin": { "browserslist": "cli.js" @@ -3806,31 +3318,195 @@ "node": ">= 0.8" } }, - "node_modules/caching-transform": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz", - "integrity": "sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA==", - "dev": true, + "node_modules/cacache": { + "version": "15.3.0", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-15.3.0.tgz", + "integrity": "sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ==", + "optional": true, "dependencies": { - "hasha": "^5.0.0", - "make-dir": "^3.0.0", - "package-hash": "^4.0.0", - "write-file-atomic": "^3.0.0" - }, - "engines": { - "node": ">=8" - } + "@npmcli/fs": "^1.0.0", + "@npmcli/move-file": "^1.0.1", + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "glob": "^7.1.4", + "infer-owner": "^1.0.4", + "lru-cache": "^6.0.0", + "minipass": "^3.1.1", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.2", + "mkdirp": "^1.0.3", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^8.0.1", + "tar": "^6.0.2", + "unique-filename": "^1.1.1" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/cacache/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "optional": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/cacache/node_modules/glob": { + 
"version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "optional": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cacache/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "optional": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cacache/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "optional": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/cacache/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "optional": true, + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cacache/node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "optional": true, + "dependencies": { + "aggregate-error": "^3.0.0" + }, + 
"engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cacache/node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "optional": true, + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/cacache/node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/caching-transform": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/caching-transform/-/caching-transform-4.0.0.tgz", + "integrity": "sha512-kpqOvwXnjjN44D89K5ccQC+RUrsy7jB/XLlRrx0D7/2HNcTPqzsb6XgYoErwko6QsV184CA2YgS1fxDiiDZMWA==", + "dev": true, + "dependencies": { + "hasha": "^5.0.0", + "make-dir": "^3.0.0", + "package-hash": "^4.0.0", + "write-file-atomic": "^3.0.0" + }, + "engines": { + "node": ">=8" + } }, "node_modules/call-bind": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", - "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "dev": true, "dependencies": { + "call-bind-apply-helpers": "^1.0.0", "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", "get-intrinsic": 
"^1.2.4", - "set-function-length": "^1.2.1" + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" }, "engines": { "node": ">= 0.4" @@ -3879,9 +3555,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001620", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001620.tgz", - "integrity": "sha512-WJvYsOjd1/BYUY6SNGUosK9DUidBPDTnOARHp3fSmFO1ekdxaY6nKRttEVrfMmYi80ctS0kz1wiWmm14fVc3ew==", + "version": "1.0.30001722", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001722.tgz", + "integrity": "sha512-DCQHBBZtiK6JVkAGw7drvAMK0Q0POD/xZvEmDp6baiMMP6QXXk9HpD6mNYBZWhOPG6LvIDb82ITqtWjhDckHCA==", "dev": true, "funding": [ { @@ -4042,16 +3718,10 @@ "integrity": "sha512-Wmza/JzL0SiWz7kl6MhIKT5ceIlnFPJX+lwUGj7Clhy5MMldsSoJR0+uvRzOS5Kv45Mq7t1PoE8TsOA9bzvb6g==" }, "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": 
"sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -4064,6 +3734,9 @@ "engines": { "node": ">= 8.10.0" }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, "optionalDependencies": { "fsevents": "~2.3.2" } @@ -4089,14 +3762,14 @@ } }, "node_modules/chromedriver": { - "version": "125.0.0", - "resolved": "https://registry.npmjs.org/chromedriver/-/chromedriver-125.0.0.tgz", - "integrity": "sha512-wWXrxWLWqXRTmRZDtPigs+ys44srlpHTpsL7MHnZc9iaE1oIB0hslSVeem6TcsEb1Ou8nvPx3vs5bPwCI6+VHg==", + "version": "137.0.3", + "resolved": "https://registry.npmjs.org/chromedriver/-/chromedriver-137.0.3.tgz", + "integrity": "sha512-ceBxOxaXl/3ddvxZicgASpL8HfxmUK/q/VI3STcyl0t+SvgydNqXQO1XWwqUIA/tMfREpUunumQvwH7r6hdT0Q==", "hasInstallScript": true, "optional": true, "dependencies": { "@testim/chrome-version": "^1.1.4", - "axios": "^1.6.7", + "axios": "^1.7.4", "compare-versions": "^6.1.0", "extract-zip": "^2.0.1", "proxy-agent": "^6.4.0", @@ -4107,7 +3780,7 @@ "chromedriver": "bin/chromedriver" }, "engines": { - "node": ">=18" + "node": ">=20" } }, "node_modules/circular-json": { @@ -4193,29 +3866,16 @@ "dev": true }, "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", "dependencies": { "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", + "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" - } - }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "node": ">=12" } }, "node_modules/color-convert": { @@ -4300,9 +3960,9 @@ } }, "node_modules/command-line-usage/node_modules/typical": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/typical/-/typical-7.1.1.tgz", - "integrity": "sha512-T+tKVNs6Wu7IWiAce5BgMd7OZfNYUndHwc5MknN+UHOudi7sGZzuHdCadllRuqJ3fPtgFtIH9+lt9qRv6lmpfA==", + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/typical/-/typical-7.3.0.tgz", + "integrity": "sha512-ya4mg/30vm+DOWfBg4YK3j2WD6TWtRkCbasOJr40CseYENzCUby/7rIvXA99JGsQHeNxLbnXdyLLxKSv3tauFw==", "engines": { "node": ">=12.17" } @@ -4323,9 +3983,9 @@ "dev": true }, "node_modules/compare-versions": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/compare-versions/-/compare-versions-6.1.0.tgz", - "integrity": "sha512-LNZQXhqUvqUTotpZ00qLSaify3b4VFD588aRr8MKFw4CMUr98ytzCW5wDH5qx/DEY5kCDXcbcRuCqL0szEf2tg==", + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/compare-versions/-/compare-versions-6.1.1.tgz", + "integrity": "sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg==", "optional": true }, "node_modules/component-emitter": { @@ -4589,14 +4249,14 @@ } }, "node_modules/data-view-buffer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.1.tgz", - "integrity": "sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.2.tgz", + "integrity": 
"sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.6", + "call-bound": "^1.0.3", "es-errors": "^1.3.0", - "is-data-view": "^1.0.1" + "is-data-view": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -4606,29 +4266,29 @@ } }, "node_modules/data-view-byte-length": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz", - "integrity": "sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.2.tgz", + "integrity": "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", + "call-bound": "^1.0.3", "es-errors": "^1.3.0", - "is-data-view": "^1.0.1" + "is-data-view": "^1.0.2" }, "engines": { "node": ">= 0.4" }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "url": "https://github.com/sponsors/inspect-js" } }, "node_modules/data-view-byte-offset": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz", - "integrity": "sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.1.tgz", + "integrity": "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.6", + "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-data-view": "^1.0.1" }, @@ -4680,9 +4340,9 @@ } }, "node_modules/deep-eql": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.1.tgz", - "integrity": 
"sha512-nwQCf6ne2gez3o1MxWifqkciwt0zhl0LO1/UwVu4uMBuPmflWM4oQ70XMqHqnBJA+nhzncaqL9HVL6KkHJ28lw==", + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", "dev": true, "engines": { "node": ">=6" @@ -4721,6 +4381,7 @@ "version": "1.1.4", "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, "dependencies": { "es-define-property": "^1.0.0", "es-errors": "^1.3.0", @@ -4787,9 +4448,9 @@ } }, "node_modules/deglob/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -4800,6 +4461,7 @@ "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "dependencies": { "fs.realpath": "^1.0.0", @@ -4884,9 +4546,9 @@ } }, "node_modules/detect-libc": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", - "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz", + 
"integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==", "engines": { "node": ">=8" } @@ -4943,6 +4605,19 @@ "resolved": "https://registry.npmjs.org/dottie/-/dottie-2.0.6.tgz", "integrity": "sha512-iGCHkfUc5kFekGiqhe8B/mdaurD+lakO9txNnTvKtA6PISrw86LgqHvRzWYPyoE2Ph5aMIrCw9/uko6XHTKCwA==" }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/eastasianwidth": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", @@ -5027,9 +4702,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.4.772", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.772.tgz", - "integrity": "sha512-jFfEbxR/abTTJA3ci+2ok1NTuOBBtB4jH+UT6PUmRN+DY3WSD4FFRsgoVQ+QNIJ0T7wrXwzsWCI2WKC46b++2A==", + "version": "1.5.167", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.167.tgz", + "integrity": "sha512-LxcRvnYO5ez2bMOFpbuuVuAI5QNeY1ncVytE/KXaL6ZNfzX1yPlAO0nSOyIHx2fVAuUprMqPs/TdVhUFZy7SIQ==", "dev": true }, "node_modules/elliptic": { @@ -5130,57 +4805,65 @@ } }, "node_modules/es-abstract": { - "version": "1.23.3", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz", - "integrity": "sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==", + "version": "1.24.0", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.0.tgz", + "integrity": "sha512-WSzPgsdLtTcQwm4CROfS5ju2Wa1QQcVeT37jFjYzdFz1r9ahadC8B8/a4qxJxM+09F18iumCdRmlr96ZYkQvEg==", "dev": true, "dependencies": { - 
"array-buffer-byte-length": "^1.0.1", - "arraybuffer.prototype.slice": "^1.0.3", + "array-buffer-byte-length": "^1.0.2", + "arraybuffer.prototype.slice": "^1.0.4", "available-typed-arrays": "^1.0.7", - "call-bind": "^1.0.7", - "data-view-buffer": "^1.0.1", - "data-view-byte-length": "^1.0.1", - "data-view-byte-offset": "^1.0.0", - "es-define-property": "^1.0.0", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "data-view-buffer": "^1.0.2", + "data-view-byte-length": "^1.0.2", + "data-view-byte-offset": "^1.0.1", + "es-define-property": "^1.0.1", "es-errors": "^1.3.0", - "es-object-atoms": "^1.0.0", - "es-set-tostringtag": "^2.0.3", - "es-to-primitive": "^1.2.1", - "function.prototype.name": "^1.1.6", - "get-intrinsic": "^1.2.4", - "get-symbol-description": "^1.0.2", - "globalthis": "^1.0.3", - "gopd": "^1.0.1", + "es-object-atoms": "^1.1.1", + "es-set-tostringtag": "^2.1.0", + "es-to-primitive": "^1.3.0", + "function.prototype.name": "^1.1.8", + "get-intrinsic": "^1.3.0", + "get-proto": "^1.0.1", + "get-symbol-description": "^1.1.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", "has-property-descriptors": "^1.0.2", - "has-proto": "^1.0.3", - "has-symbols": "^1.0.3", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", "hasown": "^2.0.2", - "internal-slot": "^1.0.7", - "is-array-buffer": "^3.0.4", + "internal-slot": "^1.1.0", + "is-array-buffer": "^3.0.5", "is-callable": "^1.2.7", - "is-data-view": "^1.0.1", + "is-data-view": "^1.0.2", "is-negative-zero": "^2.0.3", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.3", - "is-string": "^1.0.7", - "is-typed-array": "^1.1.13", - "is-weakref": "^1.0.2", - "object-inspect": "^1.13.1", + "is-regex": "^1.2.1", + "is-set": "^2.0.3", + "is-shared-array-buffer": "^1.0.4", + "is-string": "^1.1.1", + "is-typed-array": "^1.1.15", + "is-weakref": "^1.1.1", + "math-intrinsics": "^1.1.0", + "object-inspect": "^1.13.4", "object-keys": "^1.1.1", - "object.assign": "^4.1.5", - "regexp.prototype.flags": "^1.5.2", - 
"safe-array-concat": "^1.1.2", - "safe-regex-test": "^1.0.3", - "string.prototype.trim": "^1.2.9", - "string.prototype.trimend": "^1.0.8", + "object.assign": "^4.1.7", + "own-keys": "^1.0.1", + "regexp.prototype.flags": "^1.5.4", + "safe-array-concat": "^1.1.3", + "safe-push-apply": "^1.0.0", + "safe-regex-test": "^1.1.0", + "set-proto": "^1.0.0", + "stop-iteration-iterator": "^1.1.0", + "string.prototype.trim": "^1.2.10", + "string.prototype.trimend": "^1.0.9", "string.prototype.trimstart": "^1.0.8", - "typed-array-buffer": "^1.0.2", - "typed-array-byte-length": "^1.0.1", - "typed-array-byte-offset": "^1.0.2", - "typed-array-length": "^1.0.6", - "unbox-primitive": "^1.0.2", - "which-typed-array": "^1.1.15" + "typed-array-buffer": "^1.0.3", + "typed-array-byte-length": "^1.0.3", + "typed-array-byte-offset": "^1.0.4", + "typed-array-length": "^1.0.7", + "unbox-primitive": "^1.1.0", + "which-typed-array": "^1.1.19" }, "engines": { "node": ">= 0.4" @@ -5190,12 +4873,9 @@ } }, "node_modules/es-define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", - "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", - "dependencies": { - "get-intrinsic": "^1.2.4" - }, + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", "engines": { "node": ">= 0.4" } @@ -5209,10 +4889,9 @@ } }, "node_modules/es-object-atoms": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz", - "integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==", - "dev": true, + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": 
"sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", "dependencies": { "es-errors": "^1.3.0" }, @@ -5221,28 +4900,28 @@ } }, "node_modules/es-set-tostringtag": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz", - "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==", - "dev": true, + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", "dependencies": { - "get-intrinsic": "^1.2.4", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", - "hasown": "^2.0.1" + "hasown": "^2.0.2" }, "engines": { "node": ">= 0.4" } }, "node_modules/es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.3.0.tgz", + "integrity": "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", "dev": true, "dependencies": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-symbol": "^1.0.4" }, "engines": { "node": ">= 0.4" @@ -5310,9 +4989,9 @@ } }, "node_modules/escalade": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": 
"sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", "engines": { "node": ">=6" } @@ -5356,29 +5035,30 @@ } }, "node_modules/eslint": { - "version": "9.16.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.16.0.tgz", - "integrity": "sha512-whp8mSQI4C8VXd+fLgSM0lh3UlmcFtVwUQjyKCFfsp+2ItAIYhlq/hqGahGqHE6cv9unM41VlqKk2VtKYR2TaA==", + "version": "9.28.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.28.0.tgz", + "integrity": "sha512-ocgh41VhRlf9+fVpe7QKzwLj9c92fDiqOj8Y3Sd4/ZmVA4Btx4PlUYPq4pp9JDyupkf1upbEXecxL2mwNV7jPQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", - "@eslint/config-array": "^0.19.0", - "@eslint/core": "^0.9.0", - "@eslint/eslintrc": "^3.2.0", - "@eslint/js": "9.16.0", - "@eslint/plugin-kit": "^0.2.3", + "@eslint/config-array": "^0.20.0", + "@eslint/config-helpers": "^0.2.1", + "@eslint/core": "^0.14.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.28.0", + "@eslint/plugin-kit": "^0.3.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", - "@humanwhocodes/retry": "^0.4.1", + "@humanwhocodes/retry": "^0.4.2", "@types/estree": "^1.0.6", "@types/json-schema": "^7.0.15", "ajv": "^6.12.4", "chalk": "^4.0.0", - "cross-spawn": "^7.0.5", + "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^8.2.0", + "eslint-scope": "^8.3.0", "eslint-visitor-keys": "^4.2.0", "espree": "^10.3.0", "esquery": "^1.5.0", @@ -5453,9 +5133,9 @@ "dev": true }, "node_modules/eslint-module-utils": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.1.tgz", - "integrity": "sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q==", + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.12.0.tgz", + "integrity": 
"sha512-wALZ0HFoytlyh/1+4wuZ9FJCD/leWHQzzrxJ8+rebyReSLk7LApMyd3WJaLVoN+D5+WIdJyDK1c6JnE65V4Zyg==", "dev": true, "dependencies": { "debug": "^3.2.7" @@ -5521,9 +5201,9 @@ } }, "node_modules/eslint-plugin-node/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -5593,9 +5273,9 @@ } }, "node_modules/eslint-scope": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.2.0.tgz", - "integrity": "sha512-PHlWUfG6lvPc3yvP5A4PNyBL1W8fkDUccmI21JUu/+GKZBoH/W5u6usENXUrWFRsyoW5ACUjFGgAFQp5gUlb/A==", + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", "dev": true, "dependencies": { "esrecurse": "^4.3.0", @@ -5630,9 +5310,9 @@ } }, "node_modules/eslint-visitor-keys": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz", - "integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5642,9 +5322,9 @@ } }, "node_modules/eslint/node_modules/brace-expansion": { - "version": 
"1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -5652,12 +5332,12 @@ } }, "node_modules/eslint/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -5681,9 +5361,9 @@ } }, "node_modules/eslint/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/esniff": { @@ -5702,14 +5382,14 @@ } }, "node_modules/espree": { - "version": "10.3.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz", - "integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==", + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": 
"sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", "dev": true, "dependencies": { - "acorn": "^8.14.0", + "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^4.2.0" + "eslint-visitor-keys": "^4.2.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -5719,9 +5399,9 @@ } }, "node_modules/espree/node_modules/acorn": { - "version": "8.14.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", - "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -5744,9 +5424,9 @@ } }, "node_modules/esquery": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", - "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", "dev": true, "dependencies": { "estraverse": "^5.1.0" @@ -6069,12 +5749,12 @@ } }, "node_modules/extract-zip/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "optional": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -6101,9 +5781,9 @@ } }, 
"node_modules/extract-zip/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "optional": true }, "node_modules/extsprintf": { @@ -6118,8 +5798,33 @@ "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", @@ -6147,6 +5852,29 @@ "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", "dev": true }, + "node_modules/fast-uri": { + "version": "3.0.6", + "resolved": 
"https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.6.tgz", + "integrity": "sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ] + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dependencies": { + "reusify": "^1.0.4" + } + }, "node_modules/fd-slicer": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", @@ -6235,7 +5963,6 @@ "version": "7.1.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -6333,15 +6060,15 @@ } }, "node_modules/flatted": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", - "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", "dev": true }, "node_modules/follow-redirects": { - "version": "1.15.6", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", - "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": 
"sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", "funding": [ { "type": "individual", @@ -6358,12 +6085,18 @@ } }, "node_modules/for-each": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", - "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", "dev": true, "dependencies": { - "is-callable": "^1.1.3" + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/foreground-child": { @@ -6389,12 +6122,14 @@ } }, "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz", + "integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", "mime-types": "^2.1.12" }, "engines": { @@ -6472,32 +6207,45 @@ "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" }, "node_modules/fs-extra": { - "version": "11.2.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", - "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", - "optional": true, + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": 
"sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dev": true, "dependencies": { + "at-least-node": "^1.0.0", "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" }, "engines": { - "node": ">=14.14" + "node": ">=10" } }, "node_modules/fs-extra/node_modules/universalify": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "optional": true, + "dev": true, "engines": { "node": ">= 10.0.0" } }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "devOptional": true }, "node_modules/fsevents": { "version": "2.3.3", @@ -6534,15 +6282,17 @@ } }, "node_modules/function.prototype.name": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", - "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.8.tgz", + "integrity": "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", 
- "define-properties": "^1.2.0", - "es-abstract": "^1.22.1", - "functions-have-names": "^1.2.3" + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "functions-have-names": "^1.2.3", + "hasown": "^2.0.2", + "is-callable": "^1.2.7" }, "engines": { "node": ">= 0.4" @@ -6570,6 +6320,7 @@ "version": "4.0.4", "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.4.tgz", "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", + "deprecated": "This package is no longer supported.", "optional": true, "dependencies": { "aproba": "^1.0.3 || ^2.0.0", @@ -6620,15 +6371,20 @@ } }, "node_modules/get-intrinsic": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", - "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "hasown": "^2.0.0" + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" }, "engines": { "node": ">= 0.4" @@ -6646,6 +6402,27 @@ "node": ">=8.0.0" } }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stdin": { + "version": 
"6.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-6.0.0.tgz", + "integrity": "sha512-jp4tHawyV7+fkkSKyvjuLZswblUtz+SQKzSWnBbii16BuZksJlU1wuBYXY75r+duh/llF1ur6oNwi+2ZzjKZ7g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, "node_modules/get-stream": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", @@ -6658,14 +6435,14 @@ } }, "node_modules/get-symbol-description": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz", - "integrity": "sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.1.0.tgz", + "integrity": "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", "dev": true, "dependencies": { - "call-bind": "^1.0.5", + "call-bound": "^1.0.3", "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4" + "get-intrinsic": "^1.2.6" }, "engines": { "node": ">= 0.4" @@ -6675,27 +6452,26 @@ } }, "node_modules/get-uri": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.3.tgz", - "integrity": "sha512-BzUrJBS9EcUb4cFol8r4W3v1cPsSyajLSthNkz5BxbpDcHN5tIrM10E2eNvfnvBn3DaT3DUgx0OpsBKkaOpanw==", + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-6.0.4.tgz", + "integrity": "sha512-E1b1lFFLvLgak2whF2xDBcOy6NLVGZBqqjJjsIhvopKfWWEi64pLVTWWehV8KlLerZkfNTA95sTe2OdJKm1OzQ==", "optional": true, "dependencies": { "basic-ftp": "^5.0.2", "data-uri-to-buffer": "^6.0.2", - "debug": "^4.3.4", - "fs-extra": "^11.2.0" + "debug": "^4.3.4" }, "engines": { "node": ">= 14" } }, "node_modules/get-uri/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": 
"sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "optional": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -6707,9 +6483,9 @@ } }, "node_modules/get-uri/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "optional": true }, "node_modules/getpass": { @@ -6730,6 +6506,8 @@ "version": "8.1.0", "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -6760,6 +6538,7 @@ "version": "5.1.6", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, "dependencies": { "brace-expansion": "^2.0.1" }, @@ -6818,11 +6597,11 @@ } }, "node_modules/gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "dependencies": { - "get-intrinsic": "^1.1.3" + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": 
"sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -6908,10 +6687,13 @@ } }, "node_modules/has-bigints": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", - "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", + "integrity": "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -6928,6 +6710,7 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dev": true, "dependencies": { "es-define-property": "^1.0.0" }, @@ -6936,9 +6719,13 @@ } }, "node_modules/has-proto": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", - "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.2.0.tgz", + "integrity": "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + "dev": true, + "dependencies": { + "dunder-proto": "^1.0.0" + }, "engines": { "node": ">= 0.4" }, @@ -6947,9 +6734,9 @@ } }, "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "version": "1.1.0", + 
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", "engines": { "node": ">= 0.4" }, @@ -6961,7 +6748,6 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", - "dev": true, "dependencies": { "has-symbols": "^1.0.3" }, @@ -7043,15 +6829,6 @@ "node": ">=16.0.0" } }, - "node_modules/hexoid": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/hexoid/-/hexoid-1.0.0.tgz", - "integrity": "sha512-QFLV0taWQOZtvIRIAdBChesmogZrtuXvVWsFHZTk2SU+anspqZ2vMnoLg7IE1+Uk16N19APic1BuF8bC8c2m5g==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/hmac-drbg": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/hmac-drbg/-/hmac-drbg-1.0.1.tgz", @@ -7062,6 +6839,12 @@ "minimalistic-crypto-utils": "^1.0.1" } }, + "node_modules/hosted-git-info": { + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true + }, "node_modules/html-escaper": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", @@ -7069,9 +6852,9 @@ "dev": true }, "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": 
"sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", "optional": true }, "node_modules/http-errors": { @@ -7089,6 +6872,42 @@ "node": ">= 0.8" } }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "optional": true, + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/http-proxy-agent/node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "optional": true, + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/http-proxy-agent/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "optional": true + }, "node_modules/http-reasons": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/http-reasons/-/http-reasons-0.1.0.tgz", @@ -7155,25 +6974,25 @@ "integrity": "sha512-4EC57ddXrkaF0x83Oj8sM6SLQHAWXw90Skqu2M4AEWENZ3F02dFJE/GARA8igO79tcgYqGrD7ae4f5L3um2lgg==" }, "node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": 
"sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", "optional": true, "dependencies": { - "agent-base": "6", + "agent-base": "^7.1.2", "debug": "4" }, "engines": { - "node": ">= 6" + "node": ">= 14" } }, "node_modules/https-proxy-agent/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "optional": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -7185,9 +7004,9 @@ } }, "node_modules/https-proxy-agent/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "optional": true }, "node_modules/humanize-ms": { @@ -7245,9 +7064,9 @@ "dev": true }, "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", "dev": true, "dependencies": { "parent-module": "^1.0.0", @@ -7260,6 +7079,28 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + 
"node_modules/import-in-the-middle": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.14.0.tgz", + "integrity": "sha512-g5zLT0HaztRJWysayWYiUq/7E5H825QIiecMD2pI5QO7Wzr847l6GDvPvmZaDIdrDtS2w7qRczywxiK6SL5vRw==", + "dependencies": { + "acorn": "^8.14.0", + "acorn-import-attributes": "^1.9.5", + "cjs-module-lexer": "^1.2.2", + "module-details-from-path": "^1.0.3" + } + }, + "node_modules/import-in-the-middle/node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/import-lazy": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", @@ -7304,6 +7145,8 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "devOptional": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -7461,14 +7304,14 @@ } }, "node_modules/internal-slot": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz", - "integrity": "sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", + "integrity": "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", "dev": true, "dependencies": { "es-errors": "^1.3.0", - "hasown": "^2.0.0", - "side-channel": "^1.0.4" + "hasown": "^2.0.2", + "side-channel": "^1.1.0" }, "engines": { "node": ">= 0.4" @@ -7516,13 +7359,14 @@ } }, "node_modules/is-array-buffer": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz", - "integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==", + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", + "integrity": "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.2.1" + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" }, "engines": { "node": ">= 0.4" @@ -7537,13 +7381,35 @@ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", "dev": true }, + "node_modules/is-async-function": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.1.1.tgz", + "integrity": 
"sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", + "dev": true, + "dependencies": { + "async-function": "^1.0.0", + "call-bound": "^1.0.3", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-bigint": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", - "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.1.0.tgz", + "integrity": "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", "dev": true, "dependencies": { - "has-bigints": "^1.0.1" + "has-bigints": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -7562,13 +7428,13 @@ } }, "node_modules/is-boolean-object": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", - "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", + "integrity": "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -7596,22 +7462,27 @@ } }, "node_modules/is-core-module": { - "version": "2.13.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", - "integrity": 
"sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", "dependencies": { - "hasown": "^2.0.0" + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-data-view": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz", - "integrity": "sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.2.tgz", + "integrity": "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", "dev": true, "dependencies": { + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", "is-typed-array": "^1.1.13" }, "engines": { @@ -7622,12 +7493,13 @@ } }, "node_modules/is-date-object": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", - "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.1.0.tgz", + "integrity": "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", "dev": true, "dependencies": { - "has-tostringtag": "^1.0.0" + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -7652,11 +7524,25 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, "engines": { 
"node": ">=0.10.0" } }, + "node_modules/is-finalizationregistry": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.1.1.tgz", + "integrity": "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", + "dev": true, + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -7665,11 +7551,28 @@ "node": ">=8" } }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "node_modules/is-generator-function": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.0.tgz", + "integrity": "sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==", "dev": true, + "dependencies": { + "call-bound": "^1.0.3", + "get-proto": "^1.0.0", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", "dependencies": { "is-extglob": "^2.1.1" }, @@ -7695,6 +7598,18 @@ "integrity": "sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==", "optional": true }, + "node_modules/is-map": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-negative-zero": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", @@ -7711,7 +7626,6 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, "engines": { "node": ">=0.12.0" } @@ -7725,12 +7639,13 @@ } }, "node_modules/is-number-object": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", - "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz", + "integrity": "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", "dev": true, "dependencies": { - "has-tostringtag": "^1.0.0" + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -7760,13 +7675,15 @@ "integrity": "sha512-Ks/IoX00TtClbGQr4TWXemAnktAQvYB7HzcCxDGqEZU6oCmb2INHuOoKxbtR+HFkmYWBKv/dOZtGRiAjDhj92g==" }, "node_modules/is-regex": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", - "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", "dev": true, "dependencies": { - 
"call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" }, "engines": { "node": ">= 0.4" @@ -7789,13 +7706,25 @@ "node": ">=6" } }, + "node_modules/is-set": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-shared-array-buffer": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz", - "integrity": "sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.4.tgz", + "integrity": "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", "dev": true, "dependencies": { - "call-bind": "^1.0.7" + "call-bound": "^1.0.3" }, "engines": { "node": ">= 0.4" @@ -7813,12 +7742,13 @@ } }, "node_modules/is-string": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", - "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.1.1.tgz", + "integrity": "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", "dev": true, "dependencies": { - "has-tostringtag": "^1.0.0" + "call-bound": "^1.0.3", + "has-tostringtag": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -7828,12 +7758,14 @@ } }, "node_modules/is-symbol": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", 
- "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.1.1.tgz", + "integrity": "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", "dev": true, "dependencies": { - "has-symbols": "^1.0.2" + "call-bound": "^1.0.2", + "has-symbols": "^1.1.0", + "safe-regex-test": "^1.1.0" }, "engines": { "node": ">= 0.4" @@ -7843,12 +7775,12 @@ } }, "node_modules/is-typed-array": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz", - "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", "dev": true, "dependencies": { - "which-typed-array": "^1.1.14" + "which-typed-array": "^1.1.16" }, "engines": { "node": ">= 0.4" @@ -7881,13 +7813,44 @@ "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==", "optional": true }, + "node_modules/is-weakmap": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-weakref": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", - "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/is-weakref/-/is-weakref-1.1.1.tgz", + "integrity": "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", + "dev": true, + "dependencies": { + "call-bound": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.4.tgz", + "integrity": "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2" + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -8011,75 +7974,6 @@ "node": ">=8" } }, - "node_modules/istanbul-lib-processinfo/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/istanbul-lib-processinfo/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "dev": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/istanbul-lib-processinfo/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": 
"sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/istanbul-lib-processinfo/node_modules/p-map": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", - "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", - "dev": true, - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/istanbul-lib-processinfo/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/istanbul-lib-processinfo/node_modules/uuid": { "version": "8.3.2", "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", @@ -8133,12 +8027,12 @@ } }, "node_modules/istanbul-lib-source-maps/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -8150,9 +8044,9 @@ } }, "node_modules/istanbul-lib-source-maps/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/istanbul-reports": { @@ -8169,16 +8063,13 @@ } }, "node_modules/jackspeak": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", - "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", "dev": true, "dependencies": { "@isaacs/cliui": "^8.0.2" }, - "engines": { - "node": ">=14" - }, "funding": { "url": "https://github.com/sponsors/isaacs" }, @@ -8187,9 +8078,9 @@ } }, "node_modules/jake": { - "version": "10.9.1", - "resolved": "https://registry.npmjs.org/jake/-/jake-10.9.1.tgz", - "integrity": "sha512-61btcOHNnLnsOdtLgA5efqQWjnSi/vow5HbI7HMdKKWqvrKR1bLK3BPlJn9gcSaP2ewuamUSMB5XEy76KUIS2w==", + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/jake/-/jake-10.9.2.tgz", + "integrity": "sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==", "dependencies": { "async": "^3.2.3", "chalk": "^4.0.2", @@ -8204,9 +8095,9 @@ } }, "node_modules/jake/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": 
"sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -8237,16 +8128,16 @@ } }, "node_modules/js-beautify": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.15.1.tgz", - "integrity": "sha512-ESjNzSlt/sWE8sciZH8kBF8BPlwXPwhR6pWKAw8bw4Bwj+iZcnKW6ONWUutJ7eObuBZQpiIb8S7OYspWrKt7rA==", + "version": "1.15.4", + "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.15.4.tgz", + "integrity": "sha512-9/KXeZUKKJwqCXUdBxFJ3vPh467OCckSBmYDwSK/EtV090K+iMJ7zx2S3HLVDIWFQdqMIsZWbnaGiba18aWhaA==", "dev": true, "dependencies": { "config-chain": "^1.1.13", "editorconfig": "^1.0.4", - "glob": "^10.3.3", + "glob": "^10.4.2", "js-cookie": "^3.0.5", - "nopt": "^7.2.0" + "nopt": "^7.2.1" }, "bin": { "css-beautify": "js/bin/css-beautify.js", @@ -8257,22 +8148,13 @@ "node": ">=14" } }, - "node_modules/js-beautify/node_modules/abbrev": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-2.0.0.tgz", - "integrity": "sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==", - "dev": true, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, "node_modules/js-beautify/node_modules/foreground-child": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", - "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", "dev": true, "dependencies": { - "cross-spawn": "^7.0.0", + "cross-spawn": "^7.0.6", "signal-exit": "^4.0.1" }, "engines": { @@ -8283,23 +8165,21 @@ } }, 
"node_modules/js-beautify/node_modules/glob": { - "version": "10.3.15", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.15.tgz", - "integrity": "sha512-0c6RlJt1TICLyvJYIApxb8GsXoai0KUP7AxKKAtsYXdgJR1mGEUa7DgwShbdk1nly0PYoZj01xd4hzbq3fsjpw==", + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", "dev": true, "dependencies": { "foreground-child": "^3.1.0", - "jackspeak": "^2.3.6", - "minimatch": "^9.0.1", - "minipass": "^7.0.4", - "path-scurry": "^1.11.0" + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" }, "bin": { "glob": "dist/esm/bin.mjs" }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, "funding": { "url": "https://github.com/sponsors/isaacs" } @@ -8320,29 +8200,14 @@ } }, "node_modules/js-beautify/node_modules/minipass": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.1.tgz", - "integrity": "sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", "dev": true, "engines": { "node": ">=16 || 14 >=14.17" } }, - "node_modules/js-beautify/node_modules/nopt": { - "version": "7.2.1", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-7.2.1.tgz", - "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", - "dev": true, - "dependencies": { - "abbrev": "^2.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - } - }, "node_modules/js-beautify/node_modules/signal-exit": { "version": "4.1.0", "resolved": 
"https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", @@ -8408,15 +8273,15 @@ } }, "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", "dev": true, "bin": { "jsesc": "bin/jsesc" }, "engines": { - "node": ">=4" + "node": ">=6" } }, "node_modules/json-buffer": { @@ -8470,7 +8335,6 @@ "version": "6.1.0", "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "devOptional": true, "dependencies": { "universalify": "^2.0.0" }, @@ -8482,7 +8346,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "devOptional": true, "engines": { "node": ">= 10.0.0" } @@ -8547,12 +8410,12 @@ "dev": true }, "node_modules/jwk-to-pem": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/jwk-to-pem/-/jwk-to-pem-2.0.5.tgz", - "integrity": "sha512-L90jwellhO8jRKYwbssU9ifaMVqajzj3fpRjDKcsDzrslU9syRbFqfkXtT4B89HYAap+xsxNcxgBSB09ig+a7A==", + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/jwk-to-pem/-/jwk-to-pem-2.0.7.tgz", + "integrity": "sha512-cSVphrmWr6reVchuKQZdfSs4U9c5Y4hwZggPoz6cbVnTpAVgGRpEuQng86IyqLeGZlhTh+c4MAreB6KbdQDKHQ==", "dependencies": { "asn1.js": "^5.3.0", - "elliptic": "^6.5.4", + "elliptic": "^6.6.1", "safe-buffer": "^5.0.1" } }, @@ -8677,12 +8540,9 @@ "node_modules/lodash.get": { "version": "4.4.2", "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz", 
- "integrity": "sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ==" - }, - "node_modules/lodash.isequal": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", - "integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==" + "integrity": "sha512-z+Uw/vLuy6gQe8cfaFWD7p0wVv8fJl3mbzXh33RS+0oW2wvUqiRXiQ69gLWSLpgB5/6sU+r6BlQR0MBILadqTQ==", + "deprecated": "This package is deprecated. Use the optional chaining (?.) operator instead.", + "dev": true }, "node_modules/lodash.isfinite": { "version": "3.3.2", @@ -8712,9 +8572,9 @@ } }, "node_modules/long": { - "version": "5.2.3", - "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", - "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==" }, "node_modules/long-timeout": { "version": "0.1.1", @@ -8734,13 +8594,10 @@ } }, "node_modules/loupe": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.1.tgz", - "integrity": "sha512-edNu/8D5MKVfGVFRhFf8aAxiTM6Wumfz5XsaatSxlD3w4R1d/WEKUTydCdPGbl9K7QG/Ca3GnDV2sIKIpXRQcw==", - "dev": true, - "dependencies": { - "get-func-name": "^2.0.1" - } + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.3.tgz", + "integrity": "sha512-kkIp7XSkP78ZxJEsSxW3712C6teJVoeHHwgo9zJ380de7IYyJ2ISlxojcH2pC5OFLewESmnRi/+XCDIEEVyoug==", + "dev": true }, "node_modules/lru-cache": { "version": "8.0.5", @@ -8791,51 +8648,177 @@ "semver": "bin/semver.js" } }, - "node_modules/matcher": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz", - "integrity": 
"sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==", - "dev": true, + "node_modules/make-fetch-happen": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", + "integrity": "sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg==", + "optional": true, "dependencies": { - "escape-string-regexp": "^4.0.0" + "agentkeepalive": "^4.1.3", + "cacache": "^15.2.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^4.0.1", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^6.0.0", + "minipass": "^3.1.3", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^1.3.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.2", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^6.0.0", + "ssri": "^8.0.0" }, "engines": { - "node": ">=10" + "node": ">= 10" } }, - "node_modules/md5": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", - "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", - "dev": true, + "node_modules/make-fetch-happen/node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "optional": true, "dependencies": { - "charenc": "0.0.2", - "crypt": "0.0.2", - "is-buffer": "~1.1.6" - } - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "debug": "4" + }, "engines": { - "node": ">= 0.6" + "node": ">= 6.0.0" } }, - "node_modules/memoizee": { - "version": "0.4.15", - "resolved": 
"https://registry.npmjs.org/memoizee/-/memoizee-0.4.15.tgz", - "integrity": "sha512-UBWmJpLZd5STPm7PMUlOw/TSy972M+z8gcyQ5veOnSDRREz/0bmpyTfKt3/51DhEBqCZQn1udM/5flcSPYhkdQ==", - "dev": true, + "node_modules/make-fetch-happen/node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "optional": true, "dependencies": { - "d": "^1.0.1", - "es5-ext": "^0.10.53", - "es6-weak-map": "^2.0.3", - "event-emitter": "^0.3.5", + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/make-fetch-happen/node_modules/http-proxy-agent": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", + "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", + "optional": true, + "dependencies": { + "@tootallnate/once": "1", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/make-fetch-happen/node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "optional": true, + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/make-fetch-happen/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "optional": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + 
"node_modules/make-fetch-happen/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "optional": true + }, + "node_modules/make-fetch-happen/node_modules/socks-proxy-agent": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.2.1.tgz", + "integrity": "sha512-a6KW9G+6B3nWZ1yB8G7pJwL3ggLy1uTzKAgCb7ttblwqdz9fMGJUuTy3uFzEP48FAs9FLILlmzDlE2JJhVQaXQ==", + "optional": true, + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/matcher": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz", + "integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/md5": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", + "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", + "dev": true, + "dependencies": { + "charenc": "0.0.2", + "crypt": "0.0.2", + "is-buffer": "~1.1.6" + } + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } 
+ }, + "node_modules/memoizee": { + "version": "0.4.17", + "resolved": "https://registry.npmjs.org/memoizee/-/memoizee-0.4.17.tgz", + "integrity": "sha512-DGqD7Hjpi/1or4F/aYAspXKNm5Yili0QDAFAY4QYvpqpgiY6+1jOfqpmByzjxbWd/T9mChbCArXAbDAsTm5oXA==", + "dev": true, + "dependencies": { + "d": "^1.0.2", + "es5-ext": "^0.10.64", + "es6-weak-map": "^2.0.3", + "event-emitter": "^0.3.5", "is-promise": "^2.2.2", "lru-queue": "^0.1.0", "next-tick": "^1.1.0", "timers-ext": "^0.1.7" + }, + "engines": { + "node": ">=0.12" } }, "node_modules/merge-descriptors": { @@ -8846,6 +8829,14 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, "node_modules/methods": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", @@ -8854,6 +8845,18 @@ "node": ">= 0.6" } }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, "node_modules/mime": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", @@ -8946,9 +8949,12 @@ } }, "node_modules/minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", + "integrity": 
"sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dependencies": { + "yallist": "^4.0.0" + }, "engines": { "node": ">=8" } @@ -8965,16 +8971,21 @@ "node": ">= 8" } }, - "node_modules/minipass-collect/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/minipass-fetch": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-1.4.1.tgz", + "integrity": "sha512-CGH1eblLq26Y15+Azk7ey4xh0J/XfJfrCox5LDJiKqI2Q2iwOLOKrlmIaODiSQS8d18jalF6y2K2ePUm0CmShw==", "optional": true, "dependencies": { - "yallist": "^4.0.0" + "minipass": "^3.1.0", + "minipass-sized": "^1.0.3", + "minizlib": "^2.0.0" }, "engines": { "node": ">=8" + }, + "optionalDependencies": { + "encoding": "^0.1.12" } }, "node_modules/minipass-flush": { @@ -8989,18 +9000,6 @@ "node": ">= 8" } }, - "node_modules/minipass-flush/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/minipass-pipeline": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", @@ -9013,18 +9012,6 @@ "node": ">=8" } }, - "node_modules/minipass-pipeline/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, 
"node_modules/minipass-sized": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", @@ -9037,18 +9024,6 @@ "node": ">=8" } }, - "node_modules/minipass-sized/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/minizlib": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", @@ -9061,17 +9036,6 @@ "node": ">= 8" } }, - "node_modules/minizlib/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/mkdirp": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", @@ -9143,12 +9107,12 @@ } }, "node_modules/mocha-junit-reporter/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -9160,18 +9124,29 @@ } }, "node_modules/mocha-junit-reporter/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": 
"sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, + "node_modules/mocha/node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, "node_modules/mocha/node_modules/debug": { - "version": "4.3.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.6.tgz", - "integrity": "sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -9182,12 +9157,6 @@ } } }, - "node_modules/mocha/node_modules/debug/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, "node_modules/mocha/node_modules/minimatch": { "version": "5.1.6", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", @@ -9221,10 +9190,28 @@ "url": "https://github.com/chalk/supports-color?sponsor=1" } }, + "node_modules/mocha/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": 
"sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/module-details-from-path": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.3.tgz", - "integrity": "sha512-ySViT69/76t8VhE1xXHK6Ch4NcDd26gx0MzKXLO+F7NOtnqH68d9zF94nT8ZWSxXh8ELOERsnJO/sWt1xZYw5A==" + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.4.tgz", + "integrity": "sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==" }, "node_modules/moment": { "version": "2.30.1", @@ -9280,6 +9267,7 @@ "version": "1.4.5-lts.1", "resolved": "https://registry.npmjs.org/multer/-/multer-1.4.5-lts.1.tgz", "integrity": "sha512-ywPWvcDMeH+z9gQq5qYHCCy+ethsk4goepZ45GLD63fOu0YcNecQxi64nDs3qluZB+murG3/D4dJ7+dGctcCQQ==", + "deprecated": "Multer 1.x is impacted by a number of vulnerabilities, which have been patched in 2.x. 
You should upgrade to the latest 2.x version.", "dependencies": { "append-field": "^1.0.0", "busboy": "^1.0.0", @@ -9367,9 +9355,9 @@ } }, "node_modules/napi-build-utils": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz", - "integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-2.0.0.tgz", + "integrity": "sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==" }, "node_modules/natural-compare": { "version": "1.4.0", @@ -9391,21 +9379,48 @@ "node": ">= 0.4.0" } }, - "node_modules/negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", - "engines": { - "node": ">= 0.6" + "node_modules/nconf/node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" } }, - "node_modules/neo-async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", - "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", - "dev": true - }, - "node_modules/netmask": { + "node_modules/nconf/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + 
"require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true + }, + "node_modules/netmask": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/netmask/-/netmask-2.0.2.tgz", "integrity": "sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==", @@ -9415,9 +9430,9 @@ } }, "node_modules/newman": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/newman/-/newman-6.2.0.tgz", - "integrity": "sha512-CHo/wMv+Q9B3YcIJ18pdmY9XN9X8mc2hXso8yybeclV0BVPSFz1+P5vJELWg5DB/qJgxJOh+B+k+i9tTqfzcbw==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/newman/-/newman-6.2.1.tgz", + "integrity": "sha512-Zq8Sr5GFF+OXs5yIbyglLMKMh1WNMjYVV0yZaSBZ+DIgQOIWcxT8QTfbrl/YUGrLyT4rjpu+yZ/Z+kozw79GEA==", "dev": true, "dependencies": { "@postman/tough-cookie": "4.1.3-postman.1", @@ -9432,10 +9447,10 @@ "liquid-json": "0.3.1", "lodash": "4.17.21", "mkdirp": "3.0.1", - "postman-collection": "4.5.0", + "postman-collection": "4.4.0", "postman-collection-transformer": "4.1.8", - "postman-request": "2.88.1-postman.39", - "postman-runtime": "7.41.2", + "postman-request": "2.88.1-postman.34", + "postman-runtime": "7.39.1", "pretty-ms": "7.0.1", "semver": "7.6.3", "serialised-error": "1.1.3", @@ -9475,6 +9490,12 @@ "node": ">=4.0" } }, + "node_modules/newman/node_modules/async": { + "version": "3.2.5", + 
"resolved": "https://registry.npmjs.org/async/-/async-3.2.5.tgz", + "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==", + "dev": true + }, "node_modules/next-tick": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz", @@ -9506,9 +9527,9 @@ "dev": true }, "node_modules/node-abi": { - "version": "3.62.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.62.0.tgz", - "integrity": "sha512-CPMcGa+y33xuL1E0TcNIu4YyaZCxnnvkVaEXrsosR3FxN+fV8xvb7Mzpb7IgKler10qeMkE6+Dp8qJhpzdq35g==", + "version": "3.75.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.75.0.tgz", + "integrity": "sha512-OhYaY5sDsIka7H7AtijtI9jwGYLyl29eQn/W623DiN/MIv5sUqc4g7BIDThX+gb7di9f6xK02nkp8sdfFWZLTg==", "dependencies": { "semver": "^7.3.5" }, @@ -9517,12 +9538,9 @@ } }, "node_modules/node-addon-api": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.0.tgz", - "integrity": "sha512-mNcltoe1R8o7STTegSOHdnJNN7s5EUvhoS7ShnTHDyOSd+8H+UdWODq6qSv67PjC8Zc5JRT8+oLAMCr0SIXw7g==", - "engines": { - "node": "^16 || ^18 || >= 20" - } + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-7.1.1.tgz", + "integrity": "sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==" }, "node_modules/node-fetch-npm": { "version": "2.0.4", @@ -9546,6 +9564,132 @@ "node": ">= 6.13.0" } }, + "node_modules/node-gyp": { + "version": "8.4.1", + "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-8.4.1.tgz", + "integrity": "sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w==", + "optional": true, + "dependencies": { + "env-paths": "^2.2.0", + "glob": "^7.1.4", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^9.1.0", + "nopt": "^5.0.0", + "npmlog": "^6.0.0", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", + 
"which": "^2.0.2" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": ">= 10.12.0" + } + }, + "node_modules/node-gyp/node_modules/abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "optional": true + }, + "node_modules/node-gyp/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "optional": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/node-gyp/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "optional": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/node-gyp/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "optional": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/node-gyp/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": 
"sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/node-gyp/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "optional": true, + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-gyp/node_modules/nopt": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", + "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "optional": true, + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/node-gyp/node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "optional": true, + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/node-oauth1": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/node-oauth1/-/node-oauth1-1.3.0.tgz", @@ -9565,9 +9709,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", - "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": 
"sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", "dev": true }, "node_modules/node-schedule": { @@ -9583,6 +9727,42 @@ "node": ">=6" } }, + "node_modules/nopt": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-7.2.1.tgz", + "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", + "dev": true, + "dependencies": { + "abbrev": "^2.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/normalize-package-data": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/normalize-package-data/node_modules/semver": { + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, + "bin": { + "semver": "bin/semver" + } + }, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", @@ -9615,6 +9795,7 @@ "version": "6.0.2", "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.2.tgz", "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", + "deprecated": "This package is no longer supported.", "optional": true, "dependencies": { "are-we-there-yet": "^3.0.0", @@ -9668,9 +9849,9 @@ } }, "node_modules/nyc/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -9705,6 +9886,7 @@ "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "dependencies": { "fs.realpath": "^1.0.0", @@ -9772,18 +9954,6 @@ "node": ">=8" } }, - "node_modules/nyc/node_modules/p-map": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", - "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", - "dev": true, - "dependencies": { - "aggregate-error": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/nyc/node_modules/resolve-from": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", @@ -9793,21 +9963,6 @@ "node": ">=8" } }, - "node_modules/nyc/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/nyc/node_modules/wrap-ansi": { "version": "6.2.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", @@ -9873,9 +10028,9 @@ } }, 
"node_modules/oauth4webapi": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/oauth4webapi/-/oauth4webapi-3.5.1.tgz", - "integrity": "sha512-txg/jZQwcbaF7PMJgY7aoxc9QuCxHVFMiEkDIJ60DwDz3PbtXPQnrzo+3X4IRYGChIwWLabRBRpf1k9hO9+xrQ==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/oauth4webapi/-/oauth4webapi-3.5.2.tgz", + "integrity": "sha512-VYz5BaP3izIrUc1GAVzIoz4JnljiW0YAUFObMBwsqDnfHxz2sjLu3W7/8vE8Ms9IbMewN9+1kcvhY3tMscAeGQ==", "optional": true, "funding": { "url": "https://github.com/sponsors/panva" @@ -9899,9 +10054,12 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -9916,14 +10074,16 @@ } }, "node_modules/object.assign": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", - "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.7.tgz", + "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", "dev": true, "dependencies": { - "call-bind": "^1.0.5", + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", "define-properties": "^1.2.1", - "has-symbols": "^1.0.3", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", "object-keys": "^1.1.1" }, "engines": { @@ -9986,13 +10146,13 @@ } }, "node_modules/openid-client": { - "version": "6.5.0", - 
"resolved": "https://registry.npmjs.org/openid-client/-/openid-client-6.5.0.tgz", - "integrity": "sha512-fAfYaTnOYE2kQCqEJGX9KDObW2aw7IQy4jWpU/+3D3WoCFLbix5Hg6qIPQ6Js9r7f8jDUmsnnguRNCSw4wU/IQ==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-6.5.1.tgz", + "integrity": "sha512-DNq7s+Tm9wfMUTltl+kUJzwi+bsbeiZycDm1gJQbX6MTHwo1+Q0I3F+ccsxi1T3mYMaHATCSnWDridkZ3hnu1g==", "optional": true, "dependencies": { - "jose": "^6.0.10", - "oauth4webapi": "^3.5.1" + "jose": "^6.0.11", + "oauth4webapi": "^3.5.2" }, "funding": { "url": "https://github.com/sponsors/panva" @@ -10038,6 +10198,23 @@ "node": ">=0.10.0" } }, + "node_modules/own-keys": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/own-keys/-/own-keys-1.0.1.tgz", + "integrity": "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", + "dev": true, + "dependencies": { + "get-intrinsic": "^1.2.6", + "object-keys": "^1.1.1", + "safe-push-apply": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/p-finally": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", @@ -10077,18 +10254,15 @@ } }, "node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "optional": true, + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", + "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", + "dev": true, "dependencies": { "aggregate-error": "^3.0.0" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/p-try": { @@ -10101,43 +10275,31 @@ } }, 
"node_modules/pac-proxy-agent": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.0.1.tgz", - "integrity": "sha512-ASV8yU4LLKBAjqIPMbrgtaKIvxQri/yh2OpI+S6hVa9JRkUI3Y3NPFbfngDtY7oFtSMD3w31Xns89mDa3Feo5A==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-7.2.0.tgz", + "integrity": "sha512-TEB8ESquiLMc0lV8vcd5Ql/JAKAoyzHFXaStwjkzpOpC5Yv+pIzLfHvjTSdf3vpa2bMiUQrg9i6276yn8666aA==", "optional": true, "dependencies": { "@tootallnate/quickjs-emscripten": "^0.23.0", - "agent-base": "^7.0.2", + "agent-base": "^7.1.2", "debug": "^4.3.4", "get-uri": "^6.0.1", "http-proxy-agent": "^7.0.0", - "https-proxy-agent": "^7.0.2", - "pac-resolver": "^7.0.0", - "socks-proxy-agent": "^8.0.2" + "https-proxy-agent": "^7.0.6", + "pac-resolver": "^7.0.1", + "socks-proxy-agent": "^8.0.5" }, "engines": { "node": ">= 14" } }, - "node_modules/pac-proxy-agent/node_modules/agent-base": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.1.tgz", - "integrity": "sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==", + "node_modules/pac-proxy-agent/node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "optional": true, "dependencies": { - "debug": "^4.3.4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/pac-proxy-agent/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "optional": true, - "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -10148,36 +10310,10 @@ } } }, - 
"node_modules/pac-proxy-agent/node_modules/http-proxy-agent": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", - "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", - "optional": true, - "dependencies": { - "agent-base": "^7.1.0", - "debug": "^4.3.4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/pac-proxy-agent/node_modules/https-proxy-agent": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.4.tgz", - "integrity": "sha512-wlwpilI7YdjSkWaQ/7omYBMTliDcmCN8OLihO6I9B86g06lMyAoqgoDpV0XqoaPOKj+0DIdAvnsWfyAAhmimcg==", - "optional": true, - "dependencies": { - "agent-base": "^7.0.2", - "debug": "4" - }, - "engines": { - "node": ">= 14" - } - }, "node_modules/pac-proxy-agent/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "optional": true }, "node_modules/pac-resolver": { @@ -10208,6 +10344,12 @@ "node": ">=8" } }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true + }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -10314,12 +10456,18 @@ } }, "node_modules/path-scurry/node_modules/lru-cache": { - "version": "10.2.2", - "resolved": 
"https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz", - "integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==", + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true + }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", "dev": true, "engines": { - "node": "14 || >=16.14" + "node": ">=16 || 14 >=14.17" } }, "node_modules/path-to-regexp": { @@ -10327,6 +10475,27 @@ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==" }, + "node_modules/path-type": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", + "integrity": "sha512-dUnb5dXUf+kzhC/W/F4e5/SkluXIFf5VUHolW1Eg1irn1hGWjPGdsRcvYJ1nD6lhk8Ir7VM0bHJKsYTx8Jx9OQ==", + "dev": true, + "dependencies": { + "pify": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/path-type/node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/pathval": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", @@ -10375,15 +10544,15 @@ } }, "node_modules/pg-cloudflare": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz", - "integrity": 
"sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.2.5.tgz", + "integrity": "sha512-OOX22Vt0vOSRrdoUPKJ8Wi2OpE/o/h9T8X1s4qSkCedbNah9ei2W2765be8iMVxQUsvgT7zIAT2eIa9fs5+vtg==", "optional": true }, "node_modules/pg-connection-string": { - "version": "2.6.4", - "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.4.tgz", - "integrity": "sha512-v+Z7W/0EO707aNMaAEfiGnGL9sxxumwLl2fJvCQtMn9Fxsg+lPpPkdcyBSv/KFgpGdYkMfn+EI1Or2EHjpgLCA==" + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.9.0.tgz", + "integrity": "sha512-P2DEBKuvh5RClafLngkAuGe9OUlFV7ebu8w1kmaaOgPcpJd1RIFh7otETfI6hAR8YupOLFTY7nuvvIn7PLciUQ==" }, "node_modules/pg-int8": { "version": "1.0.1", @@ -10394,17 +10563,17 @@ } }, "node_modules/pg-pool": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.6.2.tgz", - "integrity": "sha512-Htjbg8BlwXqSBQ9V8Vjtc+vzf/6fVUuak/3/XXKA9oxZprwW3IMDQTGHP+KDmVL7rtd+R1QjbnCFPuTHm3G4hg==", + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.10.0.tgz", + "integrity": "sha512-DzZ26On4sQ0KmqnO34muPcmKbhrjmyiO4lCCR0VwEd7MjmiKf5NTg/6+apUEu0NF7ESa37CGzFxH513CoUmWnA==", "peerDependencies": { "pg": ">=8.0" } }, "node_modules/pg-protocol": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.6.1.tgz", - "integrity": "sha512-jPIlvgoD63hrEuihvIg+tJhoGjUsLPn6poJY9N5CnlPd91c2T18T/9zBtLxZSb1EhYxBRoZJtzScCaWlYLtktg==" + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.10.0.tgz", + "integrity": "sha512-IpdytjudNuLv8nhlHs/UrVBhU0e78J0oIS/0AVdTbWxSOkFUVdsHC/NrorO6nXsQNDTT1kzDSOMJubBQviX18Q==" }, "node_modules/pg-types": { "version": "2.2.0", @@ -10430,16 +10599,15 @@ } }, "node_modules/picocolors": { - "version": 
"1.0.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", - "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "dev": true }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, "engines": { "node": ">=8.6" }, @@ -10510,9 +10678,9 @@ } }, "node_modules/pino-abstract-transport/node_modules/readable-stream": { - "version": "4.5.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.5.2.tgz", - "integrity": "sha512-yjavECdqeZ3GLXNgRXgeQEdz9fvDDkNKyHnbHRFtOr7/LcfgBcmct7t/ET+HaCTqfh06OzoAxrkN/IfjJBVe+g==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", "dependencies": { "abort-controller": "^3.0.0", "buffer": "^6.0.3", @@ -10734,9 +10902,9 @@ } }, "node_modules/possible-typed-array-names": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", - "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", "dev": true, "engines": { "node": ">= 0.4" @@ -10778,9 +10946,9 @@ } }, "node_modules/postman-collection": 
{ - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/postman-collection/-/postman-collection-4.5.0.tgz", - "integrity": "sha512-152JSW9pdbaoJihwjc7Q8lc3nPg/PC9lPTHdMk7SHnHhu/GBJB7b2yb9zG7Qua578+3PxkQ/HYBuXpDSvsf7GQ==", + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/postman-collection/-/postman-collection-4.4.0.tgz", + "integrity": "sha512-2BGDFcUwlK08CqZFUlIC8kwRJueVzPjZnnokWPtJCd9f2J06HBQpGL7t2P1Ud1NEsK9NHq9wdipUhWLOPj5s/Q==", "dev": true, "dependencies": { "@faker-js/faker": "5.5.3", @@ -10792,7 +10960,7 @@ "mime-format": "2.0.1", "mime-types": "2.1.35", "postman-url-encoder": "3.0.5", - "semver": "7.6.3", + "semver": "7.5.4", "uuid": "8.3.2" }, "engines": { @@ -10866,6 +11034,33 @@ "node": ">=0.10.0" } }, + "node_modules/postman-collection/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/postman-collection/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/postman-collection/node_modules/uuid": { "version": "8.3.2", "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", @@ -10876,14 +11071,14 @@ } }, "node_modules/postman-request": { - "version": "2.88.1-postman.39", - "resolved": "https://registry.npmjs.org/postman-request/-/postman-request-2.88.1-postman.39.tgz", - "integrity": "sha512-rsncxxDlbn1YpygXSgJqbJzIjGlHFcZjbYDzeBPTQHMDfLuSTzZz735JHV8i1+lOROuJ7MjNap4eaSD3UijHzQ==", + 
"version": "2.88.1-postman.34", + "resolved": "https://registry.npmjs.org/postman-request/-/postman-request-2.88.1-postman.34.tgz", + "integrity": "sha512-GkolJ4cIzgamcwHRDkeZc/taFWO1u2HuGNML47K9ZAsFH2LdEkS5Yy8QanpzhjydzV3WWthl9v60J8E7SjKodQ==", "dev": true, "dependencies": { "@postman/form-data": "~3.1.1", "@postman/tough-cookie": "~4.1.3-postman.1", - "@postman/tunnel-agent": "^0.6.4", + "@postman/tunnel-agent": "^0.6.3", "aws-sign2": "~0.7.0", "aws4": "^1.12.0", "brotli": "^1.3.3", @@ -10905,7 +11100,7 @@ "uuid": "^8.3.2" }, "engines": { - "node": ">= 16" + "node": ">= 6" } }, "node_modules/postman-request/node_modules/qs": { @@ -10927,39 +11122,51 @@ } }, "node_modules/postman-runtime": { - "version": "7.41.2", - "resolved": "https://registry.npmjs.org/postman-runtime/-/postman-runtime-7.41.2.tgz", - "integrity": "sha512-efKnii+yBfqZMRjV5zFh4VXogLeZB58HmLkgT+/sZcjglth23wzp+QRlkl4nbgcL2SZX6e5cLI2/aG2Of3wMyg==", + "version": "7.39.1", + "resolved": "https://registry.npmjs.org/postman-runtime/-/postman-runtime-7.39.1.tgz", + "integrity": "sha512-IRNrBE0l1K3ZqQhQVYgF6MPuqOB9HqYncal+a7RpSS+sysKLhJMkC9SfUn1HVuOpokdPkK92ykvPzj8kCOLYAg==", "dev": true, "dependencies": { "@postman/tough-cookie": "4.1.3-postman.1", "async": "3.2.5", - "aws4": "1.13.1", + "aws4": "1.12.0", "handlebars": "4.7.8", "httpntlm": "1.8.13", - "jose": "5.6.3", + "jose": "4.14.4", "js-sha512": "0.9.0", "lodash": "4.17.21", "mime-types": "2.1.35", "node-forge": "1.3.1", "node-oauth1": "1.3.0", "performance-now": "2.1.0", - "postman-collection": "4.5.0", - "postman-request": "2.88.1-postman.39", - "postman-sandbox": "5.1.1", + "postman-collection": "4.4.0", + "postman-request": "2.88.1-postman.34", + "postman-sandbox": "4.7.1", "postman-url-encoder": "3.0.5", "serialised-error": "1.1.3", "strip-json-comments": "3.1.1", "uuid": "8.3.2" }, "engines": { - "node": ">=16" + "node": ">=12" } }, + "node_modules/postman-runtime/node_modules/async": { + "version": "3.2.5", + "resolved": 
"https://registry.npmjs.org/async/-/async-3.2.5.tgz", + "integrity": "sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==", + "dev": true + }, + "node_modules/postman-runtime/node_modules/aws4": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz", + "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==", + "dev": true + }, "node_modules/postman-runtime/node_modules/jose": { - "version": "5.6.3", - "resolved": "https://registry.npmjs.org/jose/-/jose-5.6.3.tgz", - "integrity": "sha512-1Jh//hEEwMhNYPDDLwXHa2ePWgWiFNNUadVmguAAw2IJ6sj9mNxV5tGXJNqlMkJAybF6Lgw1mISDxTePP/187g==", + "version": "4.14.4", + "resolved": "https://registry.npmjs.org/jose/-/jose-4.14.4.tgz", + "integrity": "sha512-j8GhLiKmUAh+dsFXlX1aJCbt5KMibuKb+d7j1JaOJG6s2UjX1PQlW+OKB/sD4a/5ZYF4RcmYmLSndOoU3Lt/3g==", "dev": true, "funding": { "url": "https://github.com/sponsors/panva" @@ -10975,18 +11182,18 @@ } }, "node_modules/postman-sandbox": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/postman-sandbox/-/postman-sandbox-5.1.1.tgz", - "integrity": "sha512-RfCTMwz3OaqhYYgtoe3VlHGiQl9hEmJ9sPh/XOlNcuvd/km6ARSFkKXFvQaLFsTHyXcHaqpInKaQSJi23uTynA==", + "version": "4.7.1", + "resolved": "https://registry.npmjs.org/postman-sandbox/-/postman-sandbox-4.7.1.tgz", + "integrity": "sha512-H2wYSLK0mB588IaxoLrLoPbpmxsIcwFtgaK2c8gAsAQ+TgYFePwb4qdeVcYDMqmwrLd77/ViXkjasP/sBMz1sQ==", "dev": true, "dependencies": { "lodash": "4.17.21", - "postman-collection": "4.5.0", + "postman-collection": "4.4.0", "teleport-javascript": "1.0.0", - "uvm": "3.0.0" + "uvm": "2.1.1" }, "engines": { - "node": ">=16" + "node": ">=10" } }, "node_modules/postman-url-encoder": { @@ -11002,16 +11209,16 @@ } }, "node_modules/prebuild-install": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.2.tgz", - "integrity": 
"sha512-UnNke3IQb6sgarcZIDU3gbMeTp/9SSU1DAIkil7PrqG1vZlBtY5msYccSKSHDqa3hNg436IXK+SNImReuA1wEQ==", + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", + "integrity": "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==", "dependencies": { "detect-libc": "^2.0.0", "expand-template": "^2.0.3", "github-from-package": "0.0.0", "minimist": "^1.2.3", "mkdirp-classic": "^0.5.3", - "napi-build-utils": "^1.0.1", + "napi-build-utils": "^2.0.0", "node-abi": "^3.3.0", "pump": "^3.0.0", "rc": "^1.2.7", @@ -11064,9 +11271,9 @@ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" }, "node_modules/process-on-spawn": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.0.0.tgz", - "integrity": "sha512-1WsPDsUSMmZH5LeMLegqkPDrsGgsWwk1Exipy2hvB0o/F0ASzbpIctSCcZIK1ykJvtTJULEH+20WOFjMvGnCTg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/process-on-spawn/-/process-on-spawn-1.1.0.tgz", + "integrity": "sha512-JOnOPQ/8TZgjs1JIH/m9ni7FfimjNa/PRx7y/Wb5qdItsnhO0jE4AT7fC0HjC28DUQWDr50dwSYZLdRMlqDq3Q==", "dev": true, "dependencies": { "fromentries": "^1.2.0" @@ -11140,9 +11347,9 @@ "dev": true }, "node_modules/protobufjs": { - "version": "7.5.0", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.0.tgz", - "integrity": "sha512-Z2E/kOY1QjoMlCytmexzYfDm/w5fKAiRwpSzGtdnXW1zC88Z2yXazHHrOtwCzn+7wSxyE8PYM4rvVcMphF9sOA==", + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.3.tgz", + "integrity": "sha512-sildjKwVqOI2kmFDiXQ6aEB0fjYTafpEvIBs8tOR8qI4spuL9OPROLVu2qZqi/xgCfsHIwVqlaF8JBjWFHnKbw==", "hasInstallScript": true, "dependencies": { "@protobufjs/aspromise": "^1.1.2", @@ -11175,43 +11382,31 @@ } }, "node_modules/proxy-agent": { - "version": "6.4.0", - "resolved": 
"https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.4.0.tgz", - "integrity": "sha512-u0piLU+nCOHMgGjRbimiXmA9kM/L9EHh3zL81xCdp7m+Y2pHIsnmbdDoEDoAz5geaonNR6q6+yOPQs6n4T6sBQ==", + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-6.5.0.tgz", + "integrity": "sha512-TmatMXdr2KlRiA2CyDu8GqR8EjahTG3aY3nXjdzFyoZbmB8hrBsTyMezhULIXKnC0jpfjlmiZ3+EaCzoInSu/A==", "optional": true, "dependencies": { - "agent-base": "^7.0.2", + "agent-base": "^7.1.2", "debug": "^4.3.4", "http-proxy-agent": "^7.0.1", - "https-proxy-agent": "^7.0.3", + "https-proxy-agent": "^7.0.6", "lru-cache": "^7.14.1", - "pac-proxy-agent": "^7.0.1", + "pac-proxy-agent": "^7.1.0", "proxy-from-env": "^1.1.0", - "socks-proxy-agent": "^8.0.2" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/proxy-agent/node_modules/agent-base": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.1.tgz", - "integrity": "sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==", - "optional": true, - "dependencies": { - "debug": "^4.3.4" + "socks-proxy-agent": "^8.0.5" }, "engines": { "node": ">= 14" } }, "node_modules/proxy-agent/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "optional": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -11222,32 +11417,6 @@ } } }, - "node_modules/proxy-agent/node_modules/http-proxy-agent": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", - "integrity": 
"sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", - "optional": true, - "dependencies": { - "agent-base": "^7.1.0", - "debug": "^4.3.4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/proxy-agent/node_modules/https-proxy-agent": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.4.tgz", - "integrity": "sha512-wlwpilI7YdjSkWaQ/7omYBMTliDcmCN8OLihO6I9B86g06lMyAoqgoDpV0XqoaPOKj+0DIdAvnsWfyAAhmimcg==", - "optional": true, - "dependencies": { - "agent-base": "^7.0.2", - "debug": "4" - }, - "engines": { - "node": ">= 14" - } - }, "node_modules/proxy-agent/node_modules/lru-cache": { "version": "7.18.3", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", @@ -11258,9 +11427,9 @@ } }, "node_modules/proxy-agent/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "optional": true }, "node_modules/proxy-from-env": { @@ -11269,15 +11438,21 @@ "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" }, "node_modules/psl": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", - "dev": true + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", + "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", + "dev": true, + "dependencies": { + "punycode": "^2.3.1" + }, + "funding": { + "url": 
"https://github.com/sponsors/lupomontero" + } }, "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz", + "integrity": "sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==", "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" @@ -11287,7 +11462,6 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "dev": true, "engines": { "node": ">=6" } @@ -11316,7 +11490,6 @@ "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true, "funding": [ { "type": "github", @@ -11503,12 +11676,6 @@ "node": ">=4" } }, - "node_modules/read-pkg/node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, "node_modules/read-pkg/node_modules/load-json-file": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-2.0.0.tgz", @@ -11524,18 +11691,6 @@ "node": ">=4" } }, - "node_modules/read-pkg/node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "dependencies": { - 
"hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, "node_modules/read-pkg/node_modules/parse-json": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", @@ -11548,18 +11703,6 @@ "node": ">=0.10.0" } }, - "node_modules/read-pkg/node_modules/path-type": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-2.0.0.tgz", - "integrity": "sha512-dUnb5dXUf+kzhC/W/F4e5/SkluXIFf5VUHolW1Eg1irn1hGWjPGdsRcvYJ1nD6lhk8Ir7VM0bHJKsYTx8Jx9OQ==", - "dev": true, - "dependencies": { - "pify": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/read-pkg/node_modules/pify": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", @@ -11569,15 +11712,6 @@ "node": ">=0.10.0" } }, - "node_modules/read-pkg/node_modules/semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "dev": true, - "bin": { - "semver": "bin/semver" - } - }, "node_modules/read-pkg/node_modules/strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", @@ -11618,16 +11752,40 @@ "node": ">= 12.13.0" } }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", + "integrity": "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.1", + "which-builtin-type": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, "node_modules/regexp.prototype.flags": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz", - "integrity": "sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==", + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", + "integrity": "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", "dev": true, "dependencies": { - "call-bind": "^1.0.6", + "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-errors": "^1.3.0", - "set-function-name": "^2.0.1" + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "set-function-name": "^2.0.2" }, "engines": { "node": ">= 0.4" @@ -11669,6 +11827,14 @@ "node": ">=0.10.0" } }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/require-in-the-middle": { "version": "7.5.2", "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-7.5.2.tgz", @@ -11683,9 +11849,9 @@ } }, "node_modules/require-in-the-middle/node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dependencies": { "ms": "^2.1.3" }, @@ -11738,17 +11904,20 @@ "dev": true }, "node_modules/resolve": { - "version": "1.22.8", - "resolved": 
"https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", - "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", "dependencies": { - "is-core-module": "^2.13.0", + "is-core-module": "^2.16.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" }, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -11789,30 +11958,98 @@ "resolved": "https://registry.npmjs.org/retry-as-promised/-/retry-as-promised-7.0.4.tgz", "integrity": "sha512-XgmCoxKWkDofwH8WddD0w85ZfqYz+ZHlr5yo+3YUCfycWawU56T5ckWXsScsj5B8tqUcIG67DxXByo3VUgiAdA==" }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, "node_modules/rfc4648": { "version": "1.5.4", "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.5.4.tgz", "integrity": "sha512-rRg/6Lb+IGfJqO05HZkN50UtY7K/JhxJag1kP23+zyMfrvoB0B7RWv06MbOzoc79RgCdNTiUaNsTT1AJZ7Z+cg==" }, - "node_modules/roarr": { - "version": "2.15.4", - "resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz", - "integrity": "sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==", - "dev": true, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + 
"devOptional": true, "dependencies": { - "boolean": "^3.0.1", - "detect-node": "^2.0.4", - "globalthis": "^1.0.1", - "json-stringify-safe": "^5.0.1", - "semver-compare": "^1.0.0", - "sprintf-js": "^1.1.2" + "glob": "^7.1.3" }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/run-async": { - "version": "2.4.1", + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "devOptional": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "devOptional": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "devOptional": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/roarr": { + "version": "2.15.4", + "resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz", + "integrity": 
"sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==", + "dev": true, + "dependencies": { + "boolean": "^3.0.1", + "detect-node": "^2.0.4", + "globalthis": "^1.0.1", + "json-stringify-safe": "^5.0.1", + "semver-compare": "^1.0.0", + "sprintf-js": "^1.1.2" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/run-async": { + "version": "2.4.1", "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", "dev": true, @@ -11824,7 +12061,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, "funding": [ { "type": "github", @@ -11856,14 +12092,15 @@ } }, "node_modules/safe-array-concat": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz", - "integrity": "sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.3.tgz", + "integrity": "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", - "get-intrinsic": "^1.2.4", - "has-symbols": "^1.0.3", + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "has-symbols": "^1.1.0", "isarray": "^2.0.5" }, "engines": { @@ -11898,15 +12135,37 @@ } ] }, + "node_modules/safe-push-apply": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/safe-push-apply/-/safe-push-apply-1.0.0.tgz", + "integrity": "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", + "dev": true, + "dependencies": { + 
"es-errors": "^1.3.0", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-push-apply/node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + }, "node_modules/safe-regex-test": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz", - "integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", "dev": true, "dependencies": { - "call-bind": "^1.0.6", + "call-bound": "^1.0.2", "es-errors": "^1.3.0", - "is-regex": "^1.1.4" + "is-regex": "^1.2.1" }, "engines": { "node": ">= 0.4" @@ -11916,9 +12175,9 @@ } }, "node_modules/safe-stable-stringify": { - "version": "2.4.3", - "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.4.3.tgz", - "integrity": "sha512-e2bDA2WJT0wxseVd4lsDP4+3ONX6HpMXQa1ZhFQ7SU+GjvORCmShbCMltrtIDfkYhVHrOcPtj+KhmDBdPdZD1g==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/safe-stable-stringify/-/safe-stable-stringify-2.5.0.tgz", + "integrity": "sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==", "engines": { "node": ">=10" } @@ -12080,19 +12339,15 @@ "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", "dev": true }, - "node_modules/sequelize-cli/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": 
"sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "node_modules/sequelize-cli/node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", "dev": true, "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" } }, "node_modules/sequelize-cli/node_modules/umzug": { @@ -12107,13 +12362,22 @@ "node": ">=6.0.0" } }, - "node_modules/sequelize-cli/node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "node_modules/sequelize-cli/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, "engines": { - "node": ">= 10.0.0" + "node": ">=10" } }, "node_modules/sequelize-pool": { @@ -12125,11 +12389,11 @@ } }, "node_modules/sequelize/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": 
"sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -12141,9 +12405,9 @@ } }, "node_modules/sequelize/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/sequelize/node_modules/uuid": { "version": "8.3.2", @@ -12234,6 +12498,7 @@ "version": "1.2.2", "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", @@ -12261,6 +12526,20 @@ "node": ">= 0.4" } }, + "node_modules/set-proto": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/set-proto/-/set-proto-1.0.0.tgz", + "integrity": "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", + "dev": true, + "dependencies": { + "dunder-proto": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", @@ -12298,14 +12577,65 @@ "integrity": "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==" }, "node_modules/side-channel": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", - "integrity": 
"sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", "dependencies": { - "call-bind": "^1.0.7", "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.4", - "object-inspect": "^1.13.1" + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + 
"get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" }, "engines": { "node": ">= 0.4" @@ -12422,9 +12752,9 @@ } }, "node_modules/snyk": { - "version": "1.1291.0", - "resolved": "https://registry.npmjs.org/snyk/-/snyk-1.1291.0.tgz", - "integrity": "sha512-CNm2VGBLMACNfmPcM1ByF9tpGlJUL7AlPFpwqqVKlLNnFIQk6o7EjmYJtQZzV6xbBy3+h2jWVh/OwfhFV/BeFg==", + "version": "1.1297.1", + "resolved": "https://registry.npmjs.org/snyk/-/snyk-1.1297.1.tgz", + "integrity": "sha512-l4bQPu90DvIKHs5h4aCo6ie4WwFq5pEB9IrLHONJzRwCY7ukT/z7rj0abYxR+aot/tnsQXcM/LgGIzIyy+3DVw==", "dev": true, "hasInstallScript": true, "dependencies": { @@ -12439,9 +12769,9 @@ } }, "node_modules/socks": { - "version": "2.8.3", - "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.3.tgz", - "integrity": "sha512-l5x7VUUWbjVFbafGLxPWkYsHIhEvmF85tbIeFZWc8ZPtoMyybuEhL7Jye/ooC4/d48FgOjSJXgsF/AJPYCW8Zw==", + "version": "2.8.5", + "resolved": "https://registry.npmjs.org/socks/-/socks-2.8.5.tgz", + "integrity": "sha512-iF+tNDQla22geJdTyJB1wM/qrX9DMRwWrciEPwWLPRWAUEM8sQiyxgckLxWT1f7+9VabJS0jTGGr4QgBuvi6Ww==", "optional": true, "dependencies": { "ip-address": "^9.0.5", @@ -12466,19 +12796,10 @@ "node": ">= 14" } }, - "node_modules/socks-proxy-agent/node_modules/agent-base": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", - "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", - "optional": true, - "engines": { - "node": ">= 14" - } - }, "node_modules/socks-proxy-agent/node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": 
"sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "optional": true, "dependencies": { "ms": "^2.1.3" @@ -12499,9 +12820,9 @@ "optional": true }, "node_modules/sonic-boom": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-4.0.1.tgz", - "integrity": "sha512-hTSD/6JMLyT4r9zeof6UtuBDpjJ9sO08/nmS5djaA9eozT9oOlNdpXSnzcgj4FTqpk3nkLrs61l4gip9r1HCrQ==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-4.2.0.tgz", + "integrity": "sha512-INb7TM37/mAcsGmc9hyyI6+QR3rR1zVRu36B0NeGXKnOOLiZOfER5SA+N7X7k3yUYRzLWafduTDvJAfDswwEww==", "dependencies": { "atomic-sleep": "^1.0.0" } @@ -12512,486 +12833,133 @@ "integrity": "sha512-2sqgzeFlid6N4Z2fUQ1cvFmTOLRi/sEDzSQ0OKYchqgoPmQBVyM3959qYx3fpS6Esef80KjmpgPeEr028dP3OA==" }, "node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "devOptional": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/spawn-wrap": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz", - "integrity": "sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg==", - "dev": true, - "dependencies": { - "foreground-child": "^2.0.0", - "is-windows": "^1.0.2", - "make-dir": "^3.0.0", - "rimraf": "^3.0.0", - "signal-exit": "^3.0.2", - "which": "^2.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/spawn-wrap/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" 
- } - }, - "node_modules/spawn-wrap/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "dev": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/spawn-wrap/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/spawn-wrap/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/spdx-correct": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", - "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", - "dev": true, - "dependencies": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-exceptions": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", - "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", - "dev": true - }, - 
"node_modules/spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "dependencies": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "node_modules/spdx-license-ids": { - "version": "3.0.17", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.17.tgz", - "integrity": "sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg==", - "dev": true - }, - "node_modules/split2": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", - "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", - "engines": { - "node": ">= 10.x" - } - }, - "node_modules/sprintf-js": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", - "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", - "devOptional": true - }, - "node_modules/sqlite3": { - "version": "5.1.7", - "resolved": "https://registry.npmjs.org/sqlite3/-/sqlite3-5.1.7.tgz", - "integrity": "sha512-GGIyOiFaG+TUra3JIfkI/zGP8yZYLPQ0pl1bH+ODjiX57sPhrLU5sQJn1y9bDKZUFYkX1crlrPfSYt0BKKdkog==", - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "node-addon-api": "^7.0.0", - "prebuild-install": "^7.1.1", - "tar": "^6.1.11" - }, - "optionalDependencies": { - "node-gyp": "8.x" - }, - "peerDependencies": { - "node-gyp": "8.x" - }, - "peerDependenciesMeta": { - "node-gyp": { - "optional": true - } - } - }, - "node_modules/sqlite3/node_modules/@npmcli/fs": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-1.1.1.tgz", - "integrity": 
"sha512-8KG5RD0GVP4ydEzRn/I4BNDuxDtqVbOdm8675T49OIG/NGhaK0pjPX7ZcDlvKYbA+ulvVK3ztfcF4uBdOxuJbQ==", - "optional": true, - "dependencies": { - "@gar/promisify": "^1.0.1", - "semver": "^7.3.5" - } - }, - "node_modules/sqlite3/node_modules/@npmcli/move-file": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@npmcli/move-file/-/move-file-1.1.2.tgz", - "integrity": "sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg==", - "deprecated": "This functionality has been moved to @npmcli/fs", - "optional": true, - "dependencies": { - "mkdirp": "^1.0.4", - "rimraf": "^3.0.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/sqlite3/node_modules/@tootallnate/once": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-1.1.2.tgz", - "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", - "optional": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/sqlite3/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "optional": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/sqlite3/node_modules/cacache": { - "version": "15.3.0", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-15.3.0.tgz", - "integrity": "sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ==", - "optional": true, - "dependencies": { - "@npmcli/fs": "^1.0.0", - "@npmcli/move-file": "^1.0.1", - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "glob": "^7.1.4", - "infer-owner": "^1.0.4", - "lru-cache": "^6.0.0", - "minipass": "^3.1.1", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.2", - 
"mkdirp": "^1.0.3", - "p-map": "^4.0.0", - "promise-inflight": "^1.0.1", - "rimraf": "^3.0.2", - "ssri": "^8.0.1", - "tar": "^6.0.2", - "unique-filename": "^1.1.1" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/sqlite3/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "optional": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/sqlite3/node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "optional": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/sqlite3/node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "optional": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/sqlite3/node_modules/http-proxy-agent": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", - "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", - "optional": true, - "dependencies": { - "@tootallnate/once": "1", - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, - 
"node_modules/sqlite3/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "optional": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/sqlite3/node_modules/make-fetch-happen": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", - "integrity": "sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg==", - "optional": true, - "dependencies": { - "agentkeepalive": "^4.1.3", - "cacache": "^15.2.0", - "http-cache-semantics": "^4.1.0", - "http-proxy-agent": "^4.0.1", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^6.0.0", - "minipass": "^3.1.3", - "minipass-collect": "^1.0.2", - "minipass-fetch": "^1.3.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.2", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^6.0.0", - "ssri": "^8.0.0" - }, - "engines": { - "node": ">= 10" - } - }, - "node_modules/sqlite3/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "optional": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "devOptional": true, "engines": { - "node": "*" + "node": ">=0.10.0" } }, - "node_modules/sqlite3/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": 
"sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", - "optional": true, + "node_modules/spawn-wrap": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/spawn-wrap/-/spawn-wrap-2.0.0.tgz", + "integrity": "sha512-EeajNjfN9zMnULLwhZZQU3GWBoFNkbngTUPfaawT4RkMiviTxcX0qfhVbGey39mfctfDHkWtuecgQ8NJcyQWHg==", + "dev": true, "dependencies": { - "yallist": "^4.0.0" + "foreground-child": "^2.0.0", + "is-windows": "^1.0.2", + "make-dir": "^3.0.0", + "rimraf": "^3.0.0", + "signal-exit": "^3.0.2", + "which": "^2.0.1" }, "engines": { "node": ">=8" } }, - "node_modules/sqlite3/node_modules/minipass-fetch": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-1.4.1.tgz", - "integrity": "sha512-CGH1eblLq26Y15+Azk7ey4xh0J/XfJfrCox5LDJiKqI2Q2iwOLOKrlmIaODiSQS8d18jalF6y2K2ePUm0CmShw==", - "optional": true, + "node_modules/spdx-correct": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "dev": true, "dependencies": { - "minipass": "^3.1.0", - "minipass-sized": "^1.0.3", - "minizlib": "^2.0.0" - }, - "engines": { - "node": ">=8" - }, - "optionalDependencies": { - "encoding": "^0.1.12" + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" } }, - "node_modules/sqlite3/node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "optional": true, - "bin": { - "mkdirp": "bin/cmd.js" - }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": 
"sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", + "dev": true + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dev": true, + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.21", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.21.tgz", + "integrity": "sha512-Bvg/8F5XephndSK3JffaRqdT+gyhfqIPwDHpX80tJrF8QQRYMo8sNMeaZ2Dp5+jhwKnUmIOyFFQfHRkjJm5nXg==", + "dev": true + }, + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", "engines": { - "node": ">=10" + "node": ">= 10.x" } }, - "node_modules/sqlite3/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "optional": true + "node_modules/sprintf-js": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz", + "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", + "devOptional": true }, - "node_modules/sqlite3/node_modules/node-gyp": { - "version": "8.4.1", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-8.4.1.tgz", - "integrity": "sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w==", - "optional": true, + "node_modules/sqlite3": { + "version": "5.1.7", + "resolved": 
"https://registry.npmjs.org/sqlite3/-/sqlite3-5.1.7.tgz", + "integrity": "sha512-GGIyOiFaG+TUra3JIfkI/zGP8yZYLPQ0pl1bH+ODjiX57sPhrLU5sQJn1y9bDKZUFYkX1crlrPfSYt0BKKdkog==", + "hasInstallScript": true, "dependencies": { - "env-paths": "^2.2.0", - "glob": "^7.1.4", - "graceful-fs": "^4.2.6", - "make-fetch-happen": "^9.1.0", - "nopt": "^5.0.0", - "npmlog": "^6.0.0", - "rimraf": "^3.0.2", - "semver": "^7.3.5", - "tar": "^6.1.2", - "which": "^2.0.2" + "bindings": "^1.5.0", + "node-addon-api": "^7.0.0", + "prebuild-install": "^7.1.1", + "tar": "^6.1.11" }, - "bin": { - "node-gyp": "bin/node-gyp.js" + "optionalDependencies": { + "node-gyp": "8.x" }, - "engines": { - "node": ">= 10.12.0" + "peerDependencies": { + "node-gyp": "8.x" + }, + "peerDependenciesMeta": { + "node-gyp": { + "optional": true + } } }, - "node_modules/sqlite3/node_modules/nopt": { + "node_modules/sqlite3/node_modules/minipass": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", - "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", - "optional": true, - "dependencies": { - "abbrev": "1" - }, - "bin": { - "nopt": "bin/nopt.js" - }, + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", "engines": { - "node": ">=6" + "node": ">=8" } }, - "node_modules/sqlite3/node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "optional": true, - "dependencies": { - "glob": "^7.1.3" - }, + "node_modules/sqlite3/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": 
"sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/sqlite3/node_modules/socks-proxy-agent": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-6.2.1.tgz", - "integrity": "sha512-a6KW9G+6B3nWZ1yB8G7pJwL3ggLy1uTzKAgCb7ttblwqdz9fMGJUuTy3uFzEP48FAs9FLILlmzDlE2JJhVQaXQ==", - "optional": true, - "dependencies": { - "agent-base": "^6.0.2", - "debug": "^4.3.3", - "socks": "^2.6.2" + "mkdirp": "bin/cmd.js" }, "engines": { - "node": ">= 10" + "node": ">=10" } }, - "node_modules/sqlite3/node_modules/ssri": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-8.0.1.tgz", - "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", - "optional": true, + "node_modules/sqlite3/node_modules/tar": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", "dependencies": { - "minipass": "^3.1.1" + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" }, "engines": { - "node": ">= 8" - } - }, - "node_modules/sqlite3/node_modules/unique-filename": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", - "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", - "optional": true, - "dependencies": { - "unique-slug": "^2.0.0" - } - }, - "node_modules/sqlite3/node_modules/unique-slug": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", - "integrity": 
"sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", - "optional": true, - "dependencies": { - "imurmurhash": "^0.1.4" + "node": ">=10" } }, "node_modules/sqlstring": { @@ -13033,6 +13001,18 @@ "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==", "dev": true }, + "node_modules/ssri": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/ssri/-/ssri-8.0.1.tgz", + "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", + "optional": true, + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/stack-trace": { "version": "0.0.9", "resolved": "https://registry.npmjs.org/stack-trace/-/stack-trace-0.0.9.tgz", @@ -13077,15 +13057,6 @@ "pkg-conf": "^2.0.0" } }, - "node_modules/standard-engine/node_modules/get-stdin": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-6.0.0.tgz", - "integrity": "sha512-jp4tHawyV7+fkkSKyvjuLZswblUtz+SQKzSWnBbii16BuZksJlU1wuBYXY75r+duh/llF1ur6oNwi+2ZzjKZ7g==", - "dev": true, - "engines": { - "node": ">=4" - } - }, "node_modules/standard/node_modules/acorn": { "version": "6.4.2", "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", @@ -13129,9 +13100,9 @@ } }, "node_modules/standard/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -13196,6 +13167,7 @@ "version": "5.4.0", "resolved": 
"https://registry.npmjs.org/eslint/-/eslint-5.4.0.tgz", "integrity": "sha512-UIpL91XGex3qtL6qwyCQJar2j3osKxK9e3ano3OcGEIRM4oWIpCkDg9x95AXEC2wMs7PnxzOkPZ2gq+tsMS9yg==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", "dev": true, "dependencies": { "ajv": "^6.5.0", @@ -13409,6 +13381,7 @@ "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "dependencies": { "fs.realpath": "^1.0.0", @@ -13553,6 +13526,7 @@ "version": "2.6.3", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, "dependencies": { "glob": "^7.1.3" @@ -13662,6 +13636,19 @@ "node": ">= 0.8" } }, + "node_modules/stop-iteration-iterator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz", + "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", + "dev": true, + "dependencies": { + "es-errors": "^1.3.0", + "internal-slot": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/stream-buffers": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.3.tgz", @@ -13734,15 +13721,18 @@ } }, "node_modules/string.prototype.trim": { - "version": "1.2.9", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz", - "integrity": "sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==", + "version": "1.2.10", + "resolved": 
"https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.10.tgz", + "integrity": "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-data-property": "^1.1.4", "define-properties": "^1.2.1", - "es-abstract": "^1.23.0", - "es-object-atoms": "^1.0.0" + "es-abstract": "^1.23.5", + "es-object-atoms": "^1.0.0", + "has-property-descriptors": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -13752,15 +13742,19 @@ } }, "node_modules/string.prototype.trimend": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz", - "integrity": "sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==", + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.9.tgz", + "integrity": "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" }, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -13858,12 +13852,12 @@ } }, "node_modules/superagent/node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "dependencies": { - "ms": "2.1.2" + "ms": "^2.1.3" }, "engines": { "node": ">=6.0" @@ -13875,13 +13869,13 @@ } 
}, "node_modules/superagent/node_modules/formidable": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/formidable/-/formidable-2.1.2.tgz", - "integrity": "sha512-CM3GuJ57US06mlpQ47YcunuUZ9jpm8Vx+P2CGt2j7HpgkKZO/DJYQ0Bobim8G6PFQmK5lOqOOdUXboU+h73A4g==", + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/formidable/-/formidable-2.1.5.tgz", + "integrity": "sha512-Oz5Hwvwak/DCaXVVUtPn4oLMLLy1CdclLKO1LFgU7XzDpVMUU5UjlSLpGMocyQNNk8F6IJW9M/YdooSn2MRI+Q==", "dev": true, "dependencies": { + "@paralleldrive/cuid2": "^2.2.2", "dezalgo": "^1.0.4", - "hexoid": "^1.0.0", "once": "^1.4.0", "qs": "^6.11.0" }, @@ -13902,9 +13896,9 @@ } }, "node_modules/superagent/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", "dev": true }, "node_modules/superagent/node_modules/once": { @@ -14098,26 +14092,26 @@ "node": ">=4" } }, - "node_modules/tar": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", - "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" + "node_modules/tar": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" }, "engines": 
{ - "node": ">=10" + "node": ">=18" } }, "node_modules/tar-fs": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.2.tgz", - "integrity": "sha512-EsaAXwxmx8UB7FRKqeozqEPop69DXcmYwTQwXvyAPF352HJsPdkVhvTaDPYqfNgruveJIJy3TA2l+2zj8LJIJA==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.3.tgz", + "integrity": "sha512-090nwYJDmlhwFwEW3QQl+vaNnxsO2yVsd45eTKRBzSzu+hlb1w2K9inVq5b0ngXuLVqQ4ApvsUHHnu/zQNkWAg==", "dependencies": { "chownr": "^1.1.1", "mkdirp-classic": "^0.5.2", @@ -14166,37 +14160,39 @@ "safe-buffer": "~5.2.0" } }, - "node_modules/tar/node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dependencies": { - "minipass": "^3.0.0" - }, + "node_modules/tar/node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", "engines": { - "node": ">= 8" + "node": ">=18" } }, - "node_modules/tar/node_modules/fs-minipass/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/tar/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tar/node_modules/minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": 
"sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", "dependencies": { - "yallist": "^4.0.0" + "minipass": "^7.1.2" }, "engines": { - "node": ">=8" + "node": ">= 18" } }, - "node_modules/tar/node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "bin": { - "mkdirp": "bin/cmd.js" - }, + "node_modules/tar/node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", "engines": { - "node": ">=10" + "node": ">=18" } }, "node_modules/tcp-port-used": { @@ -14253,9 +14249,9 @@ } }, "node_modules/test-exclude/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "dev": true, "dependencies": { "balanced-match": "^1.0.0", @@ -14266,6 +14262,7 @@ "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "dependencies": { "fs.realpath": "^1.0.0", @@ -14301,9 +14298,9 @@ "dev": true }, "node_modules/thread-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/thread-stream/-/thread-stream-3.0.0.tgz", - "integrity": 
"sha512-oUIFjxaUT6knhPtWgDMc29zF1FcSl0yXpapkyrQrCGEfYA2HUZXCilUtKyYIv6HkCyqSPAMkY+EG0GbyIrNDQg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/thread-stream/-/thread-stream-3.1.0.tgz", + "integrity": "sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A==", "dependencies": { "real-require": "^0.2.0" } @@ -14315,13 +14312,16 @@ "dev": true }, "node_modules/timers-ext": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.7.tgz", - "integrity": "sha512-b85NUNzTSdodShTIbky6ZF02e8STtVVfD+fu4aXXShEELpozH+bCpJLYMPZbsABN2wDH7fJpqIoXxJpzbf0NqQ==", + "version": "0.1.8", + "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.8.tgz", + "integrity": "sha512-wFH7+SEAcKfJpfLPkrgMPvvwnEtj8W4IurvEyrKsDleXnKLCDw71w8jltvfLa8Rm4qQxxT4jmDBYbJG/z7qoww==", "dev": true, "dependencies": { - "es5-ext": "~0.10.46", - "next-tick": "1" + "es5-ext": "^0.10.64", + "next-tick": "^1.1.0" + }, + "engines": { + "node": ">=0.12" } }, "node_modules/tmp": { @@ -14340,7 +14340,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, "dependencies": { "is-number": "^7.0.0" }, @@ -14362,9 +14361,9 @@ "integrity": "sha512-OsLcGGbYF3rMjPUf8oKktyvCiUxSbqMMS39m33MAjLTC1DVIH6x3WSt63/M77ihI09+Sdfk1AXvfhCEeUmC7mg==" }, "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==" }, "node_modules/tunnel-agent": { "version": "0.6.0", @@ -14384,9 +14383,9 @@ 
"dev": true }, "node_modules/type": { - "version": "2.7.2", - "resolved": "https://registry.npmjs.org/type/-/type-2.7.2.tgz", - "integrity": "sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==", + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/type/-/type-2.7.3.tgz", + "integrity": "sha512-8j+1QmAbPvLZow5Qpi6NCaN8FB60p/6x8/vfNqOk/hC+HuvFZhL4+WfekuhQLiqFZXOgQdrs3B+XxEmCc6b3FQ==", "dev": true }, "node_modules/type-check": { @@ -14432,30 +14431,30 @@ } }, "node_modules/typed-array-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz", - "integrity": "sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.3.tgz", + "integrity": "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", + "call-bound": "^1.0.3", "es-errors": "^1.3.0", - "is-typed-array": "^1.1.13" + "is-typed-array": "^1.1.14" }, "engines": { "node": ">= 0.4" } }, "node_modules/typed-array-byte-length": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz", - "integrity": "sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.3.tgz", + "integrity": "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", "dev": true, "dependencies": { - "call-bind": "^1.0.7", + "call-bind": "^1.0.8", "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-proto": "^1.0.3", - "is-typed-array": "^1.1.13" + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.14" }, 
"engines": { "node": ">= 0.4" @@ -14465,17 +14464,18 @@ } }, "node_modules/typed-array-byte-offset": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz", - "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.4.tgz", + "integrity": "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", "dev": true, "dependencies": { "available-typed-arrays": "^1.0.7", - "call-bind": "^1.0.7", + "call-bind": "^1.0.8", "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-proto": "^1.0.3", - "is-typed-array": "^1.1.13" + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.15", + "reflect.getprototypeof": "^1.0.9" }, "engines": { "node": ">= 0.4" @@ -14485,17 +14485,17 @@ } }, "node_modules/typed-array-length": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.6.tgz", - "integrity": "sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.7.tgz", + "integrity": "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", "dev": true, "dependencies": { "call-bind": "^1.0.7", "for-each": "^0.3.3", "gopd": "^1.0.1", - "has-proto": "^1.0.3", "is-typed-array": "^1.1.13", - "possible-typed-array-names": "^1.0.0" + "possible-typed-array-names": "^1.0.0", + "reflect.getprototypeof": "^1.0.6" }, "engines": { "node": ">= 0.4" @@ -14527,9 +14527,9 @@ } }, "node_modules/uglify-js": { - "version": "3.19.2", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.2.tgz", - "integrity": 
"sha512-S8KA6DDI47nQXJSi2ctQ629YzwOVs+bQML6DAtvy0wgNdpi+0ySpQK0g2pxBq2xfF2z3YCscu7NNA8nXT9PlIQ==", + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", "dev": true, "optional": true, "bin": { @@ -14551,13 +14551,13 @@ } }, "node_modules/umzug": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/umzug/-/umzug-3.7.0.tgz", - "integrity": "sha512-r/L2Zlilgv3SKhmP2nkA9x2Xi1PKtu2K34/i/s7AYJ2mLjEO+IxETJAK7CKf6l3QOvoy5/ChykeX9qt6ykRz6Q==", + "version": "3.8.2", + "resolved": "https://registry.npmjs.org/umzug/-/umzug-3.8.2.tgz", + "integrity": "sha512-BEWEF8OJjTYVC56GjELeHl/1XjFejrD7aHzn+HldRJTx+pL1siBrKHZC8n4K/xL3bEzVA9o++qD1tK2CpZu4KA==", "dependencies": { "@rushstack/ts-command-line": "^4.12.2", "emittery": "^0.13.0", - "glob": "^8.0.3", + "fast-glob": "^3.3.2", "pony-cause": "^2.1.4", "type-fest": "^4.0.0" }, @@ -14566,9 +14566,9 @@ } }, "node_modules/umzug/node_modules/type-fest": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.18.2.tgz", - "integrity": "sha512-+suCYpfJLAe4OXS6+PPXjW3urOS4IoP9waSiLuXfLgqZODKw/aWwASvzqE886wA0kQgGy0mIWyhd87VpqIy6Xg==", + "version": "4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", "engines": { "node": ">=16" }, @@ -14577,15 +14577,18 @@ } }, "node_modules/unbox-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", - "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.1.0.tgz", + "integrity": 
"sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", + "call-bound": "^1.0.3", "has-bigints": "^1.0.2", - "has-symbols": "^1.0.3", - "which-boxed-primitive": "^1.0.2" + "has-symbols": "^1.1.0", + "which-boxed-primitive": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -14597,9 +14600,9 @@ "integrity": "sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==" }, "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==" + "version": "7.8.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.8.0.tgz", + "integrity": "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw==" }, "node_modules/uniq": { "version": "1.0.1", @@ -14607,6 +14610,24 @@ "integrity": "sha512-Gw+zz50YNKPDKXs+9d+aKAjVwpjNwqzvNpLigIruT4HA9lMZNdMqs9x07kKHB/L9WRzqp4+DlTU5s4wG2esdoA==", "dev": true }, + "node_modules/unique-filename": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-1.1.1.tgz", + "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "optional": true, + "dependencies": { + "unique-slug": "^2.0.0" + } + }, + "node_modules/unique-slug": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-2.0.2.tgz", + "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "optional": true, + "dependencies": { + "imurmurhash": "^0.1.4" + } + }, "node_modules/universalify": { "version": "0.2.0", "resolved": 
"https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", @@ -14625,9 +14646,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.16", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.16.tgz", - "integrity": "sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", "dev": true, "funding": [ { @@ -14644,8 +14665,8 @@ } ], "dependencies": { - "escalade": "^3.1.2", - "picocolors": "^1.0.1" + "escalade": "^3.2.0", + "picocolors": "^1.1.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -14658,7 +14679,6 @@ "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, "dependencies": { "punycode": "^2.1.0" } @@ -14712,17 +14732,23 @@ } }, "node_modules/uvm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/uvm/-/uvm-3.0.0.tgz", - "integrity": "sha512-dATVpxsNfFBpHNdq6sy4/CV2UnoRbV8tvvkK0VrUPnm+o7dK6fnir4LEm8czeDdpbw2KKDKjIPcRSZY4AEwEZA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/uvm/-/uvm-2.1.1.tgz", + "integrity": "sha512-BZ5w8adTpNNr+zczOBRpaX/hH8UPKAf7fmCnidrcsqt3bn8KT9bDIfuS7hgRU9RXgiN01su2pwysBONY6w8W5w==", "dev": true, "dependencies": { - "flatted": "3.3.1" + "flatted": "3.2.6" }, "engines": { - "node": ">=16" + "node": ">=10" } }, + "node_modules/uvm/node_modules/flatted": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.6.tgz", + "integrity": "sha512-0sQoMh9s0BYsm+12Huy/rkKxVu4R1+r96YX5cG44rHV0pQ6iC3Q+mkoMFaGWObMFYQxCVT+ssG1ksneA2MI9KQ==", + "dev": true + }, 
"node_modules/validate-npm-package-license": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", @@ -14734,9 +14760,9 @@ } }, "node_modules/validator": { - "version": "13.12.0", - "resolved": "https://registry.npmjs.org/validator/-/validator-13.12.0.tgz", - "integrity": "sha512-c1Q0mCiPlgdTVVVIJIrBuxNicYE+t/7oKeI9MWLj3fh/uq2Pxh/3eeWbVZ4OcGW1TUf53At0njHw5SMdA3tmMg==", + "version": "13.15.15", + "resolved": "https://registry.npmjs.org/validator/-/validator-13.15.15.tgz", + "integrity": "sha512-BgWVbCI72aIQy937xbawcs+hrVaN/CZ2UwutgaJ36hGqRrLNM+f5LUT/YPRbo8IV/ASeFzXszezV+y2+rq3l8A==", "engines": { "node": ">= 0.10" } @@ -14785,16 +14811,70 @@ } }, "node_modules/which-boxed-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.1.1.tgz", + "integrity": "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", + "dev": true, + "dependencies": { + "is-bigint": "^1.1.0", + "is-boolean-object": "^1.2.1", + "is-number-object": "^1.1.1", + "is-string": "^1.1.1", + "is-symbol": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.2.1.tgz", + "integrity": "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", + "dev": true, + "dependencies": { + "call-bound": "^1.0.2", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.1.0", + "is-finalizationregistry": "^1.1.0", + "is-generator-function": "^1.0.10", + "is-regex": "^1.2.1", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.1.0", + "which-collection": "^1.0.2", + 
"which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-builtin-type/node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + "dev": true + }, + "node_modules/which-collection": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", "dev": true, "dependencies": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -14807,15 +14887,17 @@ "dev": true }, "node_modules/which-typed-array": { - "version": "1.1.15", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.15.tgz", - "integrity": "sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==", + "version": "1.1.19", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", "dev": true, "dependencies": { "available-typed-arrays": "^1.0.7", - "call-bind": "^1.0.7", - "for-each": "^0.3.3", - "gopd": "^1.0.1", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + 
"for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", "has-tostringtag": "^1.0.2" }, "engines": { @@ -14872,17 +14954,16 @@ "dev": true }, "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" }, "engines": { - "node": ">=12" + "node": ">=10" }, "funding": { "url": "https://github.com/chalk/wrap-ansi?sponsor=1" @@ -14906,68 +14987,6 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": 
"sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "node_modules/wrap-ansi/node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, - "dependencies": { - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -15088,20 +15107,20 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dependencies": { - "cliui": "^7.0.2", + "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", - "string-width": "^4.2.0", + "string-width": "^4.2.3", "y18n": "^5.0.5", - 
"yargs-parser": "^20.2.2" + "yargs-parser": "^21.1.1" }, "engines": { - "node": ">=10" + "node": ">=12" } }, "node_modules/yargs-parser": { @@ -15151,6 +15170,14 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/yargs/node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, "node_modules/yauzl": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", @@ -15172,34 +15199,6 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } - }, - "node_modules/z-schema": { - "version": "5.0.5", - "resolved": "https://registry.npmjs.org/z-schema/-/z-schema-5.0.5.tgz", - "integrity": "sha512-D7eujBWkLa3p2sIpJA0d1pr7es+a7m0vFAnZLlCEKq/Ij2k0MLi9Br2UPxoxdYystm5K1yeBGzub0FlYUEWj2Q==", - "dependencies": { - "lodash.get": "^4.4.2", - "lodash.isequal": "^4.5.0", - "validator": "^13.7.0" - }, - "bin": { - "z-schema": "bin/z-schema" - }, - "engines": { - "node": ">=8.0.0" - }, - "optionalDependencies": { - "commander": "^9.4.1" - } - }, - "node_modules/z-schema/node_modules/commander": { - "version": "9.5.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz", - "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==", - "optional": true, - "engines": { - "node": "^12.20.0 || >=14" - } } } } diff --git a/package.json b/package.json index 9660f935..d5061e20 100644 --- a/package.json +++ b/package.json @@ -57,9 +57,16 @@ "dependencies": { "@datasance/ecn-viewer": "0.5.4", "@kubernetes/client-node": "^0.22.3", + "@msgpack/msgpack": "^3.1.2", + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/exporter-trace-otlp-http": "^0.200.0", + "@opentelemetry/instrumentation-express": "^0.48.1", + 
"@opentelemetry/instrumentation-http": "^0.200.0", + "@opentelemetry/resources": "^1.8.0", + "@opentelemetry/sdk-node": "^0.200.0", "axios": "1.8.4", - "body-parser": "^1.20.3", "bignumber.js": "^9.3.0", + "body-parser": "^1.20.3", "child_process": "1.0.2", "command-line-args": "5.2.1", "command-line-usage": "7.0.3", @@ -103,15 +110,10 @@ "sqlite3": "^5.1.7", "string-format": "2.0.0", "umzug": "^3.7.0", - "uuid": "11.1.0", "underscore": "1.13.6", - "xss-clean": "0.1.1", - "@opentelemetry/api": "^1.9.0", - "@opentelemetry/resources": "^1.8.0", - "@opentelemetry/sdk-node": "^0.200.0", - "@opentelemetry/exporter-trace-otlp-http": "^0.200.0", - "@opentelemetry/instrumentation-http": "^0.200.0", - "@opentelemetry/instrumentation-express": "^0.48.1" + "uuid": "11.1.0", + "ws": "^8.18.0", + "xss-clean": "0.1.1" }, "devDependencies": { "acorn": "8.11.3", @@ -119,18 +121,19 @@ "chai": "5.1.1", "chai-as-promised": "7.1.2", "chai-http": "4.4.0", - "eslint": "9.16.0", + "eslint": "9.28.0", "eslint-config-google": "0.14.0", "mocha": "10.6.0", "mocha-junit-reporter": "2.2.1", - "newman": "^6.2.0", + "newman": "^6.2.1", "newman-reporter-junitfull": "1.1.1", "nyc": "15.1.0", "sequelize-cli": "6.6.2", "sinon": "17.0.1", "sinon-chai": "3.7.0", "snyk": "^1.1291.0", - "standard": "12.0.1" + "standard": "12.0.1", + "js-yaml": "^4.1.0" }, "files": [ "/scripts", @@ -145,6 +148,11 @@ "overrides": { "@kubernetes/client-node": { "request": "@cypress/request@3.0.8" + }, + "sqlite3": { + "prebuild-install": { + "tar-fs": "^2.1.3" + } } } } diff --git a/scripts/generate-swagger.js b/scripts/generate-swagger.js new file mode 100644 index 00000000..e04a61ee --- /dev/null +++ b/scripts/generate-swagger.js @@ -0,0 +1,778 @@ +const fs = require('fs') +const path = require('path') +const yaml = require('js-yaml') + +// Base Swagger configuration +const swaggerBase = { + openapi: '3.0.0', + info: { + title: 'Datasance PoT Controller', + version: '3.5.0', + description: 'Datasance PoT Controller REST 
API Documentation' + }, + servers: [ + { + url: 'http://localhost:51121/api/v3' + } + ], + tags: [ + { name: 'Controller', description: 'Manage your controller' }, + { name: 'ioFog', description: 'Manage your agents' }, + { name: 'Application', description: 'Manage your applications' }, + { name: 'Application Template', description: 'Manage your application templates' }, + { name: 'Catalog', description: 'Manage your catalog' }, + { name: 'Registries', description: 'Manage your registries' }, + { name: 'Microservices', description: 'Manage your microservices' }, + { name: 'Routing', description: 'Manage your routes' }, + { name: 'Router', description: 'Manage your Default Router' }, + { name: 'Edge Resource', description: 'Manage your Edge Resources' }, + { name: 'Diagnostics', description: 'Diagnostic your microservices' }, + { name: 'Tunnel', description: 'Manage ssh tunnels' }, + { name: 'Agent', description: 'Used by your agents to communicate with your controller' }, + { name: 'User', description: 'Manage your users' }, + { name: 'Secrets', description: 'Manage your secrets' }, + { name: 'Certificates', description: 'Manage your certificates' }, + { name: 'Services', description: 'Manage your services' }, + { name: 'VolumeMounts', description: 'Manage your volume mounts' }, + { name: 'ConfigMap', description: 'Manage your config maps' } + ], + components: { + securitySchemes: { + authToken: { + type: 'http', + scheme: 'bearer', + bearerFormat: 'JWT', + description: 'JWT token for authentication (user or agent)' + } + }, + schemas: {} + }, + security: [ + { + authToken: [] + } + ], + paths: {} +} + +// Common response headers +const commonHeaders = { + 'X-Timestamp': { + description: 'FogController server timestamp', + schema: { + type: 'number' + } + } +} + +// Map HTTP methods to response schemas +const responseSchemas = { + get: { + '200': { + description: 'Success', + headers: commonHeaders, + content: { + 'application/json': { + schema: { + type: 'object' 
+ } + } + } + }, + '401': { + description: 'Not Authorized' + }, + '404': { + description: 'Not Found' + }, + '500': { + description: 'Internal Server Error' + } + }, + post: { + '201': { + description: 'Created', + headers: commonHeaders, + content: { + 'application/json': { + schema: { + type: 'object' + } + } + } + }, + '400': { + description: 'Bad Request' + }, + '401': { + description: 'Not Authorized' + }, + '409': { + description: 'Duplicate Name' + }, + '500': { + description: 'Internal Server Error' + } + }, + put: { + '200': { + description: 'Success', + headers: commonHeaders, + content: { + 'application/json': { + schema: { + type: 'object' + } + } + } + }, + '400': { + description: 'Bad Request' + }, + '401': { + description: 'Not Authorized' + }, + '404': { + description: 'Not Found' + }, + '500': { + description: 'Internal Server Error' + } + }, + delete: { + '204': { + description: 'Success', + headers: commonHeaders + }, + '401': { + description: 'Not Authorized' + }, + '404': { + description: 'Not Found' + }, + '500': { + description: 'Internal Server Error' + } + } +} + +// Convert JSON Schema to OpenAPI Schema +function convertJsonSchemaToOpenAPI (schema) { + if (!schema) return {} + const openAPISchema = { ...schema } + + // Remove JSON Schema specific properties + delete openAPISchema.id + delete openAPISchema.$schema + + // Remove OpenAPI-incompatible properties + delete openAPISchema.if + delete openAPISchema.then + delete openAPISchema.const + delete openAPISchema.optional + + // Handle required arrays + if (openAPISchema.required && Array.isArray(openAPISchema.required) && openAPISchema.required.length === 0) { + delete openAPISchema.required + } + + // Convert $ref to OpenAPI format + if (openAPISchema.$ref) { + const refPath = openAPISchema.$ref.replace(/^\//, '') + // Only add #/components/schemas/ if it's not already there + if (!refPath.startsWith('#/components/schemas/')) { + openAPISchema.$ref = `#/components/schemas/${refPath}` + } 
else { + openAPISchema.$ref = refPath + } + } + + // Handle properties + if (openAPISchema.properties) { + Object.keys(openAPISchema.properties).forEach(key => { + if (openAPISchema.properties[key].$ref) { + const refPath = openAPISchema.properties[key].$ref.replace(/^\//, '') + // Only add #/components/schemas/ if it's not already there + if (!refPath.startsWith('#/components/schemas/')) { + openAPISchema.properties[key].$ref = `#/components/schemas/${refPath}` + } else { + openAPISchema.properties[key].$ref = refPath + } + } + // Remove additional properties from nested objects + if (openAPISchema.properties[key].type === 'object') { + delete openAPISchema.properties[key].additionalProperties + } + // Handle items in arrays + if (openAPISchema.properties[key].items) { + if (openAPISchema.properties[key].items.type === 'object') { + delete openAPISchema.properties[key].items.additionalProperties + // Convert key/value pairs to properties + if (openAPISchema.properties[key].items.key && openAPISchema.properties[key].items.value) { + openAPISchema.properties[key].items = { + type: 'object', + properties: { + key: openAPISchema.properties[key].items.key, + value: openAPISchema.properties[key].items.value + } + } + } + } + // Handle array item references + if (openAPISchema.properties[key].items.$ref) { + const refPath = openAPISchema.properties[key].items.$ref.replace(/^\//, '') + // Only add #/components/schemas/ if it's not already there + if (!refPath.startsWith('#/components/schemas/')) { + openAPISchema.properties[key].items = { + $ref: `#/components/schemas/${refPath}` + } + } else { + openAPISchema.properties[key].items = { + $ref: refPath + } + } + } + } + // Handle required arrays in properties + if (openAPISchema.properties[key].required && Array.isArray(openAPISchema.properties[key].required)) { + if (openAPISchema.properties[key].required.length === 0) { + delete openAPISchema.properties[key].required + } + } + // Handle intermediateCert optional property 
+ if (key === 'intermediateCert') { + delete openAPISchema.properties[key].optional + } + // Handle microserviceDelete additionalProperties + if (key === 'additionalProperties') { + openAPISchema.properties[key] = { + type: 'object', + additionalProperties: true + } + } + // Handle serviceCreate resource required + if (key === 'resource' && openAPISchema.properties[key].required) { + if (!Array.isArray(openAPISchema.properties[key].required)) { + openAPISchema.properties[key].required = ['cpu', 'memory'] + } + } + }) + } + + // Handle array items + if (openAPISchema.items) { + if (openAPISchema.items.$ref) { + const refPath = openAPISchema.items.$ref.replace(/^\//, '') + // Only add #/components/schemas/ if it's not already there + if (!refPath.startsWith('#/components/schemas/')) { + openAPISchema.items.$ref = `#/components/schemas/${refPath}` + } else { + openAPISchema.items.$ref = refPath + } + } + // Remove additional properties from array items + if (openAPISchema.items.type === 'object') { + delete openAPISchema.items.additionalProperties + } + } + + // Handle allOf/anyOf + if (openAPISchema.allOf) { + openAPISchema.allOf = openAPISchema.allOf.map(item => { + const converted = convertJsonSchemaToOpenAPI(item) + // Remove const from routerMode in anyOf/allOf + if (converted.properties && converted.properties.routerMode) { + delete converted.properties.routerMode.const + } + return converted + }) + } + if (openAPISchema.anyOf) { + openAPISchema.anyOf = openAPISchema.anyOf.map(item => { + const converted = convertJsonSchemaToOpenAPI(item) + // Remove const from routerMode in anyOf/allOf + if (converted.properties && converted.properties.routerMode) { + delete converted.properties.routerMode.const + } + return converted + }) + } + + // Remove additionalProperties at root level + delete openAPISchema.additionalProperties + + return openAPISchema +} + +// Convert path parameters from :param to {param} format and remove /api/v3 prefix +function convertPathParameters 
(path) { + // Remove /api/v3 prefix if present + const pathWithoutPrefix = path.replace(/^\/api\/v3/, '') + // Convert :param to {param} + return pathWithoutPrefix.replace(/:([^/]+)/g, '{$1}') +} + +// Get response schema based on route path and method +function getResponseSchema (path, method) { + const baseResponse = JSON.parse(JSON.stringify(responseSchemas[method] || responseSchemas.get)) + // Customize response schema based on path + if (path.includes('/application')) { + if (method === 'get') { + baseResponse['200'].content['application/json'].schema = { + type: 'object', + properties: { + applications: { + type: 'array', + items: { + type: 'object', + properties: { + name: { type: 'string' }, + description: { type: 'string' }, + version: { type: 'string' }, + microservices: { + type: 'array', + items: { + type: 'object', + properties: { + name: { type: 'string' }, + config: { type: 'object' } + } + } + } + } + } + } + } + } + } + } else if (path.includes('/microservices')) { + if (method === 'get') { + baseResponse['200'].content['application/json'].schema = { + type: 'object', + properties: { + microservices: { + type: 'array', + items: { + type: 'object', + properties: { + uuid: { type: 'string' }, + name: { type: 'string' }, + config: { type: 'object' } + } + } + } + } + } + } + } + return baseResponse +} + +// Get request body schema based on route path and method +function getRequestBodySchema (path, method) { + // Handle YAML file upload endpoints + if (path.endsWith('/yaml')) { + return { + required: true, + content: { + 'multipart/form-data': { + schema: { + type: 'object', + properties: { + application: { + type: 'string', + format: 'binary' + } + } + } + } + } + } + } + + // Map routes to their corresponding schemas based on service validations + const schemaMapping = { + // Application routes + '/application': { + post: 'applicationCreate', + put: 'applicationUpdate', + patch: 'applicationPatch' + }, + // Microservice routes + '/microservices': { 
+ post: 'microserviceCreate', + put: 'microserviceUpdate' + }, + // Iofog routes + '/iofog': { + post: 'iofogCreate', + put: 'iofogUpdate' + }, + // Agent routes + '/agent': { + post: 'agentProvision', + put: 'updateAgentConfig' + }, + // Routing routes + '/routing': { + post: 'routingCreate', + put: 'routingUpdate' + }, + // Secret routes + '/secret': { + post: 'secretCreate', + put: 'secretUpdate' + }, + // Service routes + '/service': { + post: 'serviceCreate', + put: 'serviceUpdate' + }, + // Certificate routes + '/certificate': { + post: 'certificateCreate', + put: 'caCreate' + }, + // Config map routes + '/configMap': { + post: 'configMapCreate', + put: 'configMapUpdate' + }, + // Volume mount routes + '/volumeMount': { + post: 'volumeMountCreate', + put: 'volumeMountUpdate' + }, + // Edge resource routes + '/edgeResource': { + post: 'edgeResourceCreate', + put: 'edgeResourceUpdate' + }, + // Application template routes + '/applicationTemplate': { + post: 'applicationTemplateCreate', + put: 'applicationTemplateUpdate', + patch: 'applicationTemplatePatch' + }, + // User routes + '/user': { + post: 'login', + put: 'refresh' + }, + // Catalog routes + '/catalog': { + post: 'catalogItemCreate', + put: 'catalogItemUpdate' + }, + // Registry routes + '/registry': { + post: 'registryCreate', + put: 'registryUpdate' + }, + // Tunnel routes + '/tunnel': { + post: 'tunnelCreate' + }, + // Config routes + '/config': { + put: 'configUpdate' + } + } + + // Find the matching schema for this path and method + for (const [routePath, methods] of Object.entries(schemaMapping)) { + if (path.includes(routePath) && methods[method]) { + const schemaName = methods[method] + return { + required: true, + content: { + 'application/json': { + schema: { + $ref: `#/components/schemas/${schemaName}` + } + } + } + } + } + } + + // Default JSON request body + return { + required: true, + content: { + 'application/json': { + schema: { + type: 'object' + } + } + } + } +} + +// Find the most 
similar tag from the tag list +function findMostSimilarTag (path) { + const pathSegments = path.split('/').filter(Boolean) + if (pathSegments.length === 0) return 'Controller' + + // Get the first meaningful segment (skip empty strings) + const firstSegment = pathSegments[0] + + // Define tag mapping for common paths + const tagMapping = { + 'application': 'Application', + 'applications': 'Application', + 'flow': 'Application', + 'flows': 'Application', + 'template': 'Application Template', + 'templates': 'Application Template', + 'catalog': 'Catalog', + 'registry': 'Registries', + 'registries': 'Registries', + 'microservice': 'Microservices', + 'microservices': 'Microservices', + 'route': 'Routing', + 'routes': 'Routing', + 'routing': 'Routing', + 'router': 'Router', + 'edgeResource': 'Edge Resource', + 'edgeResources': 'Edge Resource', + 'edge-resource': 'Edge Resource', + 'edge-resources': 'Edge Resource', + 'edge_resource': 'Edge Resource', + 'edge_resources': 'Edge Resource', + 'diagnostic': 'Diagnostics', + 'diagnostics': 'Diagnostics', + 'tunnel': 'Tunnel', + 'tunnels': 'Tunnel', + 'agent': 'Agent', + 'agents': 'Agent', + 'user': 'User', + 'users': 'User', + 'secret': 'Secrets', + 'secrets': 'Secrets', + 'certificate': 'Certificates', + 'certificates': 'Certificates', + 'service': 'Services', + 'services': 'Services', + 'volume': 'VolumeMounts', + 'volumes': 'VolumeMounts', + 'config': 'ConfigMap', + 'configs': 'ConfigMap', + 'iofog': 'ioFog' + } + + // Try to find an exact match first (case-insensitive) + const lowerFirstSegment = firstSegment.toLowerCase() + if (tagMapping[lowerFirstSegment]) { + return tagMapping[lowerFirstSegment] + } + + // Try to find a match with different case formats + const possibleFormats = [ + firstSegment, + firstSegment.toLowerCase(), + firstSegment.toUpperCase(), + firstSegment.replace(/([A-Z])/g, '-$1').toLowerCase(), + firstSegment.replace(/([A-Z])/g, '_$1').toLowerCase(), + firstSegment.charAt(0).toUpperCase() + 
firstSegment.slice(1) + ] + + for (const format of possibleFormats) { + if (tagMapping[format]) { + return tagMapping[format] + } + } + + // If no exact match, try to find the most similar tag + const tagNames = swaggerBase.tags.map(tag => tag.name.toLowerCase()) + let bestMatch = 'Controller' + let bestScore = 0 + + for (const tagName of tagNames) { + // Check if the path segment contains the tag name or vice versa + if (lowerFirstSegment.includes(tagName) || tagName.includes(lowerFirstSegment)) { + const score = Math.min(lowerFirstSegment.length, tagName.length) + if (score > bestScore) { + bestScore = score + bestMatch = swaggerBase.tags.find(tag => tag.name.toLowerCase() === tagName).name + } + } + } + + return bestMatch +} + +// Process route file +function processRouteFile (filePath) { + const routeFile = require(filePath) + const paths = {} + routeFile.forEach(route => { + // Skip WebSocket endpoints + if (route.method.toLowerCase() === 'ws') { + return + } + + const originalPath = route.path + const path = convertPathParameters(originalPath) + const method = route.method.toLowerCase() + if (!paths[path]) { + paths[path] = {} + } + // Extract parameters from path + const pathParams = [] + const pathRegex = /{([^}]+)}/g + let match + while ((match = pathRegex.exec(path)) !== null) { + pathParams.push({ + name: match[1], + in: 'path', + required: true, + schema: { + type: 'string' + } + }) + } + // Create path object + paths[path][method] = { + tags: [findMostSimilarTag(path)], + summary: `${method.toUpperCase()} ${originalPath}`, + security: [{ authToken: [] }], + parameters: pathParams, + responses: getResponseSchema(path, method) + } + // Add request body for POST/PUT/PATCH + if (['post', 'put', 'patch'].includes(method)) { + paths[path][method].requestBody = getRequestBodySchema(path, method) + } + }) + return paths +} + +// Process schema file +function processSchemaFile (filePath) { + const schemaFile = require(filePath) + const schemas = {} + + // 
Process all inner schemas first + if (schemaFile.innerSchemas) { + schemaFile.innerSchemas.forEach(schema => { + if (schema.id) { + const schemaName = schema.id.replace(/^\//, '') + schemas[schemaName] = convertJsonSchemaToOpenAPI(schema) + } + }) + } + + // Then process main schemas + if (schemaFile.mainSchemas) { + schemaFile.mainSchemas.forEach(schema => { + if (schema.id) { + const schemaName = schema.id.replace(/^\//, '') + schemas[schemaName] = convertJsonSchemaToOpenAPI(schema) + } + }) + } + + // Handle direct schema exports + Object.keys(schemaFile).forEach(key => { + if (typeof schemaFile[key] === 'object' && schemaFile[key].type) { + const schemaName = key + schemas[schemaName] = convertJsonSchemaToOpenAPI(schemaFile[key]) + } + }) + + return schemas +} + +// Main function +function generateSwagger () { + const routesDir = path.join(__dirname, '../src/routes') + const schemasDir = path.join(__dirname, '../src/schemas') + + // First, add base schemas that are commonly referenced + const baseSchemas = { + image: { + type: 'object', + properties: { + name: { type: 'string' }, + registry: { type: 'string' } + }, + required: ['name'] + }, + volumeMappings: { + type: 'object', + properties: { + hostDestination: { type: 'string' }, + containerDestination: { type: 'string' }, + accessMode: { type: 'string' }, + type: { type: 'string', enum: ['volume', 'bind'] } + }, + required: ['hostDestination', 'containerDestination', 'accessMode'] + }, + ports: { + type: 'object', + properties: { + internal: { type: 'integer' }, + external: { type: 'integer' }, + protocol: { type: 'string', enum: ['tcp', 'udp'] } + }, + required: ['internal', 'external'] + }, + extraHosts: { + type: 'object', + properties: { + name: { type: 'string' }, + address: { type: 'string' } + }, + required: ['name', 'address'] + }, + env: { + type: 'object', + properties: { + key: { type: 'string' }, + value: { type: 'string' }, + valueFromSecret: { type: 'string' }, + valueFromConfigMap: { type: 
'string' } + }, + required: ['key'], + oneOf: [ + { required: ['value'] }, + { required: ['valueFromSecret'] }, + { required: ['valueFromConfigMap'] } + ] + } + } + + // Initialize schemas with base schemas + const allSchemas = { ...baseSchemas } + + // Process all schema files + fs.readdirSync(schemasDir) + .filter(file => file.endsWith('.js')) + .forEach(file => { + const schemaDefinitions = processSchemaFile(path.join(schemasDir, file)) + Object.assign(allSchemas, schemaDefinitions) + }) + + // Add all schemas to the OpenAPI document + swaggerBase.components.schemas = allSchemas + + // Process all route files + fs.readdirSync(routesDir) + .filter(file => file.endsWith('.js')) + .forEach(file => { + const routePaths = processRouteFile(path.join(routesDir, file)) + Object.assign(swaggerBase.paths, routePaths) + }) + + // Write to YAML file + const yamlStr = yaml.dump(swaggerBase, { + noRefs: true, // Disable YAML anchors and references + lineWidth: -1, // Disable line wrapping + noCompatMode: true // Use modern YAML features + }) + fs.writeFileSync(path.join(__dirname, '../docs/swagger-test.yaml'), yamlStr) + console.log('Swagger YAML generated successfully!') +} + +generateSwagger() diff --git a/src/config/controller.yaml b/src/config/controller.yaml index 236bccf5..58f58c47 100644 --- a/src/config/controller.yaml +++ b/src/config/controller.yaml @@ -6,7 +6,23 @@ app: # Server Configuration server: port: 51121 # Server port number - devMode: true # Development mode flag + devMode: true + webSocket: + perMessageDeflate: false + allowExtensions: false # Disable all extensions + pingInterval: 30000 # Ping interval in milliseconds (30 seconds) + pongTimeout: 10000 # Pong timeout in milliseconds (10 seconds) + handshakeTimeout: 10000 # 10 seconds + maxPayload: 1048576 # 1MB + maxFrameSize: 65536 # 64KB + session: + timeout: 3600000 # Session timeout in milliseconds (1 hour) + maxConnections: 100 # Maximum connections per session + cleanupInterval: 30000 # Session 
cleanup interval (30 seconds) + security: + maxConnectionsPerIp: 10 + maxRequestsPerMinute: 60 + maxPayload: 1048576 # 1MB # ssl: # path: # key: "" # SSL key file path @@ -80,6 +96,9 @@ systemImages: router: "1": "ghcr.io/datasance/router:latest" "2": "ghcr.io/datasance/router:latest" + debug: + "1": "ghcr.io/datasance/node-debugger:latest" + "2": "ghcr.io/datasance/node-debugger:latest" # Diagnostics Configuration diagnostics: diff --git a/src/config/env-mapping.js b/src/config/env-mapping.js index 8418a043..b125c323 100644 --- a/src/config/env-mapping.js +++ b/src/config/env-mapping.js @@ -7,6 +7,16 @@ module.exports = { 'SERVER_PORT': 'server.port', 'SERVER_DEV_MODE': 'server.devMode', + 'WS_PING_INTERVAL': 'server.webSocket.pingInterval', + 'WS_PONG_TIMEOUT': 'server.webSocket.pongTimeout', + 'WS_MAX_PAYLOAD': 'server.webSocket.maxPayload', + 'WS_SESSION_TIMEOUT': 'server.webSocket.session.timeout', + 'WS_SESSION_MAX_CONNECTIONS': 'server.webSocket.session.maxConnections', + 'WS_CLEANUP_INTERVAL': 'server.webSocket.session.cleanupInterval', + 'WS_SECURITY_MAX_CONNECTIONS_PER_IP': 'server.webSocket.security.maxConnectionsPerIp', + 'WS_SECURITY_MAX_REQUESTS_PER_MINUTE': 'server.webSocket.security.maxRequestsPerMinute', + 'WS_SECURITY_MAX_PAYLOAD': 'server.webSocket.security.maxPayload', + // SSL Configuration 'SSL_PATH_KEY': 'server.ssl.path.key', 'SSL_PATH_CERT': 'server.ssl.path.cert', @@ -63,6 +73,8 @@ module.exports = { // System Images Configuration 'ROUTER_IMAGE_1': 'systemImages.router.1', 'ROUTER_IMAGE_2': 'systemImages.router.2', + 'DEBUG_IMAGE_1': 'systemImages.debug.1', + 'DEBUG_IMAGE_2': 'systemImages.debug.2', // Diagnostics Configuration 'DIAGNOSTICS_DIRECTORY': 'diagnostics.directory', diff --git a/src/controllers/application-controller.js b/src/controllers/application-controller.js index a0dc79bb..dc362314 100644 --- a/src/controllers/application-controller.js +++ b/src/controllers/application-controller.js @@ -49,6 +49,12 @@ const 
getApplicationEndPoint = async function (req) { return application } +const getSystemApplicationEndPoint = async function (req) { + const name = req.params.name + const application = await ApplicationService.getSystemApplicationEndPoint({ name }, false) + return application +} + const patchApplicationEndPoint = async function (req) { const application = req.body const name = req.params.name @@ -115,6 +121,7 @@ module.exports = { getApplicationsByUserEndPoint: (getApplicationsByUserEndPoint), getApplicationsBySystemEndPoint: (getApplicationsBySystemEndPoint), getApplicationEndPoint: (getApplicationEndPoint), + getSystemApplicationEndPoint: (getSystemApplicationEndPoint), getApplicationByIdEndPoint: (getApplicationByIdEndPoint), updateApplicationEndPoint: (updateApplicationEndPoint), updateApplicationYAMLEndPoint: (updateApplicationYAMLEndPoint), diff --git a/src/controllers/iofog-controller.js b/src/controllers/iofog-controller.js index b8f876ac..a89950f2 100644 --- a/src/controllers/iofog-controller.js +++ b/src/controllers/iofog-controller.js @@ -94,6 +94,23 @@ async function setFogPruneCommandEndPoint (req) { return FogService.setFogPruneCommandEndPoint(fog, false) } +async function enableNodeExecEndPoint (req) { + const execData = { + uuid: req.params.uuid, + image: req.body.image + } + + return FogService.enableNodeExecEndPoint(execData, false) +} + +async function disableNodeExecEndPoint (req) { + const fogData = { + uuid: req.params.uuid + } + + return FogService.disableNodeExecEndPoint(fogData, false) +} + module.exports = { createFogEndPoint: (createFogEndPoint), updateFogEndPoint: (updateFogEndPoint), @@ -105,5 +122,7 @@ module.exports = { setFogRebootCommandEndPoint: (setFogRebootCommandEndPoint), getHalHardwareInfoEndPoint: (getHalHardwareInfoEndPoint), getHalUsbInfoEndPoint: (getHalUsbInfoEndPoint), - setFogPruneCommandEndPoint: (setFogPruneCommandEndPoint) + setFogPruneCommandEndPoint: (setFogPruneCommandEndPoint), + enableNodeExecEndPoint: 
(enableNodeExecEndPoint), + disableNodeExecEndPoint: (disableNodeExecEndPoint) } diff --git a/src/controllers/microservices-controller.js b/src/controllers/microservices-controller.js index fc8418c2..c599fbb5 100644 --- a/src/controllers/microservices-controller.js +++ b/src/controllers/microservices-controller.js @@ -32,6 +32,11 @@ const getMicroserviceEndPoint = async function (req) { return MicroservicesService.getMicroserviceEndPoint(microserviceUuid, false) } +const getSystemMicroserviceEndPoint = async function (req) { + const microserviceUuid = req.params.uuid + return MicroservicesService.getSystemMicroserviceEndPoint(microserviceUuid, false) +} + const listMicroserviceByPubTagEndPoint = async function (req) { const pubTag = req.params.tag return MicroservicesService.listMicroserviceByPubTagEndPoint(pubTag) @@ -72,6 +77,14 @@ const updateMicroserviceYAMLEndPoint = async function (req) { return MicroservicesService.updateMicroserviceEndPoint(microserviceUuid, microservice, false) } +const updateSystemMicroserviceYAMLEndPoint = async function (req) { + const microserviceUuid = req.params.uuid + const fileContent = req.file.buffer.toString() + const microservice = await YAMLParserService.parseMicroserviceFile(fileContent) + await rvaluesVarSubstition(microservice, { self: microservice }) + return MicroservicesService.updateSystemMicroserviceEndPoint(microserviceUuid, microservice, false) +} + const deleteMicroserviceEndPoint = async function (req) { const microserviceUuid = req.params.uuid const microserviceData = req.body || {} @@ -86,6 +99,14 @@ const getMicroservicesByApplicationEndPoint = async function (req) { return MicroservicesService.listMicroservicesEndPoint({ applicationName, flowId }, false) } +const getSystemMicroservicesByApplicationEndPoint = async function (req) { + // API Retro compatibility + const flowId = req.query.flowId + + const applicationName = req.query.application + return MicroservicesService.listSystemMicroservicesEndPoint({ 
applicationName, flowId }, false) +} + const createMicroserviceRouteEndPoint = async function (req) { const sourceUuid = req.params.uuid const destUuid = req.params.receiverUuid @@ -191,6 +212,7 @@ const deleteSystemMicroserviceExecEndPoint = async function (req) { module.exports = { createMicroserviceOnFogEndPoint: (createMicroserviceOnFogEndPoint), getMicroserviceEndPoint: (getMicroserviceEndPoint), + getSystemMicroserviceEndPoint: (getSystemMicroserviceEndPoint), listMicroserviceByPubTagEndPoint: (listMicroserviceByPubTagEndPoint), listMicroserviceBySubTagEndPoint: (listMicroserviceBySubTagEndPoint), updateMicroserviceEndPoint: (updateMicroserviceEndPoint), @@ -199,6 +221,7 @@ module.exports = { rebuildSystemMicroserviceEndPoint: (rebuildSystemMicroserviceEndPoint), deleteMicroserviceEndPoint: (deleteMicroserviceEndPoint), getMicroservicesByApplicationEndPoint: (getMicroservicesByApplicationEndPoint), + getSystemMicroservicesByApplicationEndPoint: (getSystemMicroservicesByApplicationEndPoint), createMicroserviceRouteEndPoint: (createMicroserviceRouteEndPoint), deleteMicroserviceRouteEndPoint: (deleteMicroserviceRouteEndPoint), createMicroservicePortMappingEndPoint: (createMicroservicePortMappingEndPoint), @@ -213,6 +236,7 @@ module.exports = { deleteSystemMicroserviceVolumeMappingEndPoint: (deleteSystemMicroserviceVolumeMappingEndPoint), createMicroserviceYAMLEndPoint: (createMicroserviceYAMLEndPoint), updateMicroserviceYAMLEndPoint: (updateMicroserviceYAMLEndPoint), + updateSystemMicroserviceYAMLEndPoint: (updateSystemMicroserviceYAMLEndPoint), createMicroserviceExecEndPoint: (createMicroserviceExecEndPoint), deleteMicroserviceExecEndPoint: (deleteMicroserviceExecEndPoint), createSystemMicroserviceExecEndPoint: (createSystemMicroserviceExecEndPoint), diff --git a/src/data/constants.js b/src/data/constants.js index 1d17f71e..4c75fdb2 100644 --- a/src/data/constants.js +++ b/src/data/constants.js @@ -1,3 +1,4 @@ module.exports = { - ROUTER_CATALOG_NAME: 'Router' 
+ ROUTER_CATALOG_NAME: 'Router', + DEBUG_CATALOG_NAME: 'Debug' } diff --git a/src/data/managers/microservice-exec-status-manager.js b/src/data/managers/microservice-exec-status-manager.js new file mode 100644 index 00000000..ff5edec7 --- /dev/null +++ b/src/data/managers/microservice-exec-status-manager.js @@ -0,0 +1,37 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const BaseManager = require('./base-manager') +const models = require('../models') +const MicroserviceExecStatus = models.MicroserviceExecStatus + +const microserviceExecStatusExcludedFields = [ + 'id', + 'microservice_uuid', + 'microserviceUuid', + 'created_at', + 'updated_at' +] + +class MicroserviceExecStatusManager extends BaseManager { + getEntity () { + return MicroserviceExecStatus + } + + findAllExcludeFields (where, transaction) { + return this.findAllWithAttributes(where, { exclude: microserviceExecStatusExcludedFields }, transaction) + } +} + +const instance = new MicroserviceExecStatusManager() +module.exports = instance diff --git a/src/data/managers/microservice-manager.js b/src/data/managers/microservice-manager.js index 68ca789e..6d361ef6 100644 --- a/src/data/managers/microservice-manager.js +++ b/src/data/managers/microservice-manager.js @@ -492,6 +492,36 @@ class MicroserviceManager extends BaseManager { }, { transaction: transaction }) } + async findAllSystemExcludeFields (where, transaction) { + return Microservice.findAll({ + include: [ + { + model: Application, + as: 'application', + required: true, + where: { isSystem: true } + }, + { + model: Tags, + 
as: 'pubTags', + attributes: ['value'], + through: { attributes: [] } + }, + { + model: Tags, + as: 'subTags', + attributes: ['value'], + through: { attributes: [] } + } + ], + where: where, + order: [['name', 'ASC']], + attributes: { + exclude: microserviceExcludedFields + } + }, { transaction: transaction }) + } + findOneWithCategory (where, transaction) { return Microservice.findOne({ include: [ diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql index f3fed896..e87893b1 100644 --- a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql +++ b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql @@ -771,7 +771,20 @@ ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(32); ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(32); ALTER TABLE Microservices ADD COLUMN exec_enabled BOOLEAN DEFAULT false; -ALTER TABLE MicroserviceStatuses ADD COLUMN exec_session_id TEXT; +ALTER TABLE MicroserviceStatuses ADD COLUMN exec_session_ids TEXT; +ALTER TABLE Microservices ADD COLUMN schedule INT DEFAULT 50; + +CREATE TABLE IF NOT EXISTS MicroserviceExecStatuses ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + status VARCHAR(255) DEFAULT 'PENDING', + exec_session_id VARCHAR(255), + microservice_uuid VARCHAR(32), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecStatuses (microservice_uuid); COMMIT; \ No newline at end of file diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql index 2a0c682a..74cca3e7 100644 --- a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql +++ b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql @@ -771,4 +771,18 @@ ALTER TABLE "Microservices" ADD COLUMN pid_mode VARCHAR(32); ALTER TABLE "Microservices" ADD COLUMN ipc_mode 
VARCHAR(32); ALTER TABLE "Microservices" ADD COLUMN exec_enabled BOOLEAN DEFAULT false; -ALTER TABLE "MicroserviceStatuses" ADD COLUMN exec_session_id TEXT; +ALTER TABLE "MicroserviceStatuses" ADD COLUMN exec_session_ids TEXT; + +ALTER TABLE "Microservices" ADD COLUMN schedule INT DEFAULT 50; + +CREATE TABLE IF NOT EXISTS "MicroserviceExecStatuses" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + status VARCHAR(255) DEFAULT 'PENDING', + exec_session_id VARCHAR(255), + microservice_uuid VARCHAR(32), + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_exec_status_microservice_uuid ON "MicroserviceExecStatuses" (microservice_uuid); diff --git a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql index 42e98b3b..75b56bf1 100644 --- a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql @@ -758,4 +758,18 @@ ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(32); ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(32); ALTER TABLE Microservices ADD COLUMN exec_enabled BOOLEAN DEFAULT false; -ALTER TABLE MicroserviceStatuses ADD COLUMN exec_session_id TEXT; \ No newline at end of file +ALTER TABLE MicroserviceStatuses ADD COLUMN exec_session_ids TEXT; + +ALTER TABLE Microservices ADD COLUMN schedule INT DEFAULT 50; + +CREATE TABLE IF NOT EXISTS MicroserviceExecStatuses ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + status VARCHAR(255) DEFAULT 'PENDING', + exec_session_id VARCHAR(255), + microservice_uuid VARCHAR(32), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecStatuses (microservice_uuid); \ No newline at end of file diff 
--git a/src/data/models/index.js b/src/data/models/index.js index 483a3c8a..4eb51ae1 100644 --- a/src/data/models/index.js +++ b/src/data/models/index.js @@ -71,6 +71,7 @@ db.initDB = async (isStart) => { // Configure system images const fogTypes = await db.FogType.findAll({}) await configureImage(db, constants.ROUTER_CATALOG_NAME, fogTypes, config.get('systemImages.router', {})) + await configureImage(db, constants.DEBUG_CATALOG_NAME, fogTypes, config.get('systemImages.debug', {})) } } diff --git a/src/data/models/microservice.js b/src/data/models/microservice.js index f414fa89..7806b389 100644 --- a/src/data/models/microservice.js +++ b/src/data/models/microservice.js @@ -75,6 +75,11 @@ module.exports = (sequelize, DataTypes) => { field: 'ipc_mode', defaultValue: '' }, + schedule: { + type: DataTypes.INTEGER, + field: 'schedule', + defaultValue: 50 + }, imageSnapshot: { type: DataTypes.TEXT, field: 'image_snapshot', @@ -168,6 +173,11 @@ module.exports = (sequelize, DataTypes) => { as: 'microserviceStatus' }) + Microservice.hasOne(models.MicroserviceExecStatus, { + foreignKey: 'microservice_uuid', + as: 'microserviceExecStatus' + }) + Microservice.hasMany(models.MicroserviceEnv, { foreignKey: 'microservice_uuid', as: 'env' diff --git a/src/data/models/microserviceExecStatus.js b/src/data/models/microserviceExecStatus.js new file mode 100644 index 00000000..d9688405 --- /dev/null +++ b/src/data/models/microserviceExecStatus.js @@ -0,0 +1,36 @@ +'use strict' +module.exports = (sequelize, DataTypes) => { + const MicroserviceExecStatus = sequelize.define('MicroserviceExecStatus', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + status: { + type: DataTypes.TEXT, + field: 'status' + }, + execSessionId: { + type: DataTypes.TEXT, + field: 'exec_session_id' + } + }, { + tableName: 'MicroserviceExecStatuses', + // add the timestamp attributes (updatedAt, createdAt) + timestamps: true, + underscored: 
true + }) + MicroserviceExecStatus.associate = function (models) { + MicroserviceExecStatus.belongsTo(models.Microservice, { + foreignKey: { + name: 'microserviceUuid', + field: 'microservice_uuid' + }, + as: 'microservice', + onDelete: 'cascade' + }) + } + return MicroserviceExecStatus +} diff --git a/src/data/models/microservicestatus.js b/src/data/models/microservicestatus.js index fde87048..61a71b37 100644 --- a/src/data/models/microservicestatus.js +++ b/src/data/models/microservicestatus.js @@ -66,10 +66,21 @@ module.exports = (sequelize, DataTypes) => { defaultValue: '', field: 'ip_address' }, - execSessionId: { + execSessionIds: { type: DataTypes.TEXT, - defaultValue: '', - field: 'exec_session_id' + defaultValue: '[]', + field: 'exec_session_ids', + get () { + const value = this.getDataValue('execSessionIds') + try { + return JSON.parse(value) + } catch (e) { + return [] + } + }, + set (value) { + this.setDataValue('execSessionIds', JSON.stringify(value)) + } } }, { tableName: 'MicroserviceStatuses', diff --git a/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql index c9e84aa9..62c66bc5 100644 --- a/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql +++ b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql @@ -11,7 +11,8 @@ VALUES ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 
0, 0, 'none.png', NULL, false, 1); INSERT INTO `FogTypes` (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -34,7 +35,9 @@ VALUES (4, 1, 'ghcr.io/datasance/edge-guard:latest'), (4, 2, 'ghcr.io/datasance/edge-guard:latest'), (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'); + (5, 2, 'ghcr.io/datasance/router:latest'), + (6, 1, 'ghcr.io/datasance/node-debugger:latest'), + (6, 2, 'ghcr.io/datasance/node-debugger:latest'); COMMIT; \ No newline at end of file diff --git a/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql index 629a028c..19a9ae04 100644 --- a/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql +++ b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql @@ -11,7 +11,8 @@ VALUES ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); INSERT INTO "FogTypes" (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -34,6 +35,8 @@ VALUES (4, 1, 'ghcr.io/datasance/edge-guard:latest'), (4, 2, 'ghcr.io/datasance/edge-guard:latest'), (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'); + (5, 2, 'ghcr.io/datasance/router:latest'), + (6, 1, 
'ghcr.io/datasance/node-debugger:latest'), + (6, 2, 'ghcr.io/datasance/node-debugger:latest'); COMMIT; \ No newline at end of file diff --git a/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql b/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql index 3835917c..da57229f 100644 --- a/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql +++ b/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql @@ -9,7 +9,8 @@ VALUES ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); INSERT INTO `FogTypes` (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -32,4 +33,6 @@ VALUES (4, 1, 'ghcr.io/datasance/edge-guard:latest'), (4, 2, 'ghcr.io/datasance/edge-guard:latest'), (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'); + (5, 2, 'ghcr.io/datasance/router:latest'), + (6, 1, 'ghcr.io/datasance/node-debugger:latest'), + (6, 2, 'ghcr.io/datasance/node-debugger:latest'); diff --git a/src/enums/fog-state.js b/src/enums/fog-state.js index 8c3a4d09..2c7d712a 100644 --- a/src/enums/fog-state.js +++ b/src/enums/fog-state.js @@ -17,6 +17,7 @@ const fogState = { STOPPED: 'STOPPED', WAITING: 'WAITING', WARNING: 'WARNING', + DEBUGGING: 'DEBUGGING', DEPROVISIONED: 'DEPROVISIONED', ERROR: 'ERROR', NOT_PROVISIONED: 'NOT_PROVISIONED' diff 
--git a/src/enums/microservice-state.js b/src/enums/microservice-state.js index 6a596c40..ac985f44 100644 --- a/src/enums/microservice-state.js +++ b/src/enums/microservice-state.js @@ -30,4 +30,10 @@ const microserviceState = { CREATING: 'CREATING' } -module.exports = microserviceState +const microserviceExecState = { + PENDING: 'PENDING', + ACTIVE: 'ACTIVE', + INACTIVE: 'INACTIVE' +} + +module.exports = { microserviceState, microserviceExecState } diff --git a/src/jobs/fog-status-job.js b/src/jobs/fog-status-job.js index 8842d118..6f96ff7a 100644 --- a/src/jobs/fog-status-job.js +++ b/src/jobs/fog-status-job.js @@ -16,8 +16,9 @@ const TransactionDecorator = require('../decorators/transaction-decorator') const FogManager = require('../data/managers/iofog-manager') const MicroserviceManager = require('../data/managers/microservice-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') +const MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') const MicroserviceService = require('../services/microservices-service') -const MicroserviceStates = require('../enums/microservice-state') +const { microserviceState, microserviceExecState } = require('../enums/microservice-state') const FogStates = require('../enums/fog-state') const Config = require('../config') @@ -64,7 +65,11 @@ async function _updateMicroserviceStatus (unknownFogUuids, transaction) { const microserviceStatusIds = microservices .filter((microservice) => microservice.microserviceStatus) .map((microservice) => microservice.microserviceStatus.id) - await MicroserviceStatusManager.update({ id: microserviceStatusIds }, { status: MicroserviceStates.UNKNOWN }, transaction) + const microserviceExecStatusIds = microservices + .filter((microservice) => microservice.microserviceExecStatus) + .map((microservice) => microservice.microserviceExecStatus.id) + await MicroserviceStatusManager.update({ id: microserviceStatusIds }, { 
status: microserviceState.UNKNOWN }, transaction) + await MicroserviceExecStatusManager.update({ id: microserviceExecStatusIds }, { execSessionId: '', status: microserviceExecState.INACTIVE }, transaction) return microservices } diff --git a/src/jobs/stopped-app-status-job.js b/src/jobs/stopped-app-status-job.js index e6cf6f25..c31b77e8 100644 --- a/src/jobs/stopped-app-status-job.js +++ b/src/jobs/stopped-app-status-job.js @@ -15,7 +15,9 @@ const TransactionDecorator = require('../decorators/transaction-decorator') const MicroserviceManager = require('../data/managers/microservice-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') -const MicroserviceStates = require('../enums/microservice-state') +const MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') +const { microserviceState, microserviceExecState } = require('../enums/microservice-state') + const Config = require('../config') const ApplicationManager = require('../data/managers/application-manager') @@ -41,10 +43,19 @@ async function _updateMicroserviceStatusStopped (stoppedMicroservices, transacti const microserviceUuids = stoppedMicroservices.map((microservice) => microservice.uuid) const microservices = await MicroserviceManager.findAllWithStatuses({ uuid: microserviceUuids }, transaction) const microserviceStatusIds = microservices - .filter((microservice) => microservice.microserviceStatus && (microservice.microserviceStatus.status === MicroserviceStates.DELETED || - microservice.microserviceStatus.status === MicroserviceStates.DELETING)) + .filter((microservice) => microservice.microserviceStatus && (microservice.microserviceStatus.status === microserviceState.DELETED || + microservice.microserviceStatus.status === microserviceState.DELETING)) .map((microservice) => microservice.microserviceStatus.id) - await MicroserviceStatusManager.update({ id: microserviceStatusIds }, { status: MicroserviceStates.STOPPED }, 
transaction) + const microserviceExecStatusIds = microservices + .filter((microservice) => + microservice.microserviceStatus && + (microservice.microserviceStatus.status === microserviceState.DELETED || + microservice.microserviceStatus.status === microserviceState.DELETING) && + microservice.microserviceExecStatus + ) + .map((microservice) => microservice.microserviceExecStatus.id) + await MicroserviceStatusManager.update({ id: microserviceStatusIds }, { status: microserviceState.STOPPED }, transaction) + await MicroserviceExecStatusManager.update({ id: microserviceExecStatusIds }, { execSessionId: '', status: microserviceExecState.INACTIVE }, transaction) return microservices } diff --git a/src/routes/agent.js b/src/routes/agent.js index 9196b4e5..ccfa6a7b 100644 --- a/src/routes/agent.js +++ b/src/routes/agent.js @@ -14,7 +14,7 @@ const constants = require('../helpers/constants') const AgentController = require('../controllers/agent-controller') const ResponseDecorator = require('../decorators/response-decorator') - +const WebSocketServer = require('../websocket/server') const Errors = require('../helpers/errors') const logger = require('../logger') @@ -638,5 +638,48 @@ module.exports = [ logger.apiRes({ req: req, res: res, responseObject: responseObject }) } + }, + { + method: 'ws', + path: '/api/v3/agent/exec/:microserviceUuid', + middleware: async (ws, req) => { + logger.apiReq(req) + try { + const token = req.headers.authorization + if (!token) { + logger.error('WebSocket connection failed: Missing authentication token') + try { + ws.close(1008, 'Missing authentication token') + } catch (error) { + logger.error('Error closing WebSocket:' + JSON.stringify({ + error: error.message, + originalError: 'Missing authentication token' + })) + } + return + } + + // Initialize WebSocket connection for agent + const wsServer = WebSocketServer.getInstance() + await wsServer.handleConnection(ws, req) + } catch (error) { + logger.error('Error in agent WebSocket 
connection:' + JSON.stringify({ + error: error.message, + stack: error.stack, + url: req.url, + microserviceUuid: req.params.microserviceUuid + })) + try { + if (ws.readyState === ws.OPEN) { + ws.close(1008, error.message || 'Authentication failed') + } + } catch (closeError) { + logger.error('Error closing agent WebSocket:' + JSON.stringify({ + error: closeError.message, + originalError: error.message + })) + } + } + } } ] diff --git a/src/routes/application.js b/src/routes/application.js index 0a6e6f60..3feb7c94 100644 --- a/src/routes/application.js +++ b/src/routes/application.js @@ -173,6 +173,39 @@ module.exports = [ }) } }, + { + method: 'get', + path: '/api/v3/application/system/:name', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + const getSystemApplicationEndPoint = ResponseDecorator.handleErrors(ApplicationController.getSystemApplicationEndPoint, successCode, errorCodes) + + // Add keycloak.protect() middleware to protect the route + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const responseObject = await getSystemApplicationEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + return null + }) + } + }, { method: 'patch', path: '/api/v3/application/:name', diff --git a/src/routes/iofog.js b/src/routes/iofog.js index 73ab91ac..aae10cab 100644 --- a/src/routes/iofog.js +++ b/src/routes/iofog.js @@ -372,6 +372,76 @@ module.exports = [ .status(responseObject.code) .send(responseObject.body) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } 
+ }, + { + method: 'post', + path: '/api/v3/iofog/:uuid/exec', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errCodes = [ + { + code: 400, + errors: [Errors.ValidationError] + }, + { + code: 401, + errors: [Errors.AuthenticationError] + }, + { + code: 404, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE'])(req, res, async () => { + const enableNodeExecEndPoint = ResponseDecorator.handleErrors(FogController.enableNodeExecEndPoint, + successCode, errCodes) + const responseObject = await enableNodeExecEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, + { + method: 'delete', + path: '/api/v3/iofog/:uuid/exec', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errCodes = [ + { + code: 400, + errors: [Errors.ValidationError] + }, + { + code: 401, + errors: [Errors.AuthenticationError] + }, + { + code: 404, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE'])(req, res, async () => { + const disableNodeExecEndPoint = ResponseDecorator.handleErrors(FogController.disableNodeExecEndPoint, + successCode, errCodes) + const responseObject = await disableNodeExecEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) }) } diff --git a/src/routes/microservices.js b/src/routes/microservices.js index 7c1d1f02..b0b08a5f 100644 --- a/src/routes/microservices.js +++ b/src/routes/microservices.js @@ -16,6 +16,7 @@ const ResponseDecorator = require('../decorators/response-decorator') const Errors = 
require('../helpers/errors') const logger = require('../logger') const keycloak = require('../config/keycloak.js').initKeycloak() +const WebSocketServer = require('../websocket/server') module.exports = [ { @@ -45,6 +46,33 @@ module.exports = [ }) } }, + { + method: 'get', + path: '/api/v3/microservices/system', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const getSystemMicroservicesByApplicationEndPoint = ResponseDecorator.handleErrors(MicroservicesController.getSystemMicroservicesByApplicationEndPoint, + successCode, errorCodes) + const responseObject = await getSystemMicroservicesByApplicationEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, { method: 'post', path: '/api/v3/microservices', @@ -141,6 +169,37 @@ module.exports = [ }) } }, + { + method: 'get', + path: '/api/v3/microservices/system/:uuid', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE', 'Developer', 'Viewer'])(req, res, async () => { + const getSystemMicroserviceEndPoint = ResponseDecorator.handleErrors(MicroservicesController.getSystemMicroserviceEndPoint, + successCode, errorCodes) + const responseObject = await getSystemMicroserviceEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) 
+ .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, { method: 'get', path: '/api/v3/microservices/pub/:tag', @@ -382,6 +441,43 @@ module.exports = [ }) } }, + { + method: 'patch', + path: '/api/v3/microservices/system/yaml/:uuid', + supportSubstitution: true, + fileInput: 'microservice', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_NO_CONTENT + const errorCodes = [ + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + }, + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: [Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_NOT_FOUND, + errors: [Errors.NotFoundError] + } + ] + + await keycloak.protect(['SRE'])(req, res, async () => { + const updateSystemMicroserviceYAMLEndPoint = ResponseDecorator.handleErrors(MicroservicesController.updateSystemMicroserviceYAMLEndPoint, + successCode, errorCodes) + const responseObject = await updateSystemMicroserviceYAMLEndPoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, { method: 'delete', path: '/api/v3/microservices/:uuid', @@ -984,4 +1080,47 @@ module.exports = [ }) } }, + { + method: 'ws', + path: '/api/v3/microservices/exec/:microserviceUuid', + middleware: async (ws, req) => { + logger.apiReq(req) + try { + const token = req.headers.authorization + if (!token) { + logger.error('WebSocket connection failed: Missing authentication token') + try { + ws.close(1008, 'Missing authentication token') + } catch (error) { + logger.error('Error closing WebSocket:' + JSON.stringify({ + error: error.message, + originalError: 'Missing authentication token' + })) + } + return + } + + // Initialize WebSocket connection for microservice + const wsServer = 
WebSocketServer.getInstance() + await wsServer.handleConnection(ws, req) + } catch (error) { + logger.error('Error in microservice WebSocket connection:' + JSON.stringify({ + error: error.message, + stack: error.stack, + url: req.url, + microserviceUuid: req.params.microserviceUuid + })) + try { + if (ws.readyState === ws.OPEN) { + ws.close(1008, error.message || 'Authentication failed') + } + } catch (closeError) { + logger.error('Error closing microservice WebSocket:' + JSON.stringify({ + error: closeError.message, + originalError: error.message + })) + } + } + } + } ] diff --git a/src/schemas/agent.js b/src/schemas/agent.js index e42787db..cf4e6c9c 100644 --- a/src/schemas/agent.js +++ b/src/schemas/agent.js @@ -145,7 +145,7 @@ const microserviceStatus = { 'memoryUsage': { 'type': 'number' }, 'ipAddress': { 'type': 'string' }, 'ipAddressExternal': { 'type': 'string' }, - 'execSessionId': { 'type': 'string' } + 'execSessionIds': { 'type': 'array', 'items': { 'type': 'string' } } }, 'required': ['id'], 'additionalProperties': true diff --git a/src/schemas/iofog.js b/src/schemas/iofog.js index ca423c12..eaf608bc 100644 --- a/src/schemas/iofog.js +++ b/src/schemas/iofog.js @@ -46,13 +46,6 @@ const iofogCreate = { 'messagingPort': { 'type': 'integer', 'minimum': 1, 'maximum': 65535 }, 'interRouterPort': { 'type': 'integer', 'minimum': 1, 'maximum': 65535 }, 'edgeRouterPort': { 'type': 'integer', 'minimum': 1, 'maximum': 65535 }, - 'requireSsl': { 'type': 'string' }, - 'sslProfile': { 'type': 'string' }, - 'saslMechanisms': { 'type': 'string' }, - 'authenticatePeer': { 'type': 'string' }, - 'caCert': { 'type': 'string' }, - 'tlsCert': { 'type': 'string' }, - 'tlsKey': { 'type': 'string' }, 'host': { 'type': 'string' }, 'tags': { 'type': 'array', @@ -118,13 +111,6 @@ const iofogUpdate = { 'messagingPort': { 'type': 'integer', 'minimum': 1, 'maximum': 65535 }, 'interRouterPort': { 'type': 'integer', 'minimum': 1, 'maximum': 65535 }, 'edgeRouterPort': { 'type': 
'integer', 'minimum': 1, 'maximum': 65535 }, - 'requireSsl': { 'type': 'string' }, - 'sslProfile': { 'type': 'string' }, - 'saslMechanisms': { 'type': 'string' }, - 'authenticatePeer': { 'type': 'string' }, - 'caCert': { 'type': 'string' }, - 'tlsCert': { 'type': 'string' }, - 'tlsKey': { 'type': 'string' }, 'host': { 'type': 'string' }, 'upstreamRouters': { 'type': 'array', @@ -277,9 +263,30 @@ const iofogTag = { 'type': 'string' } +const enableNodeExec = { + 'id': '/enableNodeExec', + 'type': 'object', + 'properties': { + 'uuid': { 'type': 'string' }, + 'image': { 'type': 'string' } + }, + 'required': ['uuid'], + 'additionalProperties': true +} + +const disableNodeExec = { + 'id': '/disableNodeExec', + 'type': 'object', + 'properties': { + 'uuid': { 'type': 'string' } + }, + 'required': ['uuid'], + 'additionalProperties': true +} + module.exports = { mainSchemas: [iofogCreate, iofogUpdate, iofogDelete, iofogGet, iofogGenerateProvision, iofogSetVersionCommand, - iofogReboot, iofogFilters, halGet, iofogPrune, defaultRouterCreate, iofogTag], + iofogReboot, iofogFilters, halGet, iofogPrune, defaultRouterCreate, iofogTag, enableNodeExec, disableNodeExec], innerSchemas: [filter, iofogTag] } diff --git a/src/schemas/microservice.js b/src/schemas/microservice.js index e3bed274..44aacc3d 100644 --- a/src/schemas/microservice.js +++ b/src/schemas/microservice.js @@ -31,6 +31,11 @@ const microserviceCreate = { 'iofogUuid': { 'type': 'string' }, 'agentName': { 'type': 'string' }, 'rootHostAccess': { 'type': 'boolean' }, + 'schedule': { + 'type': 'integer', + 'minimum': 0, + 'maximum': 100 + }, 'logSize': { 'type': 'integer' }, 'imageSnapshot': { 'type': 'string' }, 'volumeMappings': { @@ -91,6 +96,11 @@ const microserviceUpdate = { 'agentName': { 'type': 'string' }, 'rootHostAccess': { 'type': 'boolean' }, 'logSize': { 'type': 'integer', 'minimum': 0 }, + 'schedule': { + 'type': 'integer', + 'minimum': 0, + 'maximum': 100 + }, 'volumeMappings': { 'type': 'array', 'items': { 
'$ref': '/volumeMappings' } diff --git a/src/server.js b/src/server.js index 2d180fba..30ad6b03 100755 --- a/src/server.js +++ b/src/server.js @@ -18,6 +18,7 @@ initialize().then(() => { const logger = require('./logger') const db = require('./data/models') const CleanupService = require('./services/cleanup-service') + const WebSocketServer = require('./websocket/server') const bodyParser = require('body-parser') const cookieParser = require('cookie-parser') @@ -88,14 +89,21 @@ initialize().then(() => { global.appRoot = path.resolve(__dirname) const registerRoute = (route) => { - const middlewares = [route.middleware] - if (route.supportSubstitution) { - middlewares.unshift(substitutionMiddleware) - } - if (route.fileInput) { - middlewares.unshift(uploadFile(route.fileInput)) + if (route.method.toLowerCase() === 'ws') { + // Handle WebSocket routes by registering them with our custom WebSocket server + const wsServer = WebSocketServer.getInstance() + wsServer.registerRoute(route.path, route.middleware) + } else { + // Handle HTTP routes + const middlewares = [route.middleware] + if (route.supportSubstitution) { + middlewares.unshift(substitutionMiddleware) + } + if (route.fileInput) { + middlewares.unshift(uploadFile(route.fileInput)) + } + app[route.method.toLowerCase()](route.path, ...middlewares) } - app[route.method.toLowerCase()](route.path, ...middlewares) } const setupMiddleware = function (routeName) { @@ -145,6 +153,12 @@ initialize().then(() => { logger.info(`==> 🌎 API Listening on port ${ports.api}. Open up http://localhost:${ports.api}/ in your browser.`) jobs.forEach((job) => job.run()) }) + + // Initialize WebSocket server + const wsConfig = config.get('server.webSocket') + const wsServer = new WebSocketServer(wsConfig) + wsServer.initialize(apiServer) + logger.info(`==> 🌎 WebSocket API server listening on port ${ports.api}. 
Open up ws://localhost:${ports.api}/.`) registerServers(apiServer, viewerServer) } @@ -174,6 +188,13 @@ initialize().then(() => { logger.info(`==> 🌎 HTTPS API server listening on port ${ports.api}. Open up https://localhost:${ports.api}/ in your browser.`) jobs.forEach((job) => job.run()) }) + + // Initialize WebSocket server with SSL + const wsConfig = config.get('server.webSocket') + const wsServer = new WebSocketServer(wsConfig) + wsServer.initialize(apiServer) + logger.info(`==> 🌎 WSS API server listening on port ${ports.api}. Open up wss://localhost:${ports.api}/.`) + registerServers(apiServer, viewerServer) } catch (e) { logger.error('Error loading SSL certificates. Please check your configuration.') diff --git a/src/services/agent-service.js b/src/services/agent-service.js index 3d0d3bd4..fa0ff1d8 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -28,7 +28,8 @@ const FogVersionCommandManager = require('../data/managers/iofog-version-command const StraceManager = require('../data/managers/strace-manager') const RegistryManager = require('../data/managers/registry-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') -const MicroserviceStates = require('../enums/microservice-state') +const MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') +const { microserviceState, microserviceExecState } = require('../enums/microservice-state') const FogStates = require('../enums/fog-state') const Validator = require('../schemas') const Errors = require('../helpers/errors') @@ -104,7 +105,13 @@ const agentDeprovision = async function (deprovisionData, fog, transaction) { await MicroserviceStatusManager.update( { microserviceUuid: deprovisionData.microserviceUuids }, - { status: MicroserviceStates.DELETING }, + { status: microserviceState.DELETING }, + transaction + ) + + await MicroserviceExecStatusManager.update( + { microserviceUuid: 
deprovisionData.microserviceUuids }, + { status: microserviceExecState.INACTIVE }, transaction ) @@ -290,7 +297,7 @@ const _updateMicroserviceStatuses = async function (microserviceStatus, fog, tra percentage: status.percentage, errorMessage: status.errorMessage, ipAddress: status.ipAddress, - execSessionId: status.execSessionId + execSessionIds: status.execSessionIds } microserviceStatus = AppHelper.deleteUndefinedFields(microserviceStatus) const microservice = await MicroserviceManager.findOne({ @@ -368,7 +375,8 @@ const getAgentMicroservices = async function (fog, transaction) { routes, isConsumer, isRouter, - execEnabled: microservice.execEnabled + execEnabled: microservice.execEnabled, + schedule: microservice.schedule } response.push(responseMicroservice) diff --git a/src/services/application-service.js b/src/services/application-service.js index 4b806ebc..4e0aee1a 100644 --- a/src/services/application-service.js +++ b/src/services/application-service.js @@ -344,6 +344,20 @@ async function getApplication (conditions, isCLI, transaction) { return application } +async function getSystemApplication (conditions, isCLI, transaction) { + const where = isCLI + ? 
{ ...conditions, isSystem: true } + : { ...conditions, isSystem: true } + const attributes = { exclude: ['created_at', 'updated_at'] } + + const applicationRaw = await ApplicationManager.findOnePopulated(where, attributes, transaction) + if (!applicationRaw) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_FLOW_ID, conditions.name || conditions.id)) + } + const application = await _buildApplicationObject(applicationRaw, transaction) + return application +} + const getApplicationEndPoint = async function (conditions, isCLI, transaction) { const application = await getApplication(conditions, isCLI, transaction) return application @@ -362,6 +376,10 @@ const _checkForDuplicateName = async function (name, applicationId, transaction) } } +const getSystemApplicationEndPoint = async function (conditions, isCLI, transaction) { + const application = await getSystemApplication(conditions, isCLI, transaction) + return application +} async function _updateChangeTrackingsAndDeleteMicroservicesByApplicationId (conditions, deleteMicroservices, transaction) { const microservices = await ApplicationManager.findApplicationMicroservices(conditions, transaction) if (!microservices) { @@ -392,5 +410,7 @@ module.exports = { getSystemApplicationsEndPoint: TransactionDecorator.generateTransaction(getSystemApplicationsEndPoint), getAllApplicationsEndPoint: TransactionDecorator.generateTransaction(getAllApplicationsEndPoint), getApplicationEndPoint: TransactionDecorator.generateTransaction(getApplicationEndPoint), - getApplication: getApplication + getSystemApplicationEndPoint: TransactionDecorator.generateTransaction(getSystemApplicationEndPoint), + getApplication: getApplication, + getSystemApplication: getSystemApplication } diff --git a/src/services/catalog-service.js b/src/services/catalog-service.js index 5c0b7883..ae0be7db 100644 --- a/src/services/catalog-service.js +++ b/src/services/catalog-service.js @@ -60,9 +60,10 @@ const 
updateCatalogItemEndPoint = async function (id, data, isCLI, transaction) const listCatalogItemsEndPoint = async function (isCLI, transaction) { const where = isCLI ? {} - : { - [Op.or]: [{ category: { [Op.ne]: 'SYSTEM' } }, { category: null }] - } + // : { + // [Op.or]: [{ category: { [Op.ne]: 'SYSTEM' } }, { category: null }] + // } + : {} const attributes = isCLI ? {} @@ -77,10 +78,11 @@ const listCatalogItemsEndPoint = async function (isCLI, transaction) { async function getCatalogItem (id, isCLI, transaction) { const where = isCLI ? { id: id } - : { - id: id, - [Op.or]: [{ category: { [Op.ne]: 'SYSTEM' } }, { category: null }] - } + // : { + // id: id, + // [Op.or]: [{ category: { [Op.ne]: 'SYSTEM' } }, { category: null }] + // } + : { id: id } const attributes = isCLI ? {} @@ -161,6 +163,15 @@ async function getProxyCatalogItem (transaction) { }, transaction) } +async function getDebugCatalogItem (transaction) { + return CatalogItemManager.findOne({ + name: DBConstants.DEBUG_CATALOG_NAME, + category: 'SYSTEM', + publisher: 'Datasance', + registry_id: 1 + }, transaction) +} + async function getBluetoothCatalogItem (transaction) { return CatalogItemManager.findOne({ name: 'RESTBlue', @@ -379,5 +390,6 @@ module.exports = { getBluetoothCatalogItem: getBluetoothCatalogItem, getHalCatalogItem: getHalCatalogItem, getRouterCatalogItem: getRouterCatalogItem, + getDebugCatalogItem: getDebugCatalogItem, getProxyCatalogItem: getProxyCatalogItem } diff --git a/src/services/iofog-service.js b/src/services/iofog-service.js index a4ffbf31..2c8853cd 100644 --- a/src/services/iofog-service.js +++ b/src/services/iofog-service.js @@ -33,7 +33,9 @@ const EdgeResourceService = require('./edge-resource-service') const RouterManager = require('../data/managers/router-manager') const MicroserviceExtraHostManager = require('../data/managers/microservice-extra-host-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') +const 
MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') const RouterConnectionManager = require('../data/managers/router-connection-manager') +const CatalogItemImageManager = require('../data/managers/catalog-item-image-manager') const RouterService = require('./router-service') const Constants = require('../helpers/constants') const Op = require('sequelize').Op @@ -683,7 +685,7 @@ async function _deleteFogRouter (fogData, transaction) { // Delete router msvc const routerCatalog = await CatalogService.getRouterCatalogItem(transaction) await MicroserviceManager.delete({ catalogItemId: routerCatalog.id, iofogUuid: fogData.uuid }, transaction) - await ApplicationManager.delete({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + // await ApplicationManager.delete({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) } async function deleteFogEndPoint (fogData, isCLI, transaction) { @@ -858,7 +860,7 @@ async function generateProvisioningKeyEndPoint (fogData, isCLI, transaction) { const newProvision = { iofogUuid: fogData.uuid, - provisionKey: AppHelper.generateRandomString(16), + provisionKey: AppHelper.generateUUID(), expirationTime: new Date().getTime() + (10 * 60 * 1000) } @@ -1024,13 +1026,26 @@ async function _createHalMicroserviceForFog (fogData, oldFog, transaction) { iofogUuid: fogData.uuid, rootHostAccess: true, logSize: Constants.MICROSERVICE_DEFAULT_LOG_SIZE, + schedule: 1, configLastUpdated: Date.now() } - const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + let application + try { + application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + } catch (error) { + const systemApplicationData = { + name: `system-${fogData.uuid.toLowerCase()}`, + isActivated: true, + isSystem: true + } + await ApplicationManager.create(systemApplicationData, transaction) + application = await 
ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + } halMicroserviceData.applicationId = application.id await MicroserviceManager.create(halMicroserviceData, transaction) await MicroserviceStatusManager.create({ microserviceUuid: halMicroserviceData.uuid }, transaction) + await MicroserviceExecStatusManager.create({ microserviceUuid: halMicroserviceData.uuid }, transaction) } async function _deleteHalMicroserviceByFog (fogData, transaction) { @@ -1056,13 +1071,26 @@ async function _createBluetoothMicroserviceForFog (fogData, oldFog, transaction) iofogUuid: fogData.uuid, rootHostAccess: true, logSize: Constants.MICROSERVICE_DEFAULT_LOG_SIZE, + schedule: 1, configLastUpdated: Date.now() } - const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + let application + try { + application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + } catch (error) { + const systemApplicationData = { + name: `system-${fogData.uuid.toLowerCase()}`, + isActivated: true, + isSystem: true + } + await ApplicationManager.create(systemApplicationData, transaction) + application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) + } bluetoothMicroserviceData.applicationId = application.id await MicroserviceManager.create(bluetoothMicroserviceData, transaction) await MicroserviceStatusManager.create({ microserviceUuid: bluetoothMicroserviceData.uuid }, transaction) + await MicroserviceExecStatusManager.create({ microserviceUuid: bluetoothMicroserviceData.uuid }, transaction) } async function _deleteBluetoothMicroserviceByFog (fogData, transaction) { @@ -1090,6 +1118,140 @@ async function setFogPruneCommandEndPoint (fogData, isCLI, transaction) { await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.prune, transaction) } +async function enableNodeExecEndPoint (execData, 
isCLI, transaction) { + await Validator.validate(execData, Validator.schemas.enableNodeExec) + const fog = await FogManager.findOne({ uuid: execData.uuid }, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, execData.uuid)) + } + + const debugMicroserviceData = { + uuid: AppHelper.generateUUID(), + name: `debug-${execData.uuid.toLowerCase()}`, + config: '{}', + iofogUuid: execData.uuid, + ipcMode: 'host', + pidMode: 'host', + rootHostAccess: true, + logSize: Constants.MICROSERVICE_DEFAULT_LOG_SIZE, + schedule: 0, + execEnabled: true, + configLastUpdated: Date.now() + } + + if (execData.image) { + const images = [ + { fogTypeId: 1, containerImage: execData.image }, + { fogTypeId: 2, containerImage: execData.image } + ] + debugMicroserviceData.images = images + } else { + const debugCatalog = await CatalogService.getDebugCatalogItem(transaction) + debugMicroserviceData.catalogItemId = debugCatalog.id + } + + let application + try { + application = await ApplicationManager.findOne({ name: `system-${execData.uuid.toLowerCase()}` }, transaction) + } catch (error) { + const systemApplicationData = { + name: `system-${execData.uuid.toLowerCase()}`, + isActivated: true, + isSystem: true + } + await ApplicationManager.create(systemApplicationData, transaction) + application = await ApplicationManager.findOne({ name: `system-${execData.uuid.toLowerCase()}` }, transaction) + } + debugMicroserviceData.applicationId = application.id + let microservice + + // Check if microservice already exists + const existingMicroservice = await MicroserviceManager.findOneWithCategory({ name: `debug-${execData.uuid.toLowerCase()}` }, transaction) + + if (existingMicroservice) { + // Update existing microservice + const updateData = { + ipcMode: debugMicroserviceData.ipcMode, + pidMode: debugMicroserviceData.pidMode, + rootHostAccess: debugMicroserviceData.rootHostAccess, + logSize: debugMicroserviceData.logSize, + schedule: 
debugMicroserviceData.schedule, + configLastUpdated: debugMicroserviceData.configLastUpdated, + execEnabled: debugMicroserviceData.execEnabled + } + + if (execData.image) { + updateData.images = debugMicroserviceData.images + } else { + updateData.catalogItemId = debugMicroserviceData.catalogItemId + } + + microservice = await MicroserviceManager.updateAndFind( + { uuid: existingMicroservice.uuid }, + updateData, + transaction + ) + + if (execData.image) { + const images = [ + { fogTypeId: 1, containerImage: execData.image }, + { fogTypeId: 2, containerImage: execData.image } + ] + await _updateImages(images, existingMicroservice.uuid, transaction) + } + + await ChangeTrackingService.update(execData.uuid, ChangeTrackingService.events.microserviceList, transaction) + await ChangeTrackingService.update(execData.uuid, ChangeTrackingService.events.microserviceExecSessions, transaction) + return microservice + } else { + // Create new microservice + try { + const microservice = await MicroserviceManager.create(debugMicroserviceData, transaction) + await MicroserviceStatusManager.create({ microserviceUuid: debugMicroserviceData.uuid }, transaction) + await MicroserviceExecStatusManager.create({ microserviceUuid: debugMicroserviceData.uuid }, transaction) + + if (execData.image) { + const images = [ + { fogTypeId: 1, containerImage: execData.image }, + { fogTypeId: 2, containerImage: execData.image } + ] + await _createMicroserviceImages(microservice, images, transaction) + } + + await ChangeTrackingService.update(execData.uuid, ChangeTrackingService.events.microserviceList, transaction) + await ChangeTrackingService.update(execData.uuid, ChangeTrackingService.events.microserviceExecSessions, transaction) + + return microservice + } catch (error) { + logger.error(`Error in enableNodeExecEndPoint: ${error.message}`) + throw error + } + } +} + +async function disableNodeExecEndPoint (fogData, isCLI, transaction) { + await Validator.validate(fogData, 
Validator.schemas.disableNodeExec) + + try { + const fog = await FogManager.findOne({ uuid: fogData.uuid }, transaction) + if (!fog) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) + } + + const microservice = await MicroserviceManager.findOne({ name: `debug-${fogData.uuid.toLowerCase()}` }, transaction) + if (!microservice) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, fogData.uuid)) + } + + await MicroserviceManager.delete({ uuid: microservice.uuid }, transaction) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceList, transaction) + await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceExecSessions, transaction) + } catch (error) { + logger.error(`Error in disableNodeExecEndPoint: ${error.message}`) + throw error + } +} + /** * Finds services that match the fog node's service tags * @param {Array} serviceTags - Array of service tags from fog node @@ -1219,6 +1381,23 @@ function _mergeTcpListener (routerConfig, listenerObj) { return routerConfig } +async function _createMicroserviceImages (microservice, images, transaction) { + const newImages = [] + for (const img of images) { + const newImg = Object.assign({}, img) + newImg.microserviceUuid = microservice.uuid + newImages.push(newImg) + } + return CatalogItemImageManager.bulkCreate(newImages, transaction) +} + +async function _updateImages (images, microserviceUuid, transaction) { + await CatalogItemImageManager.delete({ + microserviceUuid: microserviceUuid + }, transaction) + return _createMicroserviceImages({ uuid: microserviceUuid }, images, transaction) +} + module.exports = { createFogEndPoint: TransactionDecorator.generateTransaction(createFogEndPoint), updateFogEndPoint: TransactionDecorator.generateTransaction(updateFogEndPoint), @@ -1232,6 +1411,8 @@ module.exports = { getHalUsbInfoEndPoint: 
TransactionDecorator.generateTransaction(getHalUsbInfoEndPoint), getFog: getFog, setFogPruneCommandEndPoint: TransactionDecorator.generateTransaction(setFogPruneCommandEndPoint), + enableNodeExecEndPoint: TransactionDecorator.generateTransaction(enableNodeExecEndPoint), + disableNodeExecEndPoint: TransactionDecorator.generateTransaction(disableNodeExecEndPoint), _extractServiceTags, _findMatchingServices: TransactionDecorator.generateTransaction(_findMatchingServices), _buildTcpListenerForFog, diff --git a/src/services/microservices-service.js b/src/services/microservices-service.js index c98a918d..0e13993e 100644 --- a/src/services/microservices-service.js +++ b/src/services/microservices-service.js @@ -14,6 +14,7 @@ const TransactionDecorator = require('../decorators/transaction-decorator') const MicroserviceManager = require('../data/managers/microservice-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') +const MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') const MicroserviceArgManager = require('../data/managers/microservice-arg-manager') const MicroserviceCdiDevManager = require('../data/managers/microservice-cdi-device-manager') const MicroserviceCapAddManager = require('../data/managers/microservice-cap-add-manager') @@ -99,6 +100,26 @@ async function listMicroservicesEndPoint (opt, isCLI, transaction) { } } +async function listSystemMicroservicesEndPoint (opt, isCLI, transaction) { + const { applicationName, flowId } = opt + let application = await _validateSystemApplication(applicationName, isCLI, transaction) + + if (flowId) { + // _validateApplication wil try by ID if it fails finding by name + application = await _validateSystemApplication(flowId, isCLI, transaction) + } + const where = application ? 
{ applicationId: application.id, delete: false } : { delete: false, applicationId: { [Op.ne]: null } } + + const microservices = await MicroserviceManager.findAllSystemExcludeFields(where, transaction) + const res = await Promise.all(microservices.map(async (microservice) => { + return _buildGetMicroserviceResponse(microservice.dataValues, transaction) + })) + + return { + microservices: res + } +} + async function getMicroserviceEndPoint (microserviceUuid, isCLI, transaction) { if (!isCLI) { await _validateMicroserviceOnGet(microserviceUuid, transaction) @@ -115,6 +136,27 @@ async function getMicroserviceEndPoint (microserviceUuid, isCLI, transaction) { return _buildGetMicroserviceResponse(microservice.dataValues, transaction) } +async function getSystemMicroserviceEndPoint (microserviceUuid, isCLI, transaction) { + if (!isCLI) { + await _validateSystemMicroserviceOnGet(microserviceUuid, transaction) + } + + const microservice = await MicroserviceManager.findOneExcludeFields({ + uuid: microserviceUuid, delete: false + }, transaction) + + if (!microservice) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, microserviceUuid)) + } + + const app = await ApplicationManager.findOne({ id: microservice.applicationId }, transaction) + if (!app.isSystem) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, microserviceUuid)) + } + + return _buildGetMicroserviceResponse(microservice.dataValues, transaction) +} + function _validateImagesAgainstCatalog (catalogItem, images) { const allImagesEmpty = images.reduce((result, b) => result && b.containerImage === '', true) if (allImagesEmpty) { @@ -385,6 +427,7 @@ async function createMicroserviceEndPoint (microserviceData, isCLI, transaction) } await _createMicroserviceStatus(microservice, transaction) + await _createMicroserviceExecStatus(microservice, transaction) const res = { uuid: microservice.uuid, @@ -469,6 +512,7 @@ async 
function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, + schedule: microserviceData.schedule, pidMode: microserviceData.pidMode, ipcMode: microserviceData.ipcMode, cdiDevices: microserviceData.cdiDevices, @@ -599,6 +643,7 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD microserviceDataUpdate.runtime || microserviceDataUpdate.volumeMappings || microserviceDataUpdate.ports || + microserviceDataUpdate.schedule || extraHosts ) const updatedMicroservice = await MicroserviceManager.updateAndFind(query, microserviceDataUpdate, transaction) @@ -692,6 +737,7 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, + schedule: microserviceData.schedule, pidMode: microserviceData.pidMode, ipcMode: microserviceData.ipcMode, cdiDevices: microserviceData.cdiDevices, @@ -826,6 +872,7 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i microserviceDataUpdate.runtime || microserviceDataUpdate.volumeMappings || microserviceDataUpdate.ports || + microserviceDataUpdate.schedule || extraHosts ) const updatedMicroservice = await MicroserviceManager.updateAndFind(query, microserviceDataUpdate, transaction) @@ -1484,6 +1531,7 @@ async function _createMicroservice (microserviceData, isCLI, transaction) { platform: microserviceData.platform, runtime: microserviceData.runtime, registryId: microserviceData.registryId || 1, + schedule: microserviceData.schedule || 50, logSize: (microserviceData.logSize || constants.MICROSERVICE_DEFAULT_LOG_SIZE) * 1 } @@ -1539,12 +1587,44 @@ async function _validateApplication (name, isCLI, transaction) { return application } +async function _validateSystemApplication (name, isCLI, transaction) { + if (!name) { + return null + } + + // 
Force name conversion to string for PG + const where = isCLI + ? { name: name.toString(), isSystem: true } + : { name: name.toString(), isSystem: true } + + const application = await ApplicationManager.findOne(where, transaction) + if (!application) { + // Try with id + const where = isCLI + ? { id: name, isSystem: true } + : { id: name, isSystem: true } + + const application = await ApplicationManager.findOne(where, transaction) + if (!application) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_FLOW_ID, name)) + } + return application + } + return application +} + async function _createMicroserviceStatus (microservice, transaction) { return MicroserviceStatusManager.create({ microserviceUuid: microservice.uuid }, transaction) } +async function _createMicroserviceExecStatus (microservice, transaction) { + return MicroserviceExecStatusManager.create({ + microserviceUuid: microservice.uuid + }, transaction) +} + async function _createMicroserviceImages (microservice, images, transaction) { const newImages = [] for (const img of images) { @@ -1777,6 +1857,16 @@ async function _validateMicroserviceOnGet (microserviceUuid, transaction) { } } +async function _validateSystemMicroserviceOnGet (microserviceUuid, transaction) { + const where = { + uuid: microserviceUuid + } + const microservice = await MicroserviceManager.findSystemMicroserviceOnGet(where, transaction) + if (!microservice) { + throw new Errors.NotFoundError(ErrorMessages.INVALID_MICROSERVICE_USER) + } +} + async function _getLogicalRoutesByMicroservice (microserviceUuid, transaction) { const res = [] const query = { @@ -1834,6 +1924,7 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { const pubTags = microservice.pubTags ? microservice.pubTags.map(t => t.value) : [] const subTags = microservice.subTags ? 
microservice.subTags.map(t => t.value) : [] const status = await MicroserviceStatusManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) + const execStatus = await MicroserviceExecStatusManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) // build microservice response const res = Object.assign({}, microservice) res.ports = [] @@ -1854,6 +1945,9 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { if (status && status.length) { res.status = status[0] } + if (execStatus && execStatus.length) { + res.execStatus = execStatus[0] + } res.pubTags = pubTags res.subTags = subTags @@ -1901,7 +1995,7 @@ async function listMicroserviceBySubTagEndPoint (subTag, transaction) { } } -async function createExecEndPoint (microserviceUuid, transaction) { +async function createExecEndPoint (microserviceUuid, isCLI, transaction) { const microservice = await MicroserviceManager.findOneWithCategory({ uuid: microserviceUuid }, transaction) if (microservice.catalogItem && microservice.catalogItem.category === 'SYSTEM') { throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SYSTEM_MICROSERVICE_UPDATE, microserviceUuid)) @@ -1921,7 +2015,7 @@ async function createExecEndPoint (microserviceUuid, transaction) { } } -async function deleteExecEndPoint (microserviceUuid, transaction) { +async function deleteExecEndPoint (microserviceUuid, isCLI, transaction) { const microservice = await MicroserviceManager.findOneWithCategory({ uuid: microserviceUuid }, transaction) if (microservice.catalogItem && microservice.catalogItem.category === 'SYSTEM') { throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.SYSTEM_MICROSERVICE_UPDATE, microserviceUuid)) @@ -1996,11 +2090,13 @@ module.exports = { deleteVolumeMappingEndPoint: TransactionDecorator.generateTransaction(deleteVolumeMappingEndPoint), deleteSystemVolumeMappingEndPoint: 
TransactionDecorator.generateTransaction(deleteSystemVolumeMappingEndPoint), getMicroserviceEndPoint: TransactionDecorator.generateTransaction(getMicroserviceEndPoint), + getSystemMicroserviceEndPoint: TransactionDecorator.generateTransaction(getSystemMicroserviceEndPoint), getReceiverMicroservices, isMicroserviceConsumer, isMicroserviceRouter, listMicroservicePortMappingsEndPoint: TransactionDecorator.generateTransaction(listPortMappingsEndPoint), listMicroservicesEndPoint: TransactionDecorator.generateTransaction(listMicroservicesEndPoint), + listSystemMicroservicesEndPoint: TransactionDecorator.generateTransaction(listSystemMicroservicesEndPoint), listVolumeMappingsEndPoint: TransactionDecorator.generateTransaction(listVolumeMappingsEndPoint), updateMicroserviceEndPoint: TransactionDecorator.generateTransaction(updateMicroserviceEndPoint), updateSystemMicroserviceEndPoint: TransactionDecorator.generateTransaction(updateSystemMicroserviceEndPoint), diff --git a/src/services/router-service.js b/src/services/router-service.js index 8d9569bf..07daa12d 100644 --- a/src/services/router-service.js +++ b/src/services/router-service.js @@ -20,6 +20,7 @@ const ErrorMessages = require('../helpers/error-messages') const MicroserviceManager = require('../data/managers/microservice-manager') const MicroserviceCapAddManager = require('../data/managers/microservice-cap-add-manager') const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') +const MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') const ApplicationManager = require('../data/managers/application-manager') const MicroservicePortManager = require('../data/managers/microservice-port-manager') const RouterConnectionManager = require('../data/managers/router-connection-manager') @@ -308,6 +309,7 @@ async function _createRouterMicroservice (isEdge, uuid, microserviceConfig, tran iofogUuid: uuid, rootHostAccess: false, logSize: 
constants.MICROSERVICE_DEFAULT_LOG_SIZE, + schedule: 0, configLastUpdated: Date.now(), env: [ { @@ -334,6 +336,7 @@ async function _createRouterMicroservice (isEdge, uuid, microserviceConfig, tran routerMicroserviceData.applicationId = application.id const routerMicroservice = await MicroserviceManager.create(routerMicroserviceData, transaction) await MicroserviceStatusManager.create({ microserviceUuid: routerMicroserviceData.uuid }, transaction) + await MicroserviceExecStatusManager.create({ microserviceUuid: routerMicroserviceData.uuid }, transaction) for (const capAdd of capAddValues) { await MicroserviceCapAddManager.create({ microserviceUuid: routerMicroserviceData.uuid, diff --git a/src/services/yaml-parser-service.js b/src/services/yaml-parser-service.js index b620450c..93953fb6 100644 --- a/src/services/yaml-parser-service.js +++ b/src/services/yaml-parser-service.js @@ -265,9 +265,9 @@ const parseMicroserviceYAML = async (microservice) => { agentName: lget(microservice, 'agent.name'), registryId, ...container, - rootHostAccess: lget(microservice, 'rootHostAccess', false), - pidMode: lget(microservice, 'pidMode', ''), - ipcMode: lget(microservice, 'ipcMode', ''), + rootHostAccess: lget(microservice, 'container.rootHostAccess', false), + pidMode: lget(microservice, 'container.pidMode', ''), + ipcMode: lget(microservice, 'container.ipcMode', ''), annotations: container.annotations != null ? 
JSON.stringify(container.annotations) : undefined, capAdd: lget(microservice, 'container.capAdd', []), capDrop: lget(microservice, 'container.capDrop', []), @@ -280,7 +280,8 @@ const parseMicroserviceYAML = async (microservice) => { ...microservice.msRoutes, pubTags: lget(microservice, 'msRoutes.pubTags', []), subTags: lget(microservice, 'msRoutes.subTags', []), - application: microservice.application + application: microservice.application, + schedule: lget(microservice, 'schedule', 50) } _deleteUndefinedFields(microserviceData) return microserviceData diff --git a/src/websocket/error-handler.js b/src/websocket/error-handler.js new file mode 100644 index 00000000..f8a372e8 --- /dev/null +++ b/src/websocket/error-handler.js @@ -0,0 +1,54 @@ +const logger = require('../logger') + +class WebSocketError extends Error { + constructor (code, message) { + super(message) + this.code = code + this.name = 'WebSocketError' + } +} + +class WebSocketErrorHandler { + static handleError (ws, error) { + logger.error('WebSocket error:' + JSON.stringify({ error })) + + if (error instanceof WebSocketError) { + ws.send(JSON.stringify({ + type: 'error', + code: error.code, + message: error.message + })) + ws.close(error.code, error.message) + } else { + ws.send(JSON.stringify({ + type: 'error', + code: 1011, + message: 'Internal server error' + })) + ws.close(1011, 'Internal server error') + } + } + + static createError (code, message) { + return new WebSocketError(code, message) + } + + static getErrorCode (error) { + if (error instanceof WebSocketError) { + return error.code + } + return 1011 // Internal server error + } + + static getErrorMessage (error) { + if (error instanceof WebSocketError) { + return error.message + } + return 'Internal server error' + } +} + +module.exports = { + WebSocketError, + WebSocketErrorHandler +} diff --git a/src/websocket/server.js b/src/websocket/server.js new file mode 100644 index 00000000..4acf055c --- /dev/null +++ b/src/websocket/server.js @@ 
-0,0 +1,1022 @@ +const WebSocket = require('ws') +const config = require('../config') +const logger = require('../logger') +const Errors = require('../helpers/errors') +const SessionManager = require('./session-manager') +const { WebSocketError } = require('./error-handler') +const MicroserviceManager = require('../data/managers/microservice-manager') +const ApplicationManager = require('../data/managers/application-manager') +const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') +const { microserviceState, microserviceExecState } = require('../enums/microservice-state') +const MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') +const keycloak = require('../config/keycloak.js').initKeycloak() +const AuthDecorator = require('../decorators/authorization-decorator') +const TransactionDecorator = require('../decorators/transaction-decorator') +const msgpack = require('@msgpack/msgpack') + +const MESSAGE_TYPES = { + STDIN: 0, + STDOUT: 1, + STDERR: 2, + CONTROL: 3, + CLOSE: 4, + ACTIVATION: 5 +} + +class WebSocketServer { + constructor () { + this.wss = null + this.agentSessions = new Map() + this.userSessions = new Map() + this.connectionLimits = new Map() + this.rateLimits = new Map() + this.sessionManager = new SessionManager(config.get('server.webSocket')) + this.config = { + pingInterval: process.env.WS_PING_INTERVAL || config.get('server.webSocket.pingInterval'), + pongTimeout: process.env.WS_PONG_TIMEOUT || config.get('server.webSocket.pongTimeout'), + maxPayload: process.env.WS_MAX_PAYLOAD || config.get('server.webSocket.maxPayload'), + sessionTimeout: process.env.WS_SESSION_TIMEOUT || config.get('server.webSocket.session.timeout'), + cleanupInterval: process.env.WS_CLEANUP_INTERVAL || config.get('server.webSocket.session.cleanupInterval'), + sessionMaxConnections: process.env.WS_SESSION_MAX_CONNECTIONS || config.get('server.webSocket.session.maxConnections') + } + } + + // MessagePack 
encoding/decoding helpers with improved error handling + encodeMessage (message) { + try { + // Ensure we're only encoding the actual message content + const encoded = msgpack.encode(message) + logger.debug('Encoded MessagePack message:' + JSON.stringify({ + type: typeof message, + isMap: message instanceof Map, + keys: message instanceof Map ? Array.from(message.keys()) : Object.keys(message), + hasExecId: message instanceof Map ? message.has('execId') : 'execId' in message, + hasMicroserviceUuid: message instanceof Map ? message.has('microserviceUuid') : 'microserviceUuid' in message, + encodedLength: encoded.length, + firstBytes: encoded.subarray(0, 16).toString('hex') + })) + return encoded + } catch (error) { + logger.error('Failed to encode message:' + JSON.stringify({ + error: error.message, + message: message + })) + throw new WebSocketError(1008, 'Message encoding failed') + } + } + + decodeMessage (buffer) { + try { + const decoded = msgpack.decode(buffer) + logger.debug('Decoded MessagePack message:' + JSON.stringify({ + type: typeof decoded, + isMap: decoded instanceof Map, + keys: decoded instanceof Map ? Array.from(decoded.keys()) : Object.keys(decoded), + hasExecId: decoded instanceof Map ? decoded.has('execId') : 'execId' in decoded, + hasMicroserviceUuid: decoded instanceof Map ? 
decoded.has('microserviceUuid') : 'microserviceUuid' in decoded, + bufferLength: buffer.length, + firstBytes: buffer.subarray(0, 16).toString('hex') + })) + return decoded + } catch (error) { + logger.error('Failed to decode MessagePack message:' + JSON.stringify({ + error: error.message, + bufferLength: buffer.length, + firstBytes: buffer.subarray(0, 16).toString('hex') + })) + throw error + } + } + + initialize (server) { + // Strict WebSocket configuration with no extensions and RSV control + const options = { + server, + maxPayload: process.env.WS_SECURITY_MAX_PAYLOAD || config.get('server.webSocket.security.maxPayload'), + perMessageDeflate: false, // Explicitly disable compression + clientTracking: true, + verifyClient: this.verifyClient.bind(this), + // Strict protocol handling + handleProtocols: (protocols) => { + // Accept any protocol but ensure strict mode + return protocols[0] + } + } + + logger.info('Initializing WebSocket server with strict options:' + JSON.stringify(options)) + this.wss = new WebSocket.Server(options) + + // Handle WebSocket server errors + this.wss.on('error', (error) => { + logger.error('WebSocket server error:' + JSON.stringify({ + error: error.message, + stack: error.stack + })) + }) + + // Handle individual connection errors + this.wss.on('connection', (ws, req) => { + logger.info('New WebSocket connection established:' + JSON.stringify({ + url: req.url, + headers: req.headers, + remoteAddress: req.socket.remoteAddress + })) + + // Set strict WebSocket options for this connection + ws.binaryType = 'arraybuffer' // Force binary type to be arraybuffer + + if (ws._socket) { + ws._socket.setNoDelay(true) + ws._socket.setKeepAlive(true, 30000) // Enable keep-alive instead of disabling + } + + // Add detailed frame-level logging + ws.on('message', (data, isBinary) => { + const buffer = Buffer.from(data) + logger.debug('WebSocket frame received:' + JSON.stringify({ + isBinary, + length: buffer.length, + firstBytes: buffer.subarray(0, 
16).toString('hex'), + lastBytes: buffer.subarray(-16).toString('hex'), + url: req.url + })) + }) + + // Add error handler for each connection + ws.on('error', (error) => { + logger.error('WebSocket connection error:' + JSON.stringify({ + error: error.message, + stack: error.stack, + url: req.url + })) + if (ws.readyState === WebSocket.OPEN) { + try { + ws.close(1002, 'Protocol error') + } catch (closeError) { + logger.error('Error closing WebSocket:' + JSON.stringify({ + error: closeError.message, + originalError: error.message + })) + } + } + }) + + // Wrap handleConnection in try-catch to prevent unhandled errors + try { + this.handleConnection(ws, req) + } catch (error) { + logger.error('Unhandled error in handleConnection:' + JSON.stringify({ + error: error.message, + stack: error.stack, + url: req.url + })) + if (ws.readyState === WebSocket.OPEN) { + try { + ws.close(1002, 'Internal server error') + } catch (closeError) { + logger.error('Error closing WebSocket:' + JSON.stringify({ + error: closeError.message, + originalError: error.message + })) + } + } + } + }) + + // Add global error handler for the server + process.on('uncaughtException', (error) => { + logger.error('Uncaught exception in WebSocket server:' + JSON.stringify({ + error: error.message, + stack: error.stack + })) + // Don't let the error crash the process + }) + + process.on('unhandledRejection', (reason, promise) => { + logger.error('Unhandled rejection in WebSocket server:' + JSON.stringify({ + reason: reason, + promise: promise + })) + // Don't let the error crash the process + }) + + this.sessionManager.startCleanup() + } + + async verifyClient (info, callback) { + try { + // Check connection limits + const clientIp = info.req.socket.remoteAddress + const currentConnections = this.connectionLimits.get(clientIp) || 0 + if (currentConnections >= (process.env.WS_SECURITY_MAX_CONNECTIONS_PER_IP || config.get('server.webSocket.security.maxConnectionsPerIp'))) { + callback(new Error('Too many 
connections'), false) + return + } + + // Check rate limits + const now = Date.now() + const rateLimit = this.rateLimits.get(clientIp) || { count: 0, resetTime: now + 60000 } + if (now > rateLimit.resetTime) { + rateLimit.count = 0 + rateLimit.resetTime = now + 60000 + } + + if (rateLimit.count >= (process.env.WS_SECURITY_MAX_REQUESTS_PER_MINUTE || config.get('server.webSocket.security.maxRequestsPerMinute'))) { + callback(new Error('Rate limit exceeded'), false) + return + } + + rateLimit.count++ + this.rateLimits.set(clientIp, rateLimit) + + callback(null, true) + } catch (error) { + callback(new Error('Internal server error'), false) + } + } + + extractMicroserviceUuid (url) { + // Match UUID pattern in the URL + const uuidPattern = /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i + const match = url.match(uuidPattern) + return match ? match[0] : null + } + + handleConnection (ws, req) { + // Add error handler for this connection + ws.on('error', (error) => { + logger.error('WebSocket connection error:' + JSON.stringify({ + error: error.message, + stack: error.stack, + url: req.url, + headers: req.headers + })) + // Don't let the error crash the process + if (ws.readyState === WebSocket.OPEN) { + try { + ws.close(1002, 'Protocol error') + } catch (closeError) { + logger.error('Error closing WebSocket:' + JSON.stringify({ + error: closeError.message, + originalError: error.message + })) + } + } + }) + + // Wrap the entire connection handling in a transaction + TransactionDecorator.generateTransaction(async (transaction) => { + try { + const token = req.headers.authorization + if (!token) { + logger.error('WebSocket connection failed: Missing authentication token') + try { + ws.close(1008, 'Missing authentication token') + } catch (error) { + logger.error('Error closing WebSocket:' + error.message) + } + return + } + + const microserviceUuid = this.extractMicroserviceUuid(req.url) + if (!microserviceUuid) { + logger.error('WebSocket connection 
failed: Invalid endpoint - no UUID found') + try { + ws.close(1008, 'Invalid endpoint') + } catch (error) { + logger.error('Error closing WebSocket:' + error.message) + } + return + } + + // Determine connection type and handle accordingly + if (req.url.startsWith('/api/v3/agent/exec/')) { + await this.handleAgentConnection(ws, req, token, microserviceUuid, transaction) + } else if (req.url.startsWith('/api/v3/microservices/exec/')) { + await this.handleUserConnection(ws, req, token, microserviceUuid, transaction) + } else { + logger.error('WebSocket connection failed: Invalid endpoint') + try { + ws.close(1008, 'Invalid endpoint') + } catch (error) { + logger.error('Error closing WebSocket:' + error.message) + } + return + } + } catch (error) { + logger.error('WebSocket connection error:' + JSON.stringify({ + error: error.message, + stack: error.stack, + url: req.url, + headers: req.headers + })) + + // Handle WebSocket errors gracefully + try { + if (ws.readyState === ws.OPEN) { + ws.close(1008, error.message || 'Internal server error') + await MicroserviceExecStatusManager.update( + { microserviceUuid: this.extractMicroserviceUuid(req.url) }, + { execSessionId: '', status: microserviceExecState.INACTIVE }, + transaction + ) + await MicroserviceManager.update({ uuid: this.extractMicroserviceUuid(req.url) }, { execEnabled: false }, transaction) + } + } catch (closeError) { + logger.error('Error closing WebSocket connection:' + JSON.stringify({ + error: closeError.message, + originalError: error.message + })) + } + } + })().catch(error => { + logger.error('Unhandled WebSocket transaction error:' + JSON.stringify({ + error: error.message, + stack: error.stack + })) + }) + } + + async handleAgentConnection (ws, req, token, microserviceUuid, transaction) { + try { + logger.debug('[WS-CONN] Processing agent connection:' + JSON.stringify({ + url: req.url, + microserviceUuid, + remoteAddress: req.socket.remoteAddress + })) + + // Set up message handler for initial 
message only + const initialMessageHandler = async (data, isBinary) => { + logger.debug('[WS-INIT] Received initial message from agent:' + JSON.stringify({ + isBinary, + url: req.url, + microserviceUuid + })) + + if (!isBinary) { + logger.error('[WS-ERROR] Expected binary message from agent') + ws.close(1008, 'Expected binary message') + return + } + + const buffer = Buffer.from(data) + logger.debug('[WS-INIT] Processing initial message from agent:' + JSON.stringify({ + isBinary, + length: buffer.length, + firstBytes: buffer.subarray(0, 16).toString('hex'), + lastBytes: buffer.subarray(-16).toString('hex') + })) + + let execMsg + try { + execMsg = this.decodeMessage(buffer) + logger.info('[WS-INIT] Decoded MessagePack from agent:' + JSON.stringify(execMsg)) + } catch (err) { + logger.error('[WS-ERROR] Failed to decode MessagePack from agent:' + JSON.stringify({ + error: err.message, + stack: err.stack + })) + ws.close(1008, 'Invalid MessagePack') + return + } + + const { execId, microserviceUuid: msgMicroserviceUuid } = execMsg + if (!execId || !msgMicroserviceUuid) { + logger.error('[WS-ERROR] Agent message missing execId or microserviceUuid:' + JSON.stringify(execMsg)) + ws.close(1008, 'Missing required fields') + return + } + + // Remove the initial message handler + ws.removeListener('message', initialMessageHandler) + + // Try to activate session with the execId from the message + const session = await this.sessionManager.tryActivateSession(msgMicroserviceUuid, execId, ws, true, transaction) + if (session) { + logger.info('[WS-SESSION] Session activated for agent:' + JSON.stringify({ + execId, + microserviceUuid: msgMicroserviceUuid + })) + // Set up message forwarding + logger.debug('[WS-FORWARD] Setting up message forwarding:' + JSON.stringify({ + execId, + microserviceUuid: msgMicroserviceUuid + })) + this.setupMessageForwarding(execId, transaction) + } else { + await this.sessionManager.addPendingAgent(msgMicroserviceUuid, execId, ws, transaction) + await 
MicroserviceExecStatusManager.update( + { microserviceUuid: microserviceUuid }, + { execSessionId: execId, status: microserviceExecState.PENDING }, + transaction + ) + logger.info('[WS-SESSION] No pending user found for agent, added to pending list:' + JSON.stringify({ + execId, + microserviceUuid: msgMicroserviceUuid + })) + } + } + + // Bind the message handler BEFORE validation + ws.on('message', initialMessageHandler) + + // Now validate the connection + const fog = await this.validateAgentConnection(token, microserviceUuid, transaction) + logger.debug('[WS-VALIDATE] Agent connection validated:' + JSON.stringify({ + fogUuid: fog.uuid, + microserviceUuid, + url: req.url + })) + + // Handle connection close + ws.on('close', () => { + for (const [execId, session] of this.sessionManager.sessions) { + if (session.agent === ws) { + this.cleanupSession(execId, transaction) + } + } + this.sessionManager.removePendingAgent(microserviceUuid, ws) + logger.debug('[WS-CLOSE] Agent connection closed:' + JSON.stringify({ + url: req.url, + microserviceUuid + })) + }) + + // Handle errors + ws.on('error', (error) => { + logger.error('[WS-ERROR] Agent connection error:' + JSON.stringify({ + error: error.message, + url: req.url, + microserviceUuid + })) + }) + } catch (error) { + logger.error('[WS-ERROR] Error in handleAgentConnection:' + JSON.stringify({ + error: error.message, + stack: error.stack, + url: req.url, + microserviceUuid + })) + if (ws.readyState === ws.OPEN) { + ws.close(1008, error.message || 'Connection error') + } + } + } + + async handleUserConnection (ws, req, token, microserviceUuid, transaction) { + try { + const { execSessionId } = await this.validateUserConnection(token, microserviceUuid, transaction) + logger.info('User connection: available execSessionId:' + execSessionId) + + // Check if there's already an active session for this microservice + const existingSession = Array.from(this.sessionManager.sessions.values()) + .find(session => 
session.microserviceUuid === microserviceUuid && session.user && session.user.readyState === WebSocket.OPEN) + + if (existingSession) { + logger.error('Microservice has already active exec session:' + JSON.stringify({ + microserviceUuid, + existingExecId: existingSession.execId + })) + ws.close(1008, 'Microservice has already active exec session.') + return + } + + // Get all active execIds + const activeExecIds = Array.from(this.sessionManager.sessions.keys()) + logger.info('Currently active execIds:' + JSON.stringify(activeExecIds)) + + // Get pending agent execIds + const pendingAgentExecIds = this.sessionManager.getPendingAgentExecIds(microserviceUuid) + logger.info('Pending agent execIds:' + JSON.stringify(pendingAgentExecIds)) + + // Find an available execId that is both not active AND has a pending agent + const availableExecId = execSessionId && !activeExecIds.includes(execSessionId) && pendingAgentExecIds.includes(execSessionId) + ? execSessionId + : null + + if (!availableExecId) { + logger.error('No available exec session for user') + ws.close(1008, 'No available exec session for this microservice.') + return + } + logger.info('User assigned execId:' + availableExecId) + + // Check if there's a pending agent with this execId + const pendingAgent = this.sessionManager.findPendingAgentForExecId(microserviceUuid, availableExecId) + if (pendingAgent) { + logger.info('Found pending agent for execId:' + JSON.stringify({ + execId: availableExecId, + microserviceUuid, + agentState: pendingAgent.readyState + })) + // Try to activate session with the selected execId + const session = this.sessionManager.tryActivateSession(microserviceUuid, availableExecId, ws, false, transaction) + if (session) { + logger.info('Session activated for user:', { + execId: availableExecId, + microserviceUuid, + userState: ws.readyState, + agentState: pendingAgent.readyState + }) + this.setupMessageForwarding(availableExecId, transaction) + } else { + logger.info('Failed to activate 
session with pending agent:' + JSON.stringify({ + execId: availableExecId, + microserviceUuid, + userState: ws.readyState, + agentState: pendingAgent.readyState + })) + this.sessionManager.addPendingUser(microserviceUuid, ws) + } + } else { + logger.info('No pending agent found for user, waiting:' + JSON.stringify({ + execId: availableExecId, + microserviceUuid, + userState: ws.readyState + })) + this.sessionManager.addPendingUser(microserviceUuid, ws) + } + + ws.on('close', () => { + for (const [execId, session] of this.sessionManager.sessions) { + if (session.user === ws) { + this.cleanupSession(execId, transaction) + } + } + this.sessionManager.removePendingUser(microserviceUuid, ws) + logger.info('User WebSocket disconnected:' + JSON.stringify({ + microserviceUuid, + userState: ws.readyState + })) + }) + } catch (error) { + logger.error('User connection validation failed:' + JSON.stringify({ + error: error.message, + stack: error.stack + })) + // Handle error gracefully instead of throwing + if (ws.readyState === WebSocket.OPEN) { + try { + ws.close(1008, error.message || 'Authentication failed') + } catch (closeError) { + logger.error('Error closing WebSocket:' + JSON.stringify({ + error: closeError.message, + originalError: error.message + })) + } + } + } + } + + // // Helper method - only filter obvious noise + // isNoise(output) { + // // Filter only the most obvious noise + // const noisePatterns = [ + // /^clear: command not found/, // Clear command error + // /^\s*$/, // Empty or whitespace only + // /^.$/ // Single character (usually control chars) + // ] + // return noisePatterns.some(pattern => pattern.test(output)) + // } + + setupMessageForwarding (execId, transaction) { + const session = this.sessionManager.getSession(execId) + if (!session) { + logger.error('[RELAY] Failed to setup message forwarding: No session found for execId=' + execId) + return + } + + const { agent, user } = session + logger.info('[RELAY] Setting up message forwarding for 
session:' + JSON.stringify({ + execId, + microserviceUuid: session.microserviceUuid, + agentConnected: !!agent, + userConnected: !!user, + agentState: agent ? agent.readyState : 'N/A', + userState: user ? user.readyState : 'N/A' + })) + + // Send activation message to agent + if (agent) { + const activationMsg = { + type: MESSAGE_TYPES.ACTIVATION, + data: Buffer.from(JSON.stringify({ + execId: execId, + microserviceUuid: session.microserviceUuid, + timestamp: Date.now() + })), + microserviceUuid: session.microserviceUuid, + execId: execId, + timestamp: Date.now() + } + + this.sendMessageToAgent(agent, activationMsg, execId, session.microserviceUuid) + .then(success => { + if (success) { + logger.info('[RELAY] Session activation complete:' + JSON.stringify({ + execId, + microserviceUuid: session.microserviceUuid, + agentState: agent.readyState + })) + } else { + logger.error('[RELAY] Session activation failed:' + JSON.stringify({ + execId, + microserviceUuid: session.microserviceUuid, + agentState: agent.readyState + })) + // Cleanup the session if activation fails + this.cleanupSession(execId, transaction) + } + }) + } + + // Remove any previous message handlers to avoid duplicates + if (user) { + logger.debug('[RELAY] Removing previous user message handlers for execId=' + execId) + user.removeAllListeners('message') + } + if (agent) { + logger.debug('[RELAY] Removing previous agent message handlers for execId=' + execId) + agent.removeAllListeners('message') + } + + // Forward user -> agent + if (user && agent) { + logger.debug('[RELAY] Setting up user->agent message forwarding for execId=' + execId) + user.on('message', async (data, isBinary) => { + logger.debug('[RELAY] User message received:' + JSON.stringify({ + execId, + isBinary, + dataType: typeof data, + dataLength: data.length, + userState: user.readyState, + agentState: agent.readyState + })) + + if (!isBinary) { + // Handle text messages from user + const text = data.toString() + logger.debug('[RELAY] 
Received text message from user:' + JSON.stringify({ + execId, + text, + length: text.length, + userState: user.readyState, + agentState: agent.readyState + })) + + // Convert text to binary message in agent's expected format + const msg = { + type: MESSAGE_TYPES.STDIN, + data: Buffer.from(text + '\n'), // Add newline for command execution + microserviceUuid: session.microserviceUuid, + execId: execId, + timestamp: Date.now() + } + + await this.sendMessageToAgent(agent, msg, execId, session.microserviceUuid) + return + } + + const buffer = Buffer.from(data) + try { + const msg = this.decodeMessage(buffer) + // Ensure message has all required fields + if (!msg.microserviceUuid) msg.microserviceUuid = session.microserviceUuid + if (!msg.execId) msg.execId = execId + if (!msg.timestamp) msg.timestamp = Date.now() + + if (msg.type === MESSAGE_TYPES.CLOSE) { + logger.info(`[RELAY] User sent CLOSE for execId=${execId}`) + await this.sendMessageToAgent(agent, msg, execId, session.microserviceUuid) + // Get current transaction from the session + const currentTransaction = session.transaction + this.cleanupSession(execId, currentTransaction) + return + } + + await this.sendMessageToAgent(agent, msg, execId, session.microserviceUuid) + } catch (error) { + logger.error('[RELAY] Failed to process binary message:' + JSON.stringify({ + execId, + error: error.message, + stack: error.stack, + bufferLength: buffer.length, + userState: user.readyState, + agentState: agent.readyState + })) + } + }) + + // Forward agent -> user + logger.debug('[RELAY] Setting up agent->user message forwarding for execId=' + execId) + agent.on('message', async (data, isBinary) => { + logger.debug('[RELAY] Agent message received:' + JSON.stringify({ + execId, + isBinary, + dataType: typeof data, + dataLength: data.length, + userState: user.readyState, + agentState: agent.readyState + })) + + try { + const buffer = Buffer.from(data) + const msg = this.decodeMessage(buffer) + logger.debug('[RELAY] Decoded 
agent message:' + JSON.stringify({ + execId, + type: msg.type, + hasData: !!msg.data, + messageSize: buffer.length + })) + + if (msg.type === MESSAGE_TYPES.CLOSE) { + logger.info(`[RELAY] Agent sent CLOSE for execId=${execId}`) + if (user.readyState === WebSocket.OPEN) { + user.close(1000, 'Agent closed connection') + } + // Get current transaction from the session + const currentTransaction = session.transaction + this.cleanupSession(execId, currentTransaction) + return + } + + if (user.readyState === WebSocket.OPEN) { + if (msg.type === MESSAGE_TYPES.STDOUT || msg.type === MESSAGE_TYPES.STDERR) { + if (msg.data && msg.data.length > 0) { + // Create MessagePack message for user + const userMsg = { + type: msg.type, + data: msg.data, + microserviceUuid: session.microserviceUuid, + execId: execId, + timestamp: Date.now() + } + // Encode and send as binary + const encoded = this.encodeMessage(userMsg) + user.send(encoded, { + binary: true, + compress: false, + mask: false, + fin: true + }) + + logger.debug('[RELAY] Forwarded agent message to user:' + JSON.stringify({ + execId, + type: msg.type, + encodedLength: encoded.length, + messageType: msg.type + })) + } + } else if (msg.type === MESSAGE_TYPES.CONTROL) { + user.send(data, { + binary: true, + compress: false, + mask: false, + fin: true + }) + } + } else { + logger.error('[RELAY] User not ready to receive message:' + JSON.stringify({ + execId, + userState: user.readyState, + messageType: msg.type + })) + } + } catch (error) { + logger.error('[RELAY] Failed to process agent message:', error) + } + }) + } + + logger.info('[RELAY] Message forwarding setup complete for session:' + JSON.stringify({ + execId, + microserviceUuid: session.microserviceUuid, + agentConnected: !!agent, + userConnected: !!user, + agentState: agent ? agent.readyState : 'N/A', + userState: user ? 
user.readyState : 'N/A' + })) + } + + async validateAgentConnection (token, microserviceUuid, transaction) { + try { + // Use AuthDecorator to validate the token and get the fog + let fog = {} + const req = { headers: { authorization: token }, transaction } + const handler = AuthDecorator.checkFogToken(async (req, fogObj) => { + fog = fogObj + return fogObj + }) + await handler(req) + + if (!fog) { + logger.error('Agent validation failed: Invalid agent token') + throw new WebSocketError(1008, 'Invalid agent token') + } + + // Verify microservice exists and belongs to this fog + const microservice = await MicroserviceManager.findOne({ uuid: microserviceUuid }, transaction) + if (!microservice || microservice.iofogUuid !== fog.uuid) { + logger.error('Agent validation failed: Microservice not found or not associated with this agent' + JSON.stringify({ + microserviceUuid, + fogUuid: fog.uuid, + found: !!microservice, + microserviceFogUuid: microservice ? microservice.iofogUuid : null + })) + throw new WebSocketError(1008, 'Microservice not found or not associated with this agent') + } + + return fog + } catch (error) { + logger.error('Agent validation error:' + JSON.stringify({ + error: error.message, + stack: error.stack, + microserviceUuid + })) + throw error // Propagate the original error + } + } + + async validateUserConnection (token, microserviceUuid, transaction) { + try { + // 1. 
Authenticate user first (Keycloak) + const req = { headers: { authorization: token } } + const res = {} + let userRoles = [] + await new Promise((resolve, reject) => { + keycloak.protect(['SRE', 'Developer'])(req, res, (err) => { + if (err) { + logger.error('User authentication failed:' + err) + reject(new Errors.AuthenticationError('Authentication failed')) + return + } + // Extract roles from token + userRoles = req.kauth && req.kauth.grant && req.kauth.grant.access_token && req.kauth.grant.access_token.content && req.kauth.grant.access_token.content.realm_access && req.kauth.grant.access_token.content.realm_access.roles + ? req.kauth.grant.access_token.content.realm_access.roles + : [] + resolve() + }) + }).catch((err) => { + // Immediately throw on authentication error + throw err + }) + + // 2. Only now check microservice, application, etc. + const microservice = await MicroserviceManager.findOne({ uuid: microserviceUuid }, transaction) + if (!microservice) { + throw new Errors.NotFoundError('Microservice not found') + } + + const application = await ApplicationManager.findOne({ id: microservice.applicationId }, transaction) + if (!application) { + throw new Errors.NotFoundError('Application not found') + } + + const statusArr = await MicroserviceStatusManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) + if (!statusArr || statusArr.length === 0) { + throw new Errors.NotFoundError('Microservice status not found') + } + const status = statusArr[0] + logger.debug('Microservice status check:' + JSON.stringify({ + status: status.status, + expectedStatus: microserviceState.RUNNING, + isEqual: status.status === microserviceState.RUNNING + })) + if (status.status !== microserviceState.RUNNING) { + throw new Errors.ValidationError('Microservice is not running') + } + + if (application.isSystem && !userRoles.includes('SRE')) { + throw new Errors.AuthenticationError('Only SRE can access system microservices') + } + // For non-system, SRE or 
Developer is already checked above + + const execStatusArr = await MicroserviceExecStatusManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) + if (!execStatusArr || execStatusArr.length === 0) { + throw new Errors.NotFoundError('Microservice exec status not found') + } + const execStatus = execStatusArr[0] + // logger.debug('Microservice exec status check:' + JSON.stringify({ + // status: execStatus.status, + // expectedStatus: microserviceExecState.ACTIVE, + // isEqual: execStatus.status === microserviceExecState.ACTIVE + // })) + if (execStatus.status === microserviceExecState.ACTIVE) { + throw new Errors.ValidationError('Microservice already has an active session') + } + + return { execSessionId: execStatus.execSessionId } + } catch (error) { + logger.error('User connection validation failed:' + JSON.stringify({ error: error.message, stack: error.stack })) + throw error + } + } + + // Singleton instance + static getInstance () { + if (!WebSocketServer.instance) { + WebSocketServer.instance = new WebSocketServer() + } + return WebSocketServer.instance + } + + // Clean up session and close sockets + cleanupSession (execId, transaction) { + const session = this.sessionManager.getSession(execId) + if (!session) return + + // Send CLOSE message to agent if it's still connected + if (session.agent && session.agent.readyState === WebSocket.OPEN) { + const closeMsg = { + type: MESSAGE_TYPES.CLOSE, + execId: execId, + microserviceUuid: session.microserviceUuid, + timestamp: Date.now(), + data: Buffer.from('Session closed') + } + + try { + const encoded = this.encodeMessage(closeMsg) + session.agent.send(encoded, { + binary: true, + compress: false, + mask: false, + fin: true + }) + logger.info('[RELAY] Sent CLOSE message to agent for execId=' + execId) + } catch (error) { + logger.error('[RELAY] Failed to send CLOSE message to agent:' + JSON.stringify({ + execId, + error: error.message, + stack: error.stack + })) + } + } + + // Close the 
connections + if (session.user && session.user.readyState === WebSocket.OPEN) { + session.user.close(1000, 'Session closed') + } + if (session.agent && session.agent.readyState === WebSocket.OPEN) { + session.agent.close(1000, 'Session closed') + } + + this.sessionManager.removeSession(execId, transaction) + logger.info('[RELAY] Session cleaned up for execId=' + execId) + } + + // Utility to extract microserviceUuid from path + extractUuidFromPath (path) { + const match = path.match(/([a-f0-9-]{36})/i) + return match ? match[1] : null + } + + registerRoute (path, middleware) { + // Store the route handler + this.routes = this.routes || new Map() + this.routes.set(path, middleware) + + logger.info('Registered WebSocket route: ' + path) + } + + // Helper method for sending messages to agent + async sendMessageToAgent (agent, message, execId, microserviceUuid) { + if (!agent || agent.readyState !== WebSocket.OPEN) { + logger.error('[RELAY] Cannot send message - agent not ready:' + JSON.stringify({ + execId, + microserviceUuid, + agentState: agent ? 
agent.readyState : 'N/A', + messageType: message.type + })) + return false + } + + try { + const encoded = this.encodeMessage(message) + agent.send(encoded, { + binary: true, + compress: false, + mask: false, + fin: true + }) + logger.debug('[RELAY] Message sent to agent:' + JSON.stringify({ + execId, + microserviceUuid, + messageType: message.type, + encodedLength: encoded.length + })) + return true + } catch (error) { + logger.error('[RELAY] Failed to send message to agent:' + JSON.stringify({ + execId, + microserviceUuid, + messageType: message.type, + error: error.message, + stack: error.stack + })) + return false + } + } +} + +module.exports = WebSocketServer diff --git a/src/websocket/session-manager.js b/src/websocket/session-manager.js new file mode 100644 index 00000000..a9a148be --- /dev/null +++ b/src/websocket/session-manager.js @@ -0,0 +1,495 @@ +const WebSocket = require('ws') +const logger = require('../logger') +const Errors = require('../helpers/errors') +const MicroserviceManager = require('../data/managers/microservice-manager') +const MicroserviceExecStatusManager = require('../data/managers/microservice-exec-status-manager') +const { microserviceExecState } = require('../enums/microservice-state') + +class SessionManager { + constructor (config) { + if (!config || !config.session) { + const error = new Errors.ValidationError('Invalid session manager configuration') + logger.error('Failed to initialize SessionManager:' + error) + throw error + } + this.sessions = new Map() + this.pendingUsers = new Map() // Map> + this.pendingAgents = new Map() // Map> + this.config = config + this.cleanupInterval = null + logger.info('SessionManager initialized with config:' + JSON.stringify({ + sessionTimeout: config.session.timeout, + maxConnections: config.session.maxConnections, + cleanupInterval: config.session.cleanupInterval + })) + } + + createSession (execId, microserviceUuid, agentWs, userWs, transaction) { + const session = { + execId, + 
microserviceUuid, + agent: agentWs, + user: userWs, + lastActivity: Date.now(), + transaction + } + this.sessions.set(execId, session) + logger.info('Session created:' + JSON.stringify({ + execId, + microserviceUuid, + agentConnected: !!agentWs, + userConnected: !!userWs + })) + return session + } + + getSession (execId) { + return this.sessions.get(execId) || null + } + + async removeSession (execId, transaction) { + const session = this.sessions.get(execId) + if (session) { + logger.info('Removing session:' + JSON.stringify({ + execId, + microserviceUuid: session.microserviceUuid + })) + this.sessions.delete(execId) + await MicroserviceExecStatusManager.update( + { microserviceUuid: session.microserviceUuid }, + { execSessionId: '', status: microserviceExecState.INACTIVE }, + transaction + ) + await MicroserviceManager.update({ uuid: session.microserviceUuid }, { execEnabled: false }, transaction) + } + } + + addPendingUser (microserviceUuid, userWs) { + if (!this.pendingUsers.has(microserviceUuid)) { + this.pendingUsers.set(microserviceUuid, new Set()) + } + this.pendingUsers.get(microserviceUuid).add(userWs) + logger.info('Added pending user:' + JSON.stringify({ + microserviceUuid, + pendingUserCount: this.pendingUsers.get(microserviceUuid).size + })) + } + + async addPendingAgent (microserviceUuid, execId, agentWs, transaction) { + if (!this.pendingAgents.has(microserviceUuid)) { + this.pendingAgents.set(microserviceUuid, new Map()) + // await MicroserviceExecStatusManager.update( + // { microserviceUuid: microserviceUuid }, + // { execSessionId: execId, status: microserviceExecState.PENDING }, + // transaction + // ) + } + const agents = this.pendingAgents.get(microserviceUuid) + + // Check if agent with this execId already exists + if (agents.has(execId)) { + logger.warn('Agent with execId already exists in pending list:' + JSON.stringify({ + microserviceUuid, + execId, + existingAgentState: agents.get(execId).ws.readyState, + newAgentState: 
agentWs.readyState + })) + // Remove old agent if it's not in OPEN state + if (agents.get(execId).ws.readyState !== WebSocket.OPEN) { + agents.delete(execId) + } else { + return // Skip adding if we already have an active agent with this execId + } + } + + const agentInfo = { ws: agentWs, execId } + agents.set(execId, agentInfo) + logger.info('Added pending agent:' + JSON.stringify({ + microserviceUuid, + execId, + pendingAgentCount: agents.size, + agentState: agentWs.readyState + })) + } + + removePendingUser (microserviceUuid, userWs) { + if (this.pendingUsers.has(microserviceUuid)) { + const users = this.pendingUsers.get(microserviceUuid) + users.delete(userWs) + if (users.size === 0) { + this.pendingUsers.delete(microserviceUuid) + } + logger.info('Removed pending user:' + JSON.stringify({ + microserviceUuid, + remainingUsers: users.size + })) + } + } + + removePendingAgent (microserviceUuid, agentWs) { + if (this.pendingAgents.has(microserviceUuid)) { + const agents = this.pendingAgents.get(microserviceUuid) + // Find and remove agent by WebSocket instance + for (const [execId, agentInfo] of agents.entries()) { + if (agentInfo.ws === agentWs) { + agents.delete(execId) + logger.info('Removed pending agent:' + JSON.stringify({ + microserviceUuid, + execId, + remainingAgents: agents.size + })) + break + } + } + + if (agents.size === 0) { + this.pendingAgents.delete(microserviceUuid) + } + } + } + + findPendingUserForExecId (microserviceUuid, execId) { + if (this.pendingUsers.has(microserviceUuid)) { + const users = this.pendingUsers.get(microserviceUuid) + // Return the first available user since we don't store execId with users + // The execId will be assigned when creating the session + for (const userWs of users) { + if (userWs.readyState === WebSocket.OPEN) { + return userWs + } + } + } + return null + } + + findPendingAgentForExecId (microserviceUuid, execId) { + if (this.pendingAgents.has(microserviceUuid)) { + const agents = 
this.pendingAgents.get(microserviceUuid) + const agentInfo = agents.get(execId) + if (agentInfo && agentInfo.ws.readyState === WebSocket.OPEN) { + return agentInfo.ws + } + } + return null + } + + async tryActivateSession (microserviceUuid, execId, newConnection, isAgent, transaction) { + let pendingUser = null + let pendingAgent = null + let session = null + + try { + if (isAgent) { + pendingUser = this.findPendingUserForExecId(microserviceUuid, execId) + if (pendingUser) { + // Atomic operation: remove user and create session + this.removePendingUser(microserviceUuid, pendingUser) + session = this.createSession(execId, microserviceUuid, newConnection, pendingUser, transaction) + logger.info('Session activated with agent first:' + JSON.stringify({ + execId, + microserviceUuid, + userConnected: !!pendingUser, + agentConnected: !!newConnection, + userState: pendingUser.readyState, + agentState: newConnection.readyState + })) + } else { + await this.addPendingAgent(microserviceUuid, execId, newConnection, transaction) + logger.info('No pending user found for agent, added to pending list:' + JSON.stringify({ + execId, + microserviceUuid, + agentState: newConnection.readyState + })) + } + } else { + pendingAgent = this.findPendingAgentForExecId(microserviceUuid, execId) + if (pendingAgent) { + // Atomic operation: remove agent and create session + this.removePendingAgent(microserviceUuid, pendingAgent) + session = this.createSession(execId, microserviceUuid, pendingAgent, newConnection, transaction) + logger.info('Session activated with user first:' + JSON.stringify({ + execId, + microserviceUuid, + userConnected: !!newConnection, + agentConnected: !!pendingAgent, + userState: newConnection.readyState, + agentState: pendingAgent.readyState + })) + await MicroserviceExecStatusManager.update( + { microserviceUuid: microserviceUuid }, + { execSessionId: execId, status: microserviceExecState.ACTIVE }, + transaction + ) + } else { + this.addPendingUser(microserviceUuid, 
newConnection) + logger.info('No pending agent found for user, added to pending list:' + JSON.stringify({ + execId, + microserviceUuid, + userState: newConnection.readyState + })) + } + } + } catch (error) { + logger.error('Failed to activate session:' + JSON.stringify({ + error: error.message, + execId, + microserviceUuid, + isAgent, + userState: newConnection.readyState + })) + // Cleanup any partial state + if (session) { + await this.removeSession(execId, transaction) + } + throw error + } + + return session + } + + logSessionState () { + logger.info('--- WebSocket SessionManager State ---') + logger.info('Active sessions:') + for (const [execId, session] of this.sessions) { + logger.info(JSON.stringify({ + execId, + microserviceUuid: session.microserviceUuid, + agentConnected: !!session.agent, + userConnected: !!session.user, + lastActivity: new Date(session.lastActivity).toISOString(), + agentState: session.agent ? session.agent.readyState : 'N/A', + userState: session.user ? session.user.readyState : 'N/A' + })) + } + logger.info('Pending users:') + for (const [microserviceUuid, users] of this.pendingUsers) { + logger.info(JSON.stringify({ + microserviceUuid, + count: users.size + })) + } + logger.info('Pending agents:') + for (const [microserviceUuid, agents] of this.pendingAgents) { + logger.info(JSON.stringify({ + microserviceUuid, + count: agents.size, + execIds: Array.from(agents.keys()) + })) + } + logger.info('--------------------------------------') + } + + assignAgentToSession (execId, agentWs) { + const session = this.getSession(execId) + if (session) { + session.agent = agentWs + session.lastActivity = Date.now() + } + } + + assignUserToSession (execId, userWs) { + const session = this.getSession(execId) + if (session) { + session.user = userWs + session.lastActivity = Date.now() + } + } + + addConnection (sessionId, ws) { + try { + const session = this.getSession(sessionId) + session.connections.add(ws) + session.lastActivity = Date.now() + 
logger.info('Connection added to session' + JSON.stringify({ + sessionId, + connectionCount: session.connections.size + })) + } catch (error) { + logger.error('Failed to add connection:' + error) + throw error + } + } + + removeConnection (sessionId, ws) { + try { + const session = this.getSession(sessionId) + session.connections.delete(ws) + if (session.connections.size === 0) { + session.lastActivity = Date.now() + logger.info('Last connection removed from session' + JSON.stringify({ sessionId })) + } else { + logger.debug('Connection removed from session' + JSON.stringify({ + sessionId, + remainingConnections: session.connections.size + })) + } + } catch (error) { + logger.error('Failed to remove connection:' + error) + throw error + } + } + + handleReconnection (sessionId, ws) { + try { + const session = this.getSession(sessionId) + if (session.reconnectAttempts < this.config.maxReconnectAttempts) { + session.reconnectAttempts++ + this.addConnection(sessionId, ws) + logger.info('Reconnection successful' + JSON.stringify({ + sessionId, + attempt: session.reconnectAttempts + })) + return true + } else { + const error = new Errors.ValidationError('Max reconnection attempts reached') + logger.warn('Max reconnection attempts reached' + JSON.stringify({ + sessionId, + maxAttempts: this.config.maxReconnectAttempts, + error: error.message + })) + throw error + } + } catch (error) { + logger.error('Reconnection failed:' + error) + throw error + } + } + + startCleanup () { + if (this.cleanupInterval) { + logger.debug('Cleanup interval already running') + return + } + logger.info('Starting session cleanup service with interval: ' + this.config.session.cleanupInterval + 'ms') + this.cleanupInterval = setInterval(() => { + const now = Date.now() + let cleanedCount = 0 + logger.debug('Running session cleanup cycle') + for (const [sessionId, session] of this.sessions) { + if (now - session.lastActivity > this.config.session.timeout) { + this.cleanupSession(sessionId) + 
cleanedCount++ + } + } + if (cleanedCount > 0) { + logger.info('Session cleanup completed' + JSON.stringify({ cleanedCount })) + } + // Log session state after cleanup + this.logSessionState() + }, this.config.session.cleanupInterval) + } + + cleanupSession (sessionId) { + try { + const session = this.getSession(sessionId) + logger.info('Cleaning up session' + JSON.stringify({ + sessionId, + type: session.type, + connectionCount: session.connections.size + })) + for (const ws of session.connections) { + ws.close(1000, 'Session timeout') + } + this.sessions.delete(sessionId) + logger.debug('Session cleanup completed' + JSON.stringify({ sessionId })) + } catch (error) { + logger.error('Failed to cleanup session:' + error) + throw error + } + } + + stopCleanup () { + if (this.cleanupInterval) { + clearInterval(this.cleanupInterval) + logger.info('Session cleanup service stopped') + } + } + + getActiveConnections (sessionId) { + try { + const session = this.getSession(sessionId) + const count = session.connections.size + logger.debug('Getting active connections' + JSON.stringify({ sessionId, count })) + return count + } catch (error) { + logger.error('Failed to get active connections:' + error) + throw error + } + } + + broadcastToSession (sessionId, message) { + try { + const session = this.getSession(sessionId) + const messageStr = JSON.stringify(message) + let sentCount = 0 + for (const ws of session.connections) { + if (ws.readyState === WebSocket.OPEN) { + ws.send(messageStr) + sentCount++ + } + } + logger.debug('Broadcast message to session' + JSON.stringify({ + sessionId, + recipients: sentCount, + totalConnections: session.connections.size + })) + } catch (error) { + logger.error('Failed to broadcast message:' + error) + throw error + } + } + + bufferMessage (sessionId, message) { + try { + const session = this.getSession(sessionId) + session.buffer.push(message) + if (session.buffer.length > this.config.maxBufferSize) { + session.buffer.shift() // Remove 
oldest message + logger.debug('Buffer size limit reached, removed oldest message' + JSON.stringify({ + sessionId, + bufferSize: session.buffer.length + })) + } + } catch (error) { + logger.error('Failed to buffer message:' + error) + throw error + } + } + + getBufferedMessages (sessionId) { + try { + const session = this.getSession(sessionId) + const messages = session.buffer + logger.debug('Retrieved buffered messages' + JSON.stringify({ + sessionId, + messageCount: messages.length + })) + return messages + } catch (error) { + logger.error('Failed to get buffered messages:' + error) + throw error + } + } + + clearBuffer (sessionId) { + try { + const session = this.getSession(sessionId) + const count = session.buffer.length + session.buffer = [] + logger.info('Cleared message buffer' + JSON.stringify({ sessionId, clearedCount: count })) + } catch (error) { + logger.error('Failed to clear buffer:' + error) + throw error + } + } + + getPendingAgentExecIds (microserviceUuid) { + if (this.pendingAgents.has(microserviceUuid)) { + const agents = this.pendingAgents.get(microserviceUuid) + return Array.from(agents.keys()) + } + return [] + } +} + +module.exports = SessionManager diff --git a/swagger.js b/swagger.js index cd25b89e..4240fdbb 100644 --- a/swagger.js +++ b/swagger.js @@ -1,21 +1,40 @@ // swagger.js const swaggerJsDoc = require('swagger-jsdoc') +// Import all schemas +const schemas = require('./src/schemas') + const swaggerOptions = { swaggerDefinition: { openapi: '3.0.0', info: { - title: 'API Documentation', - version: '1.0.0', - description: 'Datasancae API Documentation' + title: 'Datasance PoT Controller REST API Documentation', + version: '3.5.0', + description: 'Datasance PoT Controller REST API Documentation' }, servers: [ { - url: 'http://localhost:3000' + url: 'http://localhost:51121/api/v3' + } + ], + components: { + securitySchemes: { + authToken: { + type: 'http', + scheme: 'bearer', + bearerFormat: 'JWT', + description: 'JWT token for 
authentication (user or agent)' + } + }, + schemas: schemas + }, + security: [ + { + authToken: [] } ] }, - apis: ['./routes/*.js'] + apis: ['./src/routes/*.js'] } const swaggerDocs = swaggerJsDoc(swaggerOptions) From 92f0c522e835dfbe912667d5da8d198fbdcd017c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sat, 21 Jun 2025 16:55:28 +0300 Subject: [PATCH 10/25] ws kc auth roles checking fixed --- src/websocket/server.js | 85 ++- test/backup/iofog-service.js | 1250 ------------------------------ test/backup/services-service.js | 1261 ------------------------------- 3 files changed, 66 insertions(+), 2530 deletions(-) delete mode 100644 test/backup/iofog-service.js delete mode 100644 test/backup/services-service.js diff --git a/src/websocket/server.js b/src/websocket/server.js index 4acf055c..3627ba44 100644 --- a/src/websocket/server.js +++ b/src/websocket/server.js @@ -839,27 +839,59 @@ class WebSocketServer { async validateUserConnection (token, microserviceUuid, transaction) { try { - // 1. Authenticate user first (Keycloak) - const req = { headers: { authorization: token } } - const res = {} + // 1. 
Authenticate user first (Keycloak) - Direct token verification let userRoles = [] - await new Promise((resolve, reject) => { - keycloak.protect(['SRE', 'Developer'])(req, res, (err) => { - if (err) { - logger.error('User authentication failed:' + err) - reject(new Errors.AuthenticationError('Authentication failed')) - return + + // Extract Bearer token + const bearerToken = token.replace('Bearer ', '') + if (!bearerToken) { + throw new Errors.AuthenticationError('Missing or invalid authorization token') + } + + // Check if we're in development mode (mock Keycloak) + const isDevMode = config.get('server.devMode', true) + const hasAuthConfig = this.isAuthConfigured() + + if (!hasAuthConfig && isDevMode) { + // Use mock roles for development + userRoles = ['SRE', 'Developer', 'Viewer'] + logger.debug('Using mock authentication for development mode') + } else { + // Use real Keycloak token verification + try { + // Create a grant from the access token + const grant = await keycloak.grantManager.createGrant({ + access_token: bearerToken + }) + + // Extract roles from the token - get client-specific roles + const clientId = process.env.KC_CLIENT || config.get('auth.client.id') + const resourceAccess = grant.access_token.content.resource_access + + if (resourceAccess && resourceAccess[clientId] && resourceAccess[clientId].roles) { + userRoles = resourceAccess[clientId].roles + } else { + // Fallback to realm roles if client roles not found + userRoles = grant.access_token.content.realm_access && grant.access_token.content.realm_access.roles + ? grant.access_token.content.realm_access.roles + : [] } - // Extract roles from token - userRoles = req.kauth && req.kauth.grant && req.kauth.grant.access_token && req.kauth.grant.access_token.content && req.kauth.grant.access_token.content.realm_access && req.kauth.grant.access_token.content.realm_access.roles - ? 
req.kauth.grant.access_token.content.realm_access.roles - : [] - resolve() - }) - }).catch((err) => { - // Immediately throw on authentication error - throw err - }) + + logger.debug('Token verification successful, user roles:' + JSON.stringify(userRoles)) + } catch (keycloakError) { + logger.error('Keycloak token verification failed:' + JSON.stringify({ + error: keycloakError.message, + stack: keycloakError.stack + })) + throw new Errors.AuthenticationError('Invalid or expired token') + } + } + + // Check if user has required roles + const hasRequiredRole = userRoles.some(role => ['SRE', 'Developer'].includes(role)) + if (!hasRequiredRole) { + throw new Errors.AuthenticationError('Insufficient permissions. Required roles: SRE or Developer') + } // 2. Only now check microservice, application, etc. const microservice = await MicroserviceManager.findOne({ uuid: microserviceUuid }, transaction) @@ -1017,6 +1049,21 @@ class WebSocketServer { return false } } + + // Helper method to check if auth is configured + isAuthConfigured () { + const requiredConfigs = [ + 'auth.realm', + 'auth.realmKey', + 'auth.url', + 'auth.client.id', + 'auth.client.secret' + ] + return requiredConfigs.every(configKey => { + const value = config.get(configKey) + return value !== undefined && value !== null && value !== '' + }) + } } module.exports = WebSocketServer diff --git a/test/backup/iofog-service.js b/test/backup/iofog-service.js deleted file mode 100644 index bf90d94b..00000000 --- a/test/backup/iofog-service.js +++ /dev/null @@ -1,1250 +0,0 @@ -/* - * ******************************************************************************* - * * Copyright (c) 2023 Datasance Teknoloji A.S. - * * - * * This program and the accompanying materials are made available under the - * * terms of the Eclipse Public License v. 
2.0 which is available at - * * http://www.eclipse.org/legal/epl-2.0 - * * - * * SPDX-License-Identifier: EPL-2.0 - * ******************************************************************************* - * - */ - -const config = require('../config') -const fs = require('fs') -const TransactionDecorator = require('../decorators/transaction-decorator') -const AppHelper = require('../helpers/app-helper') -const FogManager = require('../data/managers/iofog-manager') -const FogProvisionKeyManager = require('../data/managers/iofog-provision-key-manager') -const FogVersionCommandManager = require('../data/managers/iofog-version-command-manager') -const ChangeTrackingService = require('./change-tracking-service') -const Errors = require('../helpers/errors') -const ErrorMessages = require('../helpers/error-messages') -const Validator = require('../schemas') -const HWInfoManager = require('../data/managers/hw-info-manager') -const USBInfoManager = require('../data/managers/usb-info-manager') -const CatalogService = require('./catalog-service') -const MicroserviceManager = require('../data/managers/microservice-manager') -const ApplicationManager = require('../data/managers/application-manager') -const TagsManager = require('../data/managers/tags-manager') -const MicroserviceService = require('./microservices-service') -const EdgeResourceService = require('./edge-resource-service') -const VolumeMountService = require('./volume-mount-service') -const RouterManager = require('../data/managers/router-manager') -const MicroserviceExtraHostManager = require('../data/managers/microservice-extra-host-manager') -const MicroserviceStatusManager = require('../data/managers/microservice-status-manager') -const RouterConnectionManager = require('../data/managers/router-connection-manager') -const RouterService = require('./router-service') -const Constants = require('../helpers/constants') -const Op = require('sequelize').Op -const lget = require('lodash/get') -const CertificateService = 
require('./certificate-service') -const logger = require('../logger') -const ServiceManager = require('../data/managers/service-manager') - -const SITE_CA_CERT = 'pot-site-ca' -const DEFAULT_ROUTER_LOCAL_CA = 'default-router-local-ca' -const SERVICE_ANNOTATION_TAG = 'service.datasance.com/tag' - -async function checkKubernetesEnvironment () { - const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') - return controlPlane && controlPlane.toLowerCase() === 'kubernetes' -} - -async function getLocalCertificateHosts (isKubernetes, namespace) { - if (isKubernetes) { - return `router-local,router-local.${namespace},router-local.${namespace}.svc.cluster.local` - } - return '127.0.0.1,localhost,host.docker.internal,host.containers.internal' -} - -async function getSiteCertificateHosts (fogData, transaction) { - const hosts = new Set() - // Add existing hosts if isSystem - if (fogData.isSystem) { - if (fogData.host) hosts.add(fogData.host) - if (fogData.ipAddress) hosts.add(fogData.ipAddress) - if (fogData.ipAddressExternal) hosts.add(fogData.ipAddressExternal) - } - // Add default router host if not system - if (!fogData.isSystem) { - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - if (defaultRouter.host) hosts.add(defaultRouter.host) - } - // Add upstream router hosts - // const upstreamRouters = (fogData.upstreamRouters || []).filter(uuid => uuid !== 'default-router') - // if (upstreamRouters.length) { - // for (const uuid of upstreamRouters) { - // const routerHost = await FogManager.findOne({ uuid: uuid }, transaction) - // if (routerHost.host) hosts.add(routerHost.host) - // if (routerHost.ipAddress) hosts.add(routerHost.ipAddress) - // } - // } - return Array.from(hosts).join(',') || 'localhost' -} - -async function _handleRouterCertificates (fogData, uuid, isRouterModeChanged, transaction) { - logger.debug('Starting _handleRouterCertificates for fog: ' + JSON.stringify({ uuid: uuid, host: fogData.host 
})) - - // Check if we're in Kubernetes environment - const isKubernetes = await checkKubernetesEnvironment() - const namespace = isKubernetes ? process.env.CONTROLLER_NAMESPACE : null - - // Helper to check CA existence - async function ensureCA (name, subject) { - logger.debug('Checking CA existence: ' + JSON.stringify({ name, subject })) - try { - await CertificateService.getCAEndpoint(name, transaction) - logger.debug('CA already exists: ' + name) - // CA exists - } catch (err) { - if (err.name === 'NotFoundError') { - logger.debug('CA not found, creating new CA: ' + JSON.stringify({ name, subject })) - await CertificateService.createCAEndpoint({ - name, - subject: `${subject}`, - expiration: 60, // months - type: 'self-signed' - }, transaction) - logger.debug('Successfully created CA: ' + name) - } else if (err.name === 'ConflictError') { - logger.debug('CA already exists (conflict): ' + name) - // Already exists, ignore - } else { - logger.error('Error in ensureCA - Name: ' + name + ', Subject: ' + subject + ', Error: ' + err.message + ', Type: ' + err.name + ', Code: ' + err.code) - logger.error('Stack trace: ' + err.stack) - throw err - } - } - } - - // Helper to check cert existence - async function ensureCert (name, subject, hosts, ca, shouldRecreate = false) { - logger.debug('Checking certificate existence: ' + JSON.stringify({ name, subject, hosts, ca })) - try { - const existingCert = await CertificateService.getCertificateEndpoint(name, transaction) - if (shouldRecreate && existingCert) { - logger.debug('Certificate exists and needs recreation: ' + name) - await CertificateService.deleteCertificateEndpoint(name, transaction) - logger.debug('Deleted existing certificate: ' + name) - // Create new certificate - await CertificateService.createCertificateEndpoint({ - name, - subject: `${subject}`, - hosts, - ca - }, transaction) - logger.debug('Successfully recreated certificate: ' + name) - } else if (!existingCert) { - logger.debug('Certificate not 
found, creating new certificate: ' + JSON.stringify({ name, subject, hosts, ca })) - await CertificateService.createCertificateEndpoint({ - name, - subject: `${subject}`, - hosts, - ca - }, transaction) - logger.debug('Successfully created certificate: ' + name) - } else { - logger.debug('Certificate already exists: ' + name) - } - } catch (err) { - if (err.name === 'NotFoundError') { - logger.debug('Certificate not found, creating new certificate: ' + JSON.stringify({ name, subject, hosts, ca })) - await CertificateService.createCertificateEndpoint({ - name, - subject: `${subject}`, - hosts, - ca - }, transaction) - logger.debug('Successfully created certificate: ' + name) - } else if (err.name === 'ConflictError') { - logger.debug('Certificate already exists (conflict): ' + name) - // Already exists, ignore - } else { - logger.error('Error in ensureCert - Name: ' + name + ', Subject: ' + subject + ', Hosts: ' + hosts + ', CA: ' + JSON.stringify(ca) + ', Error: ' + err.message + ', Type: ' + err.name + ', Code: ' + err.code) - logger.error('Stack trace: ' + err.stack) - throw err - } - } - } - - try { - // Always ensure SITE_CA_CERT exists - logger.debug('Ensuring SITE_CA_CERT exists') - await ensureCA(SITE_CA_CERT, SITE_CA_CERT) - - // If routerMode is 'none', only ensure DEFAULT_ROUTER_LOCAL_CA and its signed certificate - if (fogData.routerMode === 'none') { - logger.debug('Router mode is none, ensuring DEFAULT_ROUTER_LOCAL_CA exists') - await ensureCA(DEFAULT_ROUTER_LOCAL_CA, DEFAULT_ROUTER_LOCAL_CA) - logger.debug('Ensuring local-agent certificate signed by DEFAULT_ROUTER_LOCAL_CA') - const localHosts = await getLocalCertificateHosts(isKubernetes, namespace) - await ensureCert( - `${uuid}-local-agent`, - `${uuid}-local-agent`, - localHosts, - { type: 'direct', secretName: DEFAULT_ROUTER_LOCAL_CA }, - isRouterModeChanged - ) - logger.debug('Successfully completed _handleRouterCertificates for routerMode none') - return - } - - // For other router modes, ensure 
all other certificates - // Always ensure site-server cert exists - logger.debug('Ensuring site-server certificate exists') - const siteHosts = await getSiteCertificateHosts(fogData, transaction) - await ensureCert( - `${uuid}-site-server`, - `${uuid}-site-server`, - siteHosts, - { type: 'direct', secretName: SITE_CA_CERT }, - false - ) - - // Always ensure local-ca exists - logger.debug('Ensuring local-ca exists') - await ensureCA(`${uuid}-local-ca`, `${uuid}-local-ca`) - - // Always ensure local-server cert exists - logger.debug('Ensuring local-server certificate exists') - const localHosts = await getLocalCertificateHosts(isKubernetes, namespace) - await ensureCert( - `${uuid}-local-server`, - `${uuid}-local-server`, - localHosts, - { type: 'direct', secretName: `${uuid}-local-ca` }, - isRouterModeChanged - ) - - // Always ensure local-agent cert exists - logger.debug('Ensuring local-agent certificate exists') - await ensureCert( - `${uuid}-local-agent`, - `${uuid}-local-agent`, - localHosts, - { type: 'direct', secretName: `${uuid}-local-ca` }, - isRouterModeChanged - ) - - logger.debug('Successfully completed _handleRouterCertificates') - } catch (error) { - logger.error('Certificate operation failed - UUID: ' + uuid + ', RouterMode: ' + fogData.routerMode + ', Error: ' + error.message + ', Type: ' + error.name + ', Code: ' + error.code) - logger.error('Stack trace: ' + error.stack) - } -} - -async function createFogEndPoint (fogData, isCLI, transaction) { - await Validator.validate(fogData, Validator.schemas.iofogCreate) - - let createFogData = { - uuid: AppHelper.generateUUID(), - name: fogData.name, - location: fogData.location, - latitude: fogData.latitude, - longitude: fogData.longitude, - gpsMode: fogData.latitude || fogData.longitude ? 
'manual' : undefined, - description: fogData.description, - networkInterface: fogData.networkInterface, - dockerUrl: fogData.dockerUrl, - containerEngine: fogData.containerEngine, - deploymentType: fogData.deploymentType, - diskLimit: fogData.diskLimit, - diskDirectory: fogData.diskDirectory, - memoryLimit: fogData.memoryLimit, - cpuLimit: fogData.cpuLimit, - logLimit: fogData.logLimit, - logDirectory: fogData.logDirectory, - logFileCount: fogData.logFileCount, - statusFrequency: fogData.statusFrequency, - changeFrequency: fogData.changeFrequency, - deviceScanFrequency: fogData.deviceScanFrequency, - bluetoothEnabled: fogData.bluetoothEnabled, - watchdogEnabled: fogData.watchdogEnabled, - abstractedHardwareEnabled: fogData.abstractedHardwareEnabled, - fogTypeId: fogData.fogType, - logLevel: fogData.logLevel, - dockerPruningFrequency: fogData.dockerPruningFrequency, - availableDiskThreshold: fogData.availableDiskThreshold, - isSystem: fogData.isSystem, - host: fogData.host, - routerId: null, - timeZone: fogData.timeZone - } - - createFogData = AppHelper.deleteUndefinedFields(createFogData) - - // Default router is edge - fogData.routerMode = fogData.routerMode || 'edge' - - if (fogData.isSystem && fogData.routerMode !== 'interior') { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_MODE, fogData.routerMode)) - } - - if (fogData.isSystem && !!(await FogManager.findOne({ isSystem: true }, transaction))) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.DUPLICATE_SYSTEM_FOG)) - } - - const existingFog = await FogManager.findOne({ name: createFogData.name }, transaction) - if (existingFog) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.DUPLICATE_NAME, createFogData.name)) - } - - let defaultRouter, upstreamRouters - if (fogData.routerMode === 'none') { - const networkRouter = await RouterService.getNetworkRouter(fogData.networkRouter) - if (!networkRouter) { - throw new 
Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, !fogData.networkRouter ? Constants.DEFAULT_ROUTER_NAME : fogData.networkRouter)) - } - createFogData.routerId = networkRouter.id - } else { - defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - upstreamRouters = await RouterService.validateAndReturnUpstreamRouters(fogData.upstreamRouters, fogData.isSystem, defaultRouter) - } - - const fog = await FogManager.create(createFogData, transaction) - - // Set tags - await _setTags(fog, fogData.tags, transaction) - - // Add certificate handling - await _handleRouterCertificates(fogData, createFogData.uuid, false, transaction) - - if (fogData.routerMode !== 'none') { - if (!fogData.host && !isCLI) { - throw new Errors.ValidationError(ErrorMessages.HOST_IS_REQUIRED) - } - - await RouterService.createRouterForFog(fogData, fog.uuid, upstreamRouters) - - // --- Service Distribution Logic --- - // 1. Extract service tags - const serviceTags = await _extractServiceTags(fogData.tags) - - // 2. If service tags are not empty, find matching services - if (serviceTags.length > 0) { - const services = await _findMatchingServices(serviceTags, transaction) - - // 3. 
If services are not empty, build listeners and update router config - if (services.length > 0) { - // Get router microservice - const routerName = `router-${fog.uuid.toLowerCase()}` - const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) - if (!routerMicroservice) { - throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) - } - let config = JSON.parse(routerMicroservice.config || '{}') - - // For each service, build listener and merge - for (const service of services) { - const listenerConfig = _buildTcpListenerForFog(service, fog.uuid) - config = _mergeTcpListener(config, listenerConfig) - } - - // Update router microservice config - await MicroserviceManager.update( - { uuid: routerMicroservice.uuid }, - { config: JSON.stringify(config) }, - transaction - ) - - // Update change tracking - await ChangeTrackingService.update(fog.uuid, ChangeTrackingService.events.microserviceConfig, transaction) - } - } - } - - const res = { - uuid: fog.uuid - } - - await ChangeTrackingService.create(fog.uuid, transaction) - - if (fogData.abstractedHardwareEnabled) { - await _createHalMicroserviceForFog(fog, null, transaction) - } - - if (fogData.bluetoothEnabled) { - await _createBluetoothMicroserviceForFog(fog, null, transaction) - } - - await ChangeTrackingService.update(createFogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) - - return res -} - -async function _setTags (fogModel, tagsArray, transaction) { - if (tagsArray) { - let tags = [] - for (const tag of tagsArray) { - let tagModel = await TagsManager.findOne({ value: tag }, transaction) - if (!tagModel) { - tagModel = await TagsManager.create({ value: tag }, transaction) - } - tags.push(tagModel) - } - await fogModel.setTags(tags) - } -} - -async function updateFogEndPoint (fogData, isCLI, transaction) { - await Validator.validate(fogData, Validator.schemas.iofogUpdate) - - const queryFogData = { uuid: fogData.uuid } - - let 
updateFogData = { - name: fogData.name, - location: fogData.location, - latitude: fogData.latitude, - longitude: fogData.longitude, - gpsMode: fogData.latitude || fogData.longitude ? 'manual' : undefined, - description: fogData.description, - networkInterface: fogData.networkInterface, - dockerUrl: fogData.dockerUrl, - containerEngine: fogData.containerEngine, - deploymentType: fogData.deploymentType, - diskLimit: fogData.diskLimit, - diskDirectory: fogData.diskDirectory, - memoryLimit: fogData.memoryLimit, - cpuLimit: fogData.cpuLimit, - logLimit: fogData.logLimit, - logDirectory: fogData.logDirectory, - logFileCount: fogData.logFileCount, - statusFrequency: fogData.statusFrequency, - changeFrequency: fogData.changeFrequency, - deviceScanFrequency: fogData.deviceScanFrequency, - bluetoothEnabled: fogData.bluetoothEnabled, - watchdogEnabled: fogData.watchdogEnabled, - isSystem: fogData.isSystem, - abstractedHardwareEnabled: fogData.abstractedHardwareEnabled, - fogTypeId: fogData.fogType, - logLevel: fogData.logLevel, - dockerPruningFrequency: fogData.dockerPruningFrequency, - host: fogData.host, - availableDiskThreshold: fogData.availableDiskThreshold, - timeZone: fogData.timeZone - } - updateFogData = AppHelper.deleteUndefinedFields(updateFogData) - - const oldFog = await FogManager.findOne(queryFogData, transaction) - if (!oldFog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) - } - - // Update tags - await _setTags(oldFog, fogData.tags, transaction) - - if (updateFogData.name) { - const conflictQuery = isCLI - ? 
{ name: updateFogData.name, uuid: { [Op.not]: fogData.uuid } } - : { name: updateFogData.name, uuid: { [Op.not]: fogData.uuid } } - const conflict = await FogManager.findOne(conflictQuery, transaction) - if (conflict) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.DUPLICATE_NAME, updateFogData.name)) - } - } - - // Update router - // Get all router config informations - const router = await oldFog.getRouter() - const host = fogData.host || lget(router, 'host') - const upstreamRoutersConnections = router ? (await RouterConnectionManager.findAllWithRouters({ sourceRouter: router.id }, transaction) || []) : [] - const upstreamRoutersIofogUuid = fogData.upstreamRouters || await Promise.all(upstreamRoutersConnections.map(connection => connection.dest.iofogUuid)) - const routerMode = fogData.routerMode || (router ? (router.isEdge ? 'edge' : 'interior') : 'none') - const messagingPort = fogData.messagingPort || (router ? router.messagingPort : null) - const interRouterPort = fogData.interRouterPort || (router ? router.interRouterPort : null) - const edgeRouterPort = fogData.edgeRouterPort || (router ? router.edgeRouterPort : null) - let networkRouter - - const isSystem = updateFogData.isSystem === undefined ? oldFog.isSystem : updateFogData.isSystem - if (isSystem && routerMode !== 'interior') { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_MODE, fogData.routerMode)) - } - - let isRouterModeChanged = false - const oldRouterMode = (router ? (router.isEdge ? 
'edge' : 'interior') : 'none') - if (fogData.routerMode && fogData.routerMode !== oldRouterMode) { - if (fogData.routerMode === 'none' || oldRouterMode === 'none') { - isRouterModeChanged = true - } - } - // Add certificate handling - await _handleRouterCertificates(fogData, fogData.uuid, isRouterModeChanged, transaction) - - if (routerMode === 'none') { - networkRouter = await RouterService.getNetworkRouter(fogData.networkRouter) - if (!networkRouter) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, !fogData.networkRouter ? Constants.DEFAULT_ROUTER_NAME : fogData.networkRouter)) - } - // Only delete previous router if there is a network router - if (router) { - // New router mode is none, delete existing router - await _deleteFogRouter(fogData, transaction) - } - } else { - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - const upstreamRouters = await RouterService.validateAndReturnUpstreamRouters(upstreamRoutersIofogUuid, oldFog.isSystem, defaultRouter) - if (!router) { - // Router does not exist yet - networkRouter = await RouterService.createRouterForFog(fogData, oldFog.uuid, upstreamRouters) - // --- Service Distribution Logic --- - // 1. Extract service tags - const serviceTags = await _extractServiceTags(fogData.tags) - - // 2. If service tags are not empty, find matching services - if (serviceTags.length > 0) { - const services = await _findMatchingServices(serviceTags, transaction) - - // 3. 
If services are not empty, build listeners and update router config - if (services.length > 0) { - // Get router microservice - const routerName = `router-${fogData.uuid.toLowerCase()}` - const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) - if (!routerMicroservice) { - throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) - } - let config = JSON.parse(routerMicroservice.config || '{}') - - // For each service, build listener and merge - for (const service of services) { - const listenerConfig = _buildTcpListenerForFog(service, fogData.uuid) - config = _mergeTcpListener(config, listenerConfig) - } - - // Update router microservice config - await MicroserviceManager.update( - { uuid: routerMicroservice.uuid }, - { config: JSON.stringify(config) }, - transaction - ) - - // Update change tracking - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceConfig, transaction) - } - } - } else { - // Extract existing TCP connectors before updating config - const existingConnectors = await _extractExistingTcpConnectors(fogData.uuid, transaction) - // Update existing router - networkRouter = await RouterService.updateRouter(router, { - messagingPort, interRouterPort, edgeRouterPort, isEdge: routerMode === 'edge', host - }, upstreamRouters, fogData.containerEngine) - - // --- Service Distribution Logic --- - // 1. Extract service tags - const serviceTags = await _extractServiceTags(fogData.tags) - - // Get router microservice for config updates - const routerName = `router-${fogData.uuid.toLowerCase()}` - const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) - if (!routerMicroservice) { - throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) - } - let config = JSON.parse(routerMicroservice.config || '{}') - - // 2. 
If service tags are not empty, find matching services and build listeners - if (serviceTags.length > 0) { - const services = await _findMatchingServices(serviceTags, transaction) - - // 3. If services are not empty, build and merge listeners - if (services.length > 0) { - // For each service, build listener and merge - for (const service of services) { - const listenerConfig = _buildTcpListenerForFog(service, fogData.uuid) - config = _mergeTcpListener(config, listenerConfig) - } - } - } - - // 4. Merge back existing connectors if any - if (existingConnectors && Object.keys(existingConnectors).length > 0) { - for (const connectorName in existingConnectors) { - const connectorObj = existingConnectors[connectorName] - config = _mergeTcpConnector(config, connectorObj) - } - } - - // Update router microservice config - await MicroserviceManager.update( - { uuid: routerMicroservice.uuid }, - { config: JSON.stringify(config) }, - transaction - ) - - // Update change tracking - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceConfig, transaction) - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) - } - } - updateFogData.routerId = networkRouter.id - - // If router changed, set routerChanged flag - if (updateFogData.routerId !== oldFog.routerId || updateFogData.routerMode !== oldFog.routerMode) { - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.routerChanged, transaction) - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceList, transaction) - } - - await FogManager.update(queryFogData, updateFogData, transaction) - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.config, transaction) - - let msChanged = false - - // Update Microservice extra hosts - if (updateFogData.host && updateFogData.host !== oldFog.host) { - await _updateMicroserviceExtraHosts(fogData.uuid, 
updateFogData.host, transaction) - } - - if (oldFog.abstractedHardwareEnabled === true && fogData.abstractedHardwareEnabled === false) { - await _deleteHalMicroserviceByFog(fogData, transaction) - msChanged = true - } - if (oldFog.abstractedHardwareEnabled === false && fogData.abstractedHardwareEnabled === true) { - await _createHalMicroserviceForFog(fogData, oldFog, transaction) - msChanged = true - } - - if (oldFog.bluetoothEnabled === true && fogData.bluetoothEnabled === false) { - await _deleteBluetoothMicroserviceByFog(fogData, transaction) - msChanged = true - } - if (oldFog.bluetoothEnabled === false && fogData.bluetoothEnabled === true) { - await _createBluetoothMicroserviceForFog(fogData, oldFog, transaction) - msChanged = true - } - - if (msChanged) { - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.microserviceCommon, transaction) - } -} - -async function _updateMicroserviceExtraHosts (fogUuid, host, transaction) { - const microserviceExtraHosts = await MicroserviceExtraHostManager.findAll({ targetFogUuid: fogUuid }, transaction) - for (const extraHost of microserviceExtraHosts) { - extraHost.value = host - await extraHost.save() - // Update tracking change for microservice - await MicroserviceExtraHostManager.updateOriginMicroserviceChangeTracking(extraHost, transaction) - } -} - -async function _updateProxyRouters (fogId, router, transaction) { - const proxyCatalog = await CatalogService.getProxyCatalogItem(transaction) - const proxyMicroservices = await MicroserviceManager.findAll({ catalogItemId: proxyCatalog.id, iofogUuid: fogId }, transaction) - for (const proxyMicroservice of proxyMicroservices) { - const config = JSON.parse(proxyMicroservice.config || '{}') - config.networkRouter = { - host: router.host, - port: router.messagingPort - } - await MicroserviceManager.updateIfChanged({ uuid: proxyMicroservice.uuid }, { config: JSON.stringify(config) }, transaction) - await ChangeTrackingService.update(fogId, 
ChangeTrackingService.events.microserviceConfig, transaction) - } -} - -async function _deleteFogRouter (fogData, transaction) { - const router = await RouterManager.findOne({ iofogUuid: fogData.uuid }, transaction) - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - - // If agent had a router, delete router and update linked routers - if (!router) { - // Router mode is none, there is nothing to do - return - } - - const routerId = router.id - const routerConnections = await RouterConnectionManager.findAllWithRouters({ [Op.or]: [{ destRouter: routerId }, { sourceRouter: routerId }] }, transaction) - // Delete all router connections, and set routerChanged flag for linked routers - if (routerConnections) { - for (const connection of routerConnections) { - const router = connection.source.id === routerId ? connection.dest : connection.source - // Delete router connection - await RouterConnectionManager.delete({ id: connection.id }, transaction) - // Update config for downstream routers - if (connection.dest.id === routerId) { - // in order to keep downstream routers in the network, we connect them to default router - if (defaultRouter) { - await RouterConnectionManager.create({ sourceRouter: router.id, destRouter: defaultRouter.id }, transaction) - } - - // Update router config - await RouterService.updateConfig(router.id, fogData.containerEngine, transaction) - // Set routerChanged flag - await ChangeTrackingService.update(router.iofogUuid, ChangeTrackingService.events.routerChanged, transaction) - } - } - } - - // Connect the agents to default router - if (defaultRouter) { - const connectedAgents = await FogManager.findAll({ routerId }, transaction) - for (const connectedAgent of connectedAgents) { - await FogManager.update({ uuid: connectedAgent.uuid }, { routerId: defaultRouter.id }, transaction) - await _updateProxyRouters(connectedAgent.uuid, defaultRouter, transaction) - await ChangeTrackingService.update(connectedAgent.uuid, 
ChangeTrackingService.events.routerChanged, transaction) - } - } - // Delete router - await RouterManager.delete({ iofogUuid: fogData.uuid }, transaction) - // Delete router msvc - const routerCatalog = await CatalogService.getRouterCatalogItem(transaction) - await MicroserviceManager.delete({ catalogItemId: routerCatalog.id, iofogUuid: fogData.uuid }, transaction) - await ApplicationManager.delete({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) -} - -async function deleteFogEndPoint (fogData, isCLI, transaction) { - await Validator.validate(fogData, Validator.schemas.iofogDelete) - - const queryFogData = { uuid: fogData.uuid } - - const fog = await FogManager.findOne(queryFogData, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) - } - - await _deleteFogRouter(fogData, transaction) - - await _processDeleteCommand(fog, transaction) -} - -function _getRouterUuid (router, defaultRouter) { - return (defaultRouter && (router.id === defaultRouter.id)) ? Constants.DEFAULT_ROUTER_NAME : router.iofogUuid -} - -async function _getFogRouterConfig (fog, transaction) { - // Get fog router config - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - const router = await fog.getRouter() - const routerConfig = { - - } - // Router mode is either interior or edge - if (router) { - routerConfig.routerMode = router.isEdge ? 'edge' : 'interior' - routerConfig.messagingPort = router.messagingPort - if (routerConfig.routerMode === 'interior') { - routerConfig.interRouterPort = router.interRouterPort - routerConfig.edgeRouterPort = router.edgeRouterPort - } - // Get upstream routers - const upstreamRoutersConnections = await RouterConnectionManager.findAllWithRouters({ sourceRouter: router.id }, transaction) - routerConfig.upstreamRouters = upstreamRoutersConnections ? 
upstreamRoutersConnections.map(r => _getRouterUuid(r.dest, defaultRouter)) : [] - } else { - routerConfig.routerMode = 'none' - const networkRouter = await RouterManager.findOne({ id: fog.routerId }, transaction) - if (networkRouter) { - routerConfig.networkRouter = _getRouterUuid(networkRouter, defaultRouter) - } - } - - return routerConfig -} - -async function _getFogEdgeResources (fog, transaction) { - const resourceAttributes = [ - 'name', - 'version', - 'description', - 'interfaceProtocol', - 'displayName', - 'displayIcon', - 'displayColor' - ] - const resources = await fog.getEdgeResources({ attributes: resourceAttributes }) - return resources.map(EdgeResourceService.buildGetObject) -} - -async function _getFogVolumeMounts (fog, transaction) { - const volumeMountAttributes = [ - 'name', - 'version', - 'configMapName', - 'secretName' - ] - const volumeMounts = await fog.getVolumeMounts({ attributes: volumeMountAttributes }) - return volumeMounts.map(vm => { - return { - name: vm.name, - version: vm.version, - configMapName: vm.configMapName, - secretName: vm.secretName - } - }) -} - -async function _getFogExtraInformation (fog, transaction) { - const routerConfig = await _getFogRouterConfig(fog, transaction) - const edgeResources = await _getFogEdgeResources(fog, transaction) - const volumeMounts = await _getFogVolumeMounts(fog, transaction) - // Transform to plain JS object - if (fog.toJSON && typeof fog.toJSON === 'function') { - fog = fog.toJSON() - } - return { ...fog, tags: _mapTags(fog), ...routerConfig, edgeResources, volumeMounts } -} - -// Map tags to string array -// Return plain JS object -function _mapTags (fog) { - return fog.tags ? 
fog.tags.map(t => t.value) : [] -} - -/** - * Extracts service-related tags from fog node tags - * @param {Array} fogTags - Array of tags from fog node - * @returns {Array} Array of service tags (e.g., ["all", "foo", "bar"]) - */ -async function _extractServiceTags (fogTags) { - if (!fogTags || !Array.isArray(fogTags)) { - return [] - } - - // Filter tags that start with SERVICE_ANNOTATION_TAG - const serviceTags = fogTags - .filter(tag => tag.startsWith(SERVICE_ANNOTATION_TAG)) - .map(tag => { - // Extract the value after the colon - const parts = tag.split(':') - return parts.length > 1 ? parts[1].trim() : '' - }) - .filter(tag => tag !== '') // Remove empty tags - - // If we have "all" tag, return just that - if (serviceTags.includes('all')) { - return ['all'] - } - - return serviceTags -} - -async function getFog (fogData, isCLI, transaction) { - await Validator.validate(fogData, Validator.schemas.iofogGet) - - const queryFogData = fogData.uuid ? { uuid: fogData.uuid } : { name: fogData.name } - - const fog = await FogManager.findOneWithTags(queryFogData, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) - } - - return _getFogExtraInformation(fog, transaction) -} - -async function getFogEndPoint (fogData, isCLI, transaction) { - return getFog(fogData, isCLI, transaction) -} - -// async function getFogListEndPoint (filters, isCLI, isSystem, transaction) { -async function getFogListEndPoint (filters, isCLI, transaction) { - await Validator.validate(filters, Validator.schemas.iofogFilters) - - // // If listing system agent through REST API, make sure user is authenticated - // if (isSystem && !isCLI && !lget('id')) { - // throw new Errors.AuthenticationError('Unauthorized') - // } - - // const queryFogData = isSystem ? { isSystem } : (isCLI ? 
{} : { isSystem: false }) - const queryFogData = {} - - let fogs = await FogManager.findAllWithTags(queryFogData, transaction) - fogs = _filterFogs(fogs, filters) - - // Map all tags - // Get router config info for all fogs - fogs = await Promise.all(fogs.map(async (fog) => _getFogExtraInformation(fog, transaction))) - return { - fogs - } -} - -async function generateProvisioningKeyEndPoint (fogData, isCLI, transaction) { - await Validator.validate(fogData, Validator.schemas.iofogGenerateProvision) - - const queryFogData = { uuid: fogData.uuid } - - const newProvision = { - iofogUuid: fogData.uuid, - provisionKey: AppHelper.generateRandomString(16), - expirationTime: new Date().getTime() + (10 * 60 * 1000) - } - - const fog = await FogManager.findOne(queryFogData, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) - } - - const provisioningKeyData = await FogProvisionKeyManager.updateOrCreate({ iofogUuid: fogData.uuid }, newProvision, transaction) - - const devMode = process.env.DEV_MODE || config.get('server.devMode') - const sslCert = process.env.SSL_CERT || config.get('server.ssl.path.cert') - const intermedKey = process.env.INTERMEDIATE_CERT || config.get('server.ssl.path.intermediateCert') - const sslCertBase64 = config.get('server.ssl.base64.cert') - const intermedKeyBase64 = config.get('server.ssl.base64.intermediateCert') - const hasFileBasedSSL = !devMode && sslCert - const hasBase64SSL = !devMode && sslCertBase64 - let caCert = '' - - if (!devMode) { - if (hasFileBasedSSL) { - try { - if (intermedKey) { - const certData = fs.readFileSync(intermedKey) - caCert = Buffer.from(certData).toString('base64') - } else { - const certData = fs.readFileSync(sslCert) - caCert = Buffer.from(certData).toString('base64') - } - } catch (error) { - throw new Errors.ValidationError('Failed to read SSL certificate file') - } - } - if (hasBase64SSL) { - if (intermedKeyBase64) { - caCert = 
intermedKeyBase64 - } else if (sslCertBase64) { - caCert = sslCertBase64 - } - } - } - return { - key: provisioningKeyData.provisionKey, - expirationTime: provisioningKeyData.expirationTime, - caCert: caCert - } -} - -async function setFogVersionCommandEndPoint (fogVersionData, isCLI, transaction) { - await Validator.validate(fogVersionData, Validator.schemas.iofogSetVersionCommand) - - const queryFogData = { uuid: fogVersionData.uuid } - - const newVersionCommand = { - iofogUuid: fogVersionData.uuid, - versionCommand: fogVersionData.versionCommand - } - - const fog = await FogManager.findOne(queryFogData, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, queryFogData.uuid)) - } - - if (!fog.isReadyToRollback && fogVersionData.versionCommand === 'rollback') { - throw new Errors.ValidationError(ErrorMessages.INVALID_VERSION_COMMAND_ROLLBACK) - } - if (!fog.isReadyToUpgrade && fogVersionData.versionCommand === 'upgrade') { - throw new Errors.ValidationError(ErrorMessages.INVALID_VERSION_COMMAND_UPGRADE) - } - - await generateProvisioningKeyEndPoint({ uuid: fogVersionData.uuid }, isCLI, transaction) - await FogVersionCommandManager.updateOrCreate({ iofogUuid: fogVersionData.uuid }, newVersionCommand, transaction) - await ChangeTrackingService.update(fogVersionData.uuid, ChangeTrackingService.events.version, transaction) -} - -async function setFogRebootCommandEndPoint (fogData, isCLI, transaction) { - await Validator.validate(fogData, Validator.schemas.iofogReboot) - - const queryFogData = { uuid: fogData.uuid } - - const fog = await FogManager.findOne(queryFogData, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) - } - - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.reboot, transaction) -} - -async function getHalHardwareInfoEndPoint (uuidObj, isCLI, transaction) { - await 
Validator.validate(uuidObj, Validator.schemas.halGet) - - const fog = await FogManager.findOne({ - uuid: uuidObj.uuid - }, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, uuidObj.uuid)) - } - - return HWInfoManager.findOne({ - iofogUuid: uuidObj.uuid - }, transaction) -} - -async function getHalUsbInfoEndPoint (uuidObj, isCLI, transaction) { - await Validator.validate(uuidObj, Validator.schemas.halGet) - - const fog = await FogManager.findOne({ - uuid: uuidObj.uuid - }, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, uuidObj.uuid)) - } - - return USBInfoManager.findOne({ - iofogUuid: uuidObj.uuid - }, transaction) -} - -function _filterFogs (fogs, filters) { - if (!filters) { - return fogs - } - - const filtered = [] - fogs.forEach((fog) => { - let isMatchFog = true - filters.some((filter) => { - const fld = filter.key - const val = filter.value - const condition = filter.condition - const isMatchField = (condition === 'equals' && fog[fld] && fog[fld] === val) || - (condition === 'has' && fog[fld] && fog[fld].includes(val)) - if (!isMatchField) { - isMatchFog = false - return false - } - }) - if (isMatchFog) { - filtered.push(fog) - } - }) - return filtered -} - -async function _processDeleteCommand (fog, transaction) { - const microservices = await MicroserviceManager.findAll({ iofogUuid: fog.uuid }, transaction) - for (const microservice of microservices) { - await MicroserviceService.deleteMicroserviceWithRoutesAndPortMappings(microservice, transaction) - } - - await ChangeTrackingService.update(fog.uuid, ChangeTrackingService.events.deleteNode, transaction) - await FogManager.delete({ uuid: fog.uuid }, transaction) -} - -async function _createHalMicroserviceForFog (fogData, oldFog, transaction) { - const halItem = await CatalogService.getHalCatalogItem(transaction) - - const halMicroserviceData = { - uuid: 
AppHelper.generateUUID(), - name: `hal-${fogData.uuid.toLowerCase()}`, - config: '{}', - catalogItemId: halItem.id, - iofogUuid: fogData.uuid, - rootHostAccess: true, - logSize: Constants.MICROSERVICE_DEFAULT_LOG_SIZE, - configLastUpdated: Date.now() - } - - const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) - halMicroserviceData.applicationId = application.id - await MicroserviceManager.create(halMicroserviceData, transaction) - await MicroserviceStatusManager.create({ microserviceUuid: halMicroserviceData.uuid }, transaction) -} - -async function _deleteHalMicroserviceByFog (fogData, transaction) { - const halItem = await CatalogService.getHalCatalogItem(transaction) - const deleteHalMicroserviceData = { - iofogUuid: fogData.uuid, - catalogItemId: halItem.id - } - - const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) - deleteHalMicroserviceData.applicationId = application.id - await MicroserviceManager.delete(deleteHalMicroserviceData, transaction) -} - -async function _createBluetoothMicroserviceForFog (fogData, oldFog, transaction) { - const bluetoothItem = await CatalogService.getBluetoothCatalogItem(transaction) - - const bluetoothMicroserviceData = { - uuid: AppHelper.generateUUID(), - name: `ble-${fogData.uuid.toLowerCase()}`, - config: '{}', - catalogItemId: bluetoothItem.id, - iofogUuid: fogData.uuid, - rootHostAccess: true, - logSize: Constants.MICROSERVICE_DEFAULT_LOG_SIZE, - configLastUpdated: Date.now() - } - - const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) - bluetoothMicroserviceData.applicationId = application.id - await MicroserviceManager.create(bluetoothMicroserviceData, transaction) - await MicroserviceStatusManager.create({ microserviceUuid: bluetoothMicroserviceData.uuid }, transaction) -} - -async function _deleteBluetoothMicroserviceByFog 
(fogData, transaction) { - const bluetoothItem = await CatalogService.getBluetoothCatalogItem(transaction) - const deleteBluetoothMicroserviceData = { - iofogUuid: fogData.uuid, - catalogItemId: bluetoothItem.id - } - const application = await ApplicationManager.findOne({ name: `system-${fogData.uuid.toLowerCase()}` }, transaction) - deleteBluetoothMicroserviceData.applicationId = application.id - - await MicroserviceManager.delete(deleteBluetoothMicroserviceData, transaction) -} - -async function setFogPruneCommandEndPoint (fogData, isCLI, transaction) { - await Validator.validate(fogData, Validator.schemas.iofogPrune) - - const queryFogData = { uuid: fogData.uuid } - - const fog = await FogManager.findOne(queryFogData, transaction) - if (!fog) { - throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, fogData.uuid)) - } - - await ChangeTrackingService.update(fogData.uuid, ChangeTrackingService.events.prune, transaction) -} - -/** - * Finds services that match the fog node's service tags - * @param {Array} serviceTags - Array of service tags from fog node - * @param {Object} transaction - Database transaction - * @returns {Promise>} Array of matching services - */ -async function _findMatchingServices (serviceTags, transaction) { - if (!serviceTags || serviceTags.length === 0) { - return [] - } - - // If 'all' tag is present, get all services - if (serviceTags.includes('all')) { - return ServiceManager.findAllWithTags({}, transaction) - } - - // For each service tag, find matching services - const servicesPromises = serviceTags.map(async (tag) => { - const queryData = { - '$tags.value$': `${tag}` - } - return ServiceManager.findAllWithTags(queryData, transaction) - }) - - // Wait for all queries to complete - const servicesArrays = await Promise.all(servicesPromises) - - // Flatten arrays and remove duplicates based on service name - const seen = new Set() - const uniqueServices = servicesArrays - .flat() - .filter(service => { - 
if (seen.has(service.name)) { - return false - } - seen.add(service.name) - return true - }) - - return uniqueServices -} - -/** - * Builds TCP listener configuration for a service on a specific fog node - * @param {Object} service - Service object containing name and bridgePort - * @param {string} fogNodeUuid - UUID of the fog node - * @returns {Object} TCP listener configuration - */ -function _buildTcpListenerForFog (service, fogNodeUuid) { - return { - name: `${service.name}-listener`, - port: service.bridgePort.toString(), - address: service.name, - siteId: fogNodeUuid - } -} - -/** - * Gets the router microservice configuration for a fog node - * @param {string} fogNodeUuid - UUID of the fog node - * @param {Object} transaction - Database transaction - * @returns {Promise} Router microservice configuration - */ -async function _getRouterMicroserviceConfig (fogNodeUuid, transaction) { - const routerName = `router-${fogNodeUuid.toLowerCase()}` - const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) - if (!routerMicroservice) { - throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) - } - const routerConfig = JSON.parse(routerMicroservice.config || '{}') - return routerConfig -} - -/** - * Extracts existing TCP connectors from router configuration - * @param {string} fogNodeUuid - UUID of the fog node - * @param {Object} transaction - Database transaction - * @returns {Promise} Object containing TCP connectors - */ -async function _extractExistingTcpConnectors (fogNodeUuid, transaction) { - const routerConfig = await _getRouterMicroserviceConfig(fogNodeUuid, transaction) - // Return empty object if no bridges or tcpConnectors exist - if (!routerConfig.bridges || !routerConfig.bridges.tcpConnectors) { - return {} - } - - return routerConfig.bridges.tcpConnectors -} - -/** - * Merges a single TCP connector into router configuration - * @param {Object} routerConfig - Base router configuration - * 
@param {Object} connectorObj - TCP connector object (must have 'name' property) - * @returns {Object} Updated router configuration - */ -function _mergeTcpConnector (routerConfig, connectorObj) { - if (!connectorObj || !connectorObj.name) { - throw new Error('Connector object must have a name property') - } - if (!routerConfig.bridges) { - routerConfig.bridges = {} - } - if (!routerConfig.bridges.tcpConnectors) { - routerConfig.bridges.tcpConnectors = {} - } - routerConfig.bridges.tcpConnectors[connectorObj.name] = connectorObj - return routerConfig -} - -/** - * Merges a single TCP listener into router configuration - * @param {Object} routerConfig - Base router configuration - * @param {Object} listenerObj - TCP listener object (must have 'name' property) - * @returns {Object} Updated router configuration - */ -function _mergeTcpListener (routerConfig, listenerObj) { - if (!listenerObj || !listenerObj.name) { - throw new Error('Listener object must have a name property') - } - if (!routerConfig.bridges) { - routerConfig.bridges = {} - } - if (!routerConfig.bridges.tcpListeners) { - routerConfig.bridges.tcpListeners = {} - } - routerConfig.bridges.tcpListeners[listenerObj.name] = listenerObj - return routerConfig -} - -module.exports = { - createFogEndPoint: TransactionDecorator.generateTransaction(createFogEndPoint), - updateFogEndPoint: TransactionDecorator.generateTransaction(updateFogEndPoint), - deleteFogEndPoint: TransactionDecorator.generateTransaction(deleteFogEndPoint), - getFogEndPoint: TransactionDecorator.generateTransaction(getFogEndPoint), - getFogListEndPoint: TransactionDecorator.generateTransaction(getFogListEndPoint), - generateProvisioningKeyEndPoint: TransactionDecorator.generateTransaction(generateProvisioningKeyEndPoint), - setFogVersionCommandEndPoint: TransactionDecorator.generateTransaction(setFogVersionCommandEndPoint), - setFogRebootCommandEndPoint: TransactionDecorator.generateTransaction(setFogRebootCommandEndPoint), - 
getHalHardwareInfoEndPoint: TransactionDecorator.generateTransaction(getHalHardwareInfoEndPoint), - getHalUsbInfoEndPoint: TransactionDecorator.generateTransaction(getHalUsbInfoEndPoint), - getFog: getFog, - setFogPruneCommandEndPoint: TransactionDecorator.generateTransaction(setFogPruneCommandEndPoint), - _extractServiceTags, - _findMatchingServices: TransactionDecorator.generateTransaction(_findMatchingServices), - _buildTcpListenerForFog, - _getRouterMicroserviceConfig: TransactionDecorator.generateTransaction(_getRouterMicroserviceConfig), - _extractExistingTcpConnectors: TransactionDecorator.generateTransaction(_extractExistingTcpConnectors), - _mergeTcpConnector, - _mergeTcpListener, - checkKubernetesEnvironment, - _handleRouterCertificates: TransactionDecorator.generateTransaction(_handleRouterCertificates) -} diff --git a/test/backup/services-service.js b/test/backup/services-service.js deleted file mode 100644 index f2dd81ca..00000000 --- a/test/backup/services-service.js +++ /dev/null @@ -1,1261 +0,0 @@ -/* - * ******************************************************************************* - * * Copyright (c) 2023 Datasance Teknoloji A.S. - * * - * * This program and the accompanying materials are made available under the - * * terms of the Eclipse Public License v. 
2.0 which is available at - * * http://www.eclipse.org/legal/epl-2.0 - * * - * * SPDX-License-Identifier: EPL-2.0 - * ******************************************************************************* - * - */ - -const TransactionDecorator = require('../decorators/transaction-decorator') -const ServiceManager = require('../data/managers/service-manager') -const MicroserviceManager = require('../data/managers/microservice-manager') -const RouterManager = require('../data/managers/router-manager') -const RouterConnectionManager = require('../data/managers/router-connection-manager') -const K8sClient = require('../utils/k8s-client') -const AppHelper = require('../helpers/app-helper') -const config = require('../config') -const Errors = require('../helpers/errors') -const ErrorMessages = require('../helpers/error-messages') -const Validator = require('../schemas') -const logger = require('../logger') -const FogManager = require('../data/managers/iofog-manager') -const TagsManager = require('../data/managers/tags-manager') -const ChangeTrackingService = require('./change-tracking-service') -const ApplicationManager = require('../data/managers/application-manager') -// const { Op } = require('sequelize') - -const K8S_ROUTER_CONFIG_MAP = 'pot-router' -const SERVICE_ANNOTATION_TAG = 'service.datasance.com/tag' - -// Map service tags to string array -// Return plain JS object -function _mapTags (service) { - return service.tags ? 
service.tags.map(t => t.value) : [] -} - -async function _setTags (serviceModel, tagsArray, transaction) { - if (tagsArray) { - let tags = [] - for (const tag of tagsArray) { - let tagModel = await TagsManager.findOne({ value: tag }, transaction) - if (!tagModel) { - tagModel = await TagsManager.create({ value: tag }, transaction) - } - tags.push(tagModel) - } - await serviceModel.setTags(tags) - } -} - -async function handleServiceDistribution (serviceTags, transaction) { - // Always find fog nodes with 'all' tag - const allTaggedFogNodes = await FogManager.findAllWithTags({ - '$tags.value$': `${SERVICE_ANNOTATION_TAG}: all` - }, transaction) - - // If serviceTags is null or empty, return only fog nodes with 'all' tag - if (!serviceTags || serviceTags.length === 0) { - const uuids = allTaggedFogNodes.map(fog => fog.uuid) - return uuids - } - - // Filter tags that don't contain ':' or '=' - const filteredServiceTags = serviceTags - .filter(tag => tag != null) - .map(tag => String(tag)) - .filter(tag => !tag.includes(':') && !tag.includes('=')) - .filter(tag => tag.length > 0) - - if (filteredServiceTags.length === 0) { - const uuids = allTaggedFogNodes.map(fog => fog.uuid) - return uuids - } - - // Find fog nodes for each filtered tag - const specificTaggedFogNodes = new Set() - for (const tag of filteredServiceTags) { - const fogNodes = await FogManager.findAllWithTags({ - '$tags.value$': `${SERVICE_ANNOTATION_TAG}: ${tag}` - }, transaction) - fogNodes.forEach(fog => specificTaggedFogNodes.add(fog.uuid)) - } - - // Get all tag fog node UUIDs - const allTagUuids = allTaggedFogNodes.map(fog => fog.uuid) - - // Combine both sets of fog nodes and remove duplicates - const allFogUuids = new Set([...allTagUuids, ...Array.from(specificTaggedFogNodes)]) - - return Array.from(allFogUuids) -} - -async function checkKubernetesEnvironment () { - const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') - return controlPlane && controlPlane.toLowerCase() 
=== 'kubernetes' -} - -async function validateNonK8sType (serviceConfig) { - const isK8s = await checkKubernetesEnvironment() - if (serviceConfig.type.toLowerCase() !== 'k8s' && isK8s) { - if (!serviceConfig.k8sType || !serviceConfig.servicePort) { - throw new Errors.ValidationError('Kubernetes environment is required for k8s service type(LoadBalancer or ClusterIP or NodePort) and service port') - } - } -} - -async function _validateServiceName (serviceConfig) { - if (serviceConfig.name.toLowerCase() === 'controller' || serviceConfig.name.toLowerCase() === 'router' || serviceConfig.name.toLowerCase() === 'router-internal' || serviceConfig.name.toLowerCase() === 'docker' || serviceConfig.name.toLowerCase() === 'podman' || serviceConfig.name.toLowerCase() === 'kubernetes') { - throw new Errors.ValidationError('Service name cannot be "controller" or "router" or "router-internal" or "docker"') - } -} - -async function validateMicroserviceType (serviceConfig, transaction) { - if (serviceConfig.type.toLowerCase() !== 'microservice') { - return - } - - let microserviceUuid = serviceConfig.resource - - // If resource contains "/", it means user provided "/" - if (serviceConfig.resource.includes('/')) { - const [appName, microserviceName] = serviceConfig.resource.split('/') - const app = await ApplicationManager.findOne({ name: appName }, transaction) - if (!app) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_APPLICATION_NAME, appName)) - } - const microservice = await MicroserviceManager.findOne({ - name: microserviceName, - applicationId: app.id - }, transaction) - - if (!microservice) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_NAME, serviceConfig.resource)) - } - - microserviceUuid = microservice.uuid - } else { - // User provided UUID directly, validate if microservice exists - const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) - if 
(!microservice) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, serviceConfig.resource)) - } - } - - // Update resource to be the microservice UUID - serviceConfig.resource = microserviceUuid -} - -async function validateFogServiceType (serviceConfig, transaction) { - if (serviceConfig.type.toLowerCase() !== 'agent') { - return - } - - // First try to find fog node by name - let fogNode = await FogManager.findOne({ name: serviceConfig.resource }, transaction) - - // If not found by name, try to find by UUID - if (!fogNode) { - fogNode = await FogManager.findOne({ uuid: serviceConfig.resource }, transaction) - } - - // If still not found, throw error - if (!fogNode) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_IOFOG_UUID, serviceConfig.resource)) - } - - // Always set resource to be the fog node UUID - serviceConfig.resource = fogNode.uuid -} - -async function validateDefaultBridge (serviceConfig, transaction) { - // If defaultBridge is empty, set it to 'default-router' - if (!serviceConfig.defaultBridge) { - logger.debug('Setting default bridge to default-router') - serviceConfig.defaultBridge = 'default-router' - return - } - - // If service type is not microservice or agent, defaultBridge must be 'default-router' - if (serviceConfig.type.toLowerCase() !== 'microservice' && serviceConfig.type.toLowerCase() !== 'agent') { - if (serviceConfig.defaultBridge !== 'default-router') { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_DEFAULT_BRIDGE, serviceConfig.defaultBridge)) - } - return - } - - // For microservice or agent type, if user provided a UUID instead of 'default-router' - if (serviceConfig.defaultBridge !== 'default-router') { - let iofogUuid - - if (serviceConfig.type.toLowerCase() === 'microservice') { - // Get the microservice to find its iofog node - const microservice = await MicroserviceManager.findOne({ uuid: 
serviceConfig.resource }, transaction) - if (!microservice) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_MICROSERVICE_UUID, serviceConfig.resource)) - } - iofogUuid = microservice.iofogUuid - } else if (serviceConfig.type.toLowerCase() === 'agent') { - // For agent type, the resource is the agent UUID - iofogUuid = serviceConfig.resource - } - - // Get the router for the iofog node - const router = await RouterManager.findOne({ iofogUuid: iofogUuid }, transaction) - if (!router) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, iofogUuid)) - } - - // Check if the router has a connection to the specified upstream router - const upstreamRouter = await RouterManager.findOne({ iofogUuid: serviceConfig.defaultBridge }, transaction) - if (!upstreamRouter) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER, serviceConfig.defaultBridge)) - } - - const routerConnection = await RouterConnectionManager.findOne({ - sourceRouter: router.id, - destRouter: upstreamRouter.id - }, transaction) - - if (!routerConnection) { - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.INVALID_ROUTER_CONNECTION, serviceConfig.defaultBridge, router.id)) - } - } -} - -async function defineBridgePort (serviceConfig, transaction) { - // Get bridge port range from environment or config - const bridgePortRangeStr = process.env.BRIDGE_PORTS_RANGE || config.get('bridgePorts.range') || '10024-65535' - const [startStr, endStr] = bridgePortRangeStr.split('-') - const start = parseInt(startStr) - const end = parseInt(endStr) - - // Get all existing services to check used ports - const existingServices = await ServiceManager.findAll({}, transaction) - const usedPorts = new Set(existingServices.map(service => service.bridgePort)) - - // Find the first available port in the range - let bridgePort = start - while (bridgePort <= end) { - if (!usedPorts.has(bridgePort)) { - 
serviceConfig.bridgePort = bridgePort - return - } - bridgePort++ - } - - // If we get here, no ports are available - throw new Errors.ValidationError(AppHelper.formatMessage(ErrorMessages.NO_AVAILABLE_BRIDGE_PORT, bridgePortRangeStr)) -} - -// Helper function to determine host based on service type -async function _determineConnectorHost (serviceConfig, transaction) { - switch (serviceConfig.type.toLowerCase()) { - case 'microservice': - const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) - if (microservice.rootHostAccess) { - return 'iofog' - } else { - return `iofog_${serviceConfig.resource}` - } - case 'agent': - return 'iofog' - case 'k8s': - case 'external': - return serviceConfig.resource - default: - throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) - } -} - -// Helper function to determine siteId for connector -async function _determineConnectorSiteId (serviceConfig, transaction) { - switch (serviceConfig.type.toLowerCase()) { - case 'microservice': { - const microservice = await MicroserviceManager.findOne({ uuid: serviceConfig.resource }, transaction) - if (!microservice) { - throw new Errors.NotFoundError(`Microservice not found: ${serviceConfig.resource}`) - } - return microservice.iofogUuid - } - case 'agent': - return serviceConfig.resource - case 'k8s': - case 'external': - return 'default-router' - default: - throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) - } -} - -// Helper function to determine processId for connector -async function _determineConnectorProcessId (serviceConfig) { - switch (serviceConfig.type.toLowerCase()) { - case 'microservice': - return serviceConfig.resource - case 'agent': - return `${serviceConfig.resource}-local-${serviceConfig.targetPort}` - case 'k8s': - return `${serviceConfig.resource}-k8s-${serviceConfig.targetPort}` - case 'external': - return 
`${serviceConfig.resource}-external-${serviceConfig.targetPort}` - default: - throw new Errors.ValidationError(`Invalid service type: ${serviceConfig.type}`) - } -} - -// Helper function to build tcpConnector configuration -async function _buildTcpConnector (serviceConfig, transaction) { - const host = await _determineConnectorHost(serviceConfig, transaction) - const siteId = await _determineConnectorSiteId(serviceConfig, transaction) - const processId = await _determineConnectorProcessId(serviceConfig) - - return { - name: `${serviceConfig.name}-connector`, - host, - port: serviceConfig.targetPort.toString(), - address: serviceConfig.name, - siteId, - processId - } -} - -// Helper function to build tcpListener configuration -async function _buildTcpListener (serviceConfig, fogNodeUuid = null) { - const listener = { - name: `${serviceConfig.name}-listener`, - port: serviceConfig.bridgePort.toString(), - address: serviceConfig.name, - siteId: fogNodeUuid || serviceConfig.defaultBridge - } - return listener -} - -// Helper function to get router microservice by fog node UUID -async function _getRouterMicroservice (fogNodeUuid, transaction) { - const routerName = `router-${fogNodeUuid.toLowerCase()}` - const routerMicroservice = await MicroserviceManager.findOne({ name: routerName }, transaction) - if (!routerMicroservice) { - throw new Errors.NotFoundError(`Router microservice not found: ${routerName}`) - } - return routerMicroservice -} - -// Helper function to update router config in Kubernetes environment -async function _updateK8sRouterConfig (config) { - const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) - if (!configMap) { - throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) - } - - const patchData = { - data: { - 'skrouterd.json': JSON.stringify(config) - } - } - - await K8sClient.patchConfigMap(K8S_ROUTER_CONFIG_MAP, patchData) -} - -// Helper function to update router microservice config -async function 
_updateRouterMicroserviceConfig (fogNodeUuid, config, transaction) { - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - - // Update microservice with the provided config - await MicroserviceManager.update( - { uuid: routerMicroservice.uuid }, - { config: JSON.stringify(config) }, - transaction - ) - - // Update change tracking - await ChangeTrackingService.update(fogNodeUuid, ChangeTrackingService.events.microserviceConfig, transaction) -} - -// Helper function to add tcpConnector to router config -async function _addTcpConnector (serviceConfig, transaction) { - const isK8s = await checkKubernetesEnvironment() - const connector = await _buildTcpConnector(serviceConfig, transaction) - const siteId = connector.siteId - - if (siteId === 'default-router') { - if (isK8s) { - // Update K8s router config - logger.debug('Updating K8s router config') - const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) - if (!configMap) { - logger.error('ConfigMap not found:' + K8S_ROUTER_CONFIG_MAP) - throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) - } - - const routerConfig = JSON.parse(configMap.data['skrouterd.json']) - // Add new connector to the array - routerConfig.push(['tcpConnector', connector]) - - await _updateK8sRouterConfig(routerConfig) - } else { - // Update default router microservice config - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - if (!defaultRouter) { - logger.error('Default router not found') - throw new Errors.NotFoundError('Default router not found') - } - const fogNodeUuid = defaultRouter.iofogUuid - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - - if (!currentConfig.bridges) { - currentConfig.bridges = {} - } - if (!currentConfig.bridges.tcpConnectors) { - currentConfig.bridges.tcpConnectors = {} - } - 
currentConfig.bridges.tcpConnectors[connector.name] = connector - - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } - } else { - // Update specific router microservice config - const fogNodeUuid = siteId - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - - if (!currentConfig.bridges) { - currentConfig.bridges = {} - } - if (!currentConfig.bridges.tcpConnectors) { - currentConfig.bridges.tcpConnectors = {} - } - currentConfig.bridges.tcpConnectors[connector.name] = connector - - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } -} - -// Helper function to add tcpListener to router config -async function _addTcpListener (serviceConfig, transaction) { - const isK8s = await checkKubernetesEnvironment() - - // First handle K8s case if we're in K8s environment - if (isK8s) { - const k8sListener = await _buildTcpListener(serviceConfig, null) // null for K8s case - const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) - if (!configMap) { - logger.error('ConfigMap not found:' + K8S_ROUTER_CONFIG_MAP) - throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) - } - - const routerConfig = JSON.parse(configMap.data['skrouterd.json']) - // Add new listener to the array - routerConfig.push(['tcpListener', k8sListener]) - - await _updateK8sRouterConfig(routerConfig) - } - - // Handle distributed router microservice cases - // Get list of fog nodes that need this listener - const fogNodeUuids = await handleServiceDistribution(serviceConfig.tags, transaction) - - // If not in K8s environment, always include default router - if (!isK8s) { - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - if (!defaultRouter) { - logger.error('Default router not found') - throw new Errors.NotFoundError('Default router not found') - } - // Add default 
router if not already in the list - if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { - fogNodeUuids.push(defaultRouter.iofogUuid) - } - } - // else if (!fogNodeUuids || fogNodeUuids.length === 0) { - // // If in K8s and no fog nodes found, add default router - // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - // if (!defaultRouter) { - // logger.error('Default router not found') - // throw new Errors.NotFoundError('Default router not found') - // } - // fogNodeUuids.push(defaultRouter.iofogUuid) - // } - - // Add listener to each router microservice - for (const fogNodeUuid of fogNodeUuids) { - try { - const listener = await _buildTcpListener(serviceConfig, fogNodeUuid) - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - if (!currentConfig.bridges) currentConfig.bridges = {} - if (!currentConfig.bridges.tcpListeners) currentConfig.bridges.tcpListeners = {} - currentConfig.bridges.tcpListeners[listener.name] = listener - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } catch (err) { - if (err instanceof Errors.NotFoundError) { - logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) - continue - } - throw err - } - } -} - -// Helper function to update tcpConnector in router config -async function _updateTcpConnector (serviceConfig, transaction) { - const isK8s = await checkKubernetesEnvironment() - const connector = await _buildTcpConnector(serviceConfig, transaction) - const siteId = connector.siteId - - if (siteId === 'default-router') { - if (isK8s) { - // Update K8s router config - const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) - if (!configMap) { - throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) - } - - const routerConfig = JSON.parse(configMap.data['skrouterd.json']) - // Find and update the 
existing connector - const connectorIndex = routerConfig.findIndex(item => - item[0] === 'tcpConnector' && item[1].name === connector.name - ) - if (connectorIndex !== -1) { - routerConfig[connectorIndex] = ['tcpConnector', connector] - } - - await _updateK8sRouterConfig(routerConfig) - } else { - // Update default router microservice config - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - if (!defaultRouter) { - throw new Errors.NotFoundError('Default router not found') - } - const fogNodeUuid = defaultRouter.iofogUuid - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - - if (!currentConfig.bridges) { - currentConfig.bridges = {} - } - if (!currentConfig.bridges.tcpConnectors) { - currentConfig.bridges.tcpConnectors = {} - } - // Update the connector with the same name - currentConfig.bridges.tcpConnectors[connector.name] = connector - - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } - } else { - // Update specific router microservice config - const fogNodeUuid = siteId - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - - if (!currentConfig.bridges) { - currentConfig.bridges = {} - } - if (!currentConfig.bridges.tcpConnectors) { - currentConfig.bridges.tcpConnectors = {} - } - // Update the connector with the same name - currentConfig.bridges.tcpConnectors[connector.name] = connector - - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } -} - -// Helper function to update tcpListener in router config -async function _updateTcpListener (serviceConfig, transaction) { - const isK8s = await checkKubernetesEnvironment() - - // First handle K8s case if we're in K8s environment - if (isK8s) { - const k8sListener = await _buildTcpListener(serviceConfig, null) // 
null for K8s case - const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) - if (!configMap) { - throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) - } - - const routerConfig = JSON.parse(configMap.data['skrouterd.json']) - // Update the listener in the array - const listenerIndex = routerConfig.findIndex(item => - item[0] === 'tcpListener' && item[1].name === k8sListener.name - ) - if (listenerIndex !== -1) { - routerConfig[listenerIndex] = ['tcpListener', k8sListener] - } else { - routerConfig.push(['tcpListener', k8sListener]) - } - - await _updateK8sRouterConfig(routerConfig) - } - - // Handle distributed router microservice cases - // Get list of fog nodes that need this listener - const fogNodeUuids = await handleServiceDistribution(serviceConfig.tags, transaction) - // If not in K8s environment, always include default router - if (!isK8s) { - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - if (!defaultRouter) { - throw new Errors.NotFoundError('Default router not found') - } - // Add default router if not already in the list - if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { - fogNodeUuids.push(defaultRouter.iofogUuid) - } - } - // else if (!fogNodeUuids || fogNodeUuids.length === 0) { - // // If in K8s and no fog nodes found, add default router - // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - // if (!defaultRouter) { - // throw new Errors.NotFoundError('Default router not found') - // } - // fogNodeUuids.push(defaultRouter.iofogUuid) - // } - - // Update listener in each router microservice - for (const fogNodeUuid of fogNodeUuids) { - try { - const listener = await _buildTcpListener(serviceConfig, fogNodeUuid) - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - - if (!currentConfig.bridges) { - currentConfig.bridges = {} - } - 
if (!currentConfig.bridges.tcpListeners) { - currentConfig.bridges.tcpListeners = {} - } - // Update listener with its name as key - currentConfig.bridges.tcpListeners[listener.name] = listener - - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } catch (err) { - if (err instanceof Errors.NotFoundError) { - logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) - continue - } - throw err - } - } -} - -// Helper function to delete tcpConnector from router config -async function _deleteTcpConnector (serviceName, transaction) { - const isK8s = await checkKubernetesEnvironment() - const connectorName = `${serviceName}-connector` - - // Get service to determine if it's using default router - const service = await ServiceManager.findOne({ name: serviceName }, transaction) - if (!service) { - throw new Errors.NotFoundError(`Service not found: ${serviceName}`) - } - - const isDefaultRouter = service.defaultBridge === 'default-router' - let microserviceSource = null - if (service.type === 'microservice') { - microserviceSource = await MicroserviceManager.findOne({ uuid: service.resource }, transaction) - } - - if (isDefaultRouter && !microserviceSource) { - if (isK8s) { - // Update K8s router config - const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) - if (!configMap) { - throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) - } - - const routerConfig = JSON.parse(configMap.data['skrouterd.json']) - // Remove the connector from the array - const updatedConfig = routerConfig.filter(item => - !(item[0] === 'tcpConnector' && item[1].name === connectorName) - ) - - await _updateK8sRouterConfig(updatedConfig) - } else { - // Update default router microservice config - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - if (!defaultRouter) { - throw new Errors.NotFoundError('Default router not found') - } - const fogNodeUuid = 
defaultRouter.iofogUuid - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - - if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { - delete currentConfig.bridges.tcpConnectors[connectorName] - } - - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } - } else { - let fogNodeUuid = null - if (microserviceSource) { - fogNodeUuid = microserviceSource.iofogUuid - } else { - fogNodeUuid = service.defaultBridge // This is the actual fogNodeUuid for non-default router - } - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - - if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { - delete currentConfig.bridges.tcpConnectors[connectorName] - } - - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } -} - -// Helper function to delete tcpListener from router config -async function _deleteTcpListener (serviceName, transaction) { - const isK8s = await checkKubernetesEnvironment() - const listenerName = `${serviceName}-listener` - - // First handle K8s case if we're in K8s environment - if (isK8s) { - const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) - if (!configMap) { - throw new Errors.NotFoundError(`ConfigMap not found: ${K8S_ROUTER_CONFIG_MAP}`) - } - - const routerConfig = JSON.parse(configMap.data['skrouterd.json']) - // Remove the listener from the array - const updatedConfig = routerConfig.filter(item => - !(item[0] === 'tcpListener' && item[1].name === listenerName) - ) - - await _updateK8sRouterConfig(updatedConfig) - } - - // Get service to determine its tags for distribution - const service = await ServiceManager.findOneWithTags({ name: serviceName }, transaction) - if (!service) { - throw new Errors.NotFoundError(`Service not found: ${serviceName}`) - } 
- - let microserviceSource = null - if (service.type === 'microservice') { - microserviceSource = await MicroserviceManager.findOne({ uuid: service.resource }, transaction) - } - // Handle distributed router microservice cases - // Get list of fog nodes that need this listener removed - const serviceTags = service.tags.map(tag => tag.value) - const fogNodeUuids = await handleServiceDistribution(serviceTags, transaction) - - if (microserviceSource) { - if (!fogNodeUuids.includes(microserviceSource.iofogUuid)) { - fogNodeUuids.push(microserviceSource.iofogUuid) - } - } - // If not in K8s environment, always include default router - if (!isK8s) { - const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - if (!defaultRouter) { - throw new Errors.NotFoundError('Default router not found') - } - // Add default router if not already in the list - if (!fogNodeUuids.includes(defaultRouter.iofogUuid)) { - fogNodeUuids.push(defaultRouter.iofogUuid) - } - } - // else if (!fogNodeUuids || fogNodeUuids.length === 0) { - // // If in K8s and no fog nodes found, add default router - // const defaultRouter = await RouterManager.findOne({ isDefault: true }, transaction) - // if (!defaultRouter) { - // throw new Errors.NotFoundError('Default router not found') - // } - // fogNodeUuids.push(defaultRouter.iofogUuid) - // } - - // Remove listener from each router microservice - for (const fogNodeUuid of fogNodeUuids) { - try { - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') - if (currentConfig.bridges && currentConfig.bridges.tcpListeners) { - delete currentConfig.bridges.tcpListeners[listenerName] - } - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) - } catch (err) { - if (err instanceof Errors.NotFoundError) { - logger.info(`Router microservice not found for fogNodeUuid ${fogNodeUuid}, skipping.`) - continue - } - throw 
err - } - } -} - -// Helper function to create Kubernetes service -async function _createK8sService (serviceConfig, transaction) { - const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? tag : `${tag}:`) - const serviceSpec = { - apiVersion: 'v1', - kind: 'Service', - metadata: { - name: serviceConfig.name, - annotations: normalizedTags.reduce((acc, tag) => { - const [key, value] = tag.split(':') - acc[key] = value || '' - return acc - }, {}) - }, - spec: { - type: serviceConfig.k8sType, - selector: { - application: 'interior-router', - name: 'router', - 'skupper.io/component': 'router' - }, - ports: [{ - port: parseInt(serviceConfig.bridgePort), - targetPort: parseInt(serviceConfig.servicePort), - protocol: 'TCP' - }] - } - } - - const service = await K8sClient.createService(serviceSpec) - - // If LoadBalancer type, wait for and set the external IP - if (serviceConfig.k8sType === 'LoadBalancer') { - const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) - if (loadBalancerIP) { - await ServiceManager.update( - { name: serviceConfig.name }, - { serviceEndpoint: loadBalancerIP }, - transaction - ) - } - } - - return service -} - -// Helper function to update Kubernetes service -async function _updateK8sService (serviceConfig, transaction) { - const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? 
tag : `${tag}:`) - const patchData = { - metadata: { - annotations: normalizedTags.reduce((acc, tag) => { - const [key, value] = tag.split(':') - acc[key] = value || '' - return acc - }, {}) - }, - spec: { - type: serviceConfig.k8sType, - selector: { - application: 'interior-router', - name: 'router', - 'skupper.io/component': 'router' - }, - ports: [{ - port: parseInt(serviceConfig.bridgePort), - targetPort: parseInt(serviceConfig.servicePort), - protocol: 'TCP' - }] - } - } - - logger.debug(`Updating service: ${serviceConfig.name}`) - const service = await K8sClient.updateService(serviceConfig.name, patchData) - - // If LoadBalancer type, wait for and set the external IP - if (serviceConfig.k8sType === 'LoadBalancer') { - const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) - if (loadBalancerIP) { - await ServiceManager.update( - { name: serviceConfig.name }, - { serviceEndpoint: loadBalancerIP }, - transaction - ) - } - } - - return service -} - -// Helper function to delete Kubernetes service -async function _deleteK8sService (serviceName) { - await K8sClient.deleteService(serviceName) -} - -// Create service endpoint -async function createServiceEndpoint (serviceData, transaction) { - logger.debug('Creating service with data:' + JSON.stringify(serviceData)) - - // 1. Validate from schemas validator - await Validator.validate(serviceData, Validator.schemas.serviceCreate) - await _validateServiceName(serviceData) - - // 2. Check K8s environment if type is k8s - const isK8s = await checkKubernetesEnvironment() - if (serviceData.type === 'k8s' && !isK8s) { - throw new Errors.ValidationError('Kubernetes environment is required for k8s service type') - } - - if (serviceData.type !== 'k8s' && isK8s) { - logger.debug('Validating non k8s service type') - await validateNonK8sType(serviceData) - } - - // 3. 
Validate microservice type - if (serviceData.type === 'microservice') { - await validateMicroserviceType(serviceData, transaction) - } - - // 4. Validate agent type - if (serviceData.type === 'agent') { - logger.debug('Validating agent service type') - await validateFogServiceType(serviceData, transaction) - } - - // 5. Validate default bridge - logger.debug('Validating default bridge') - await validateDefaultBridge(serviceData, transaction) - - logger.debug('Defining bridge port') - // 6. Define bridge port - await defineBridgePort(serviceData, transaction) - - let service - try { - // Create service in database first - logger.debug('Creating service in database') - service = await ServiceManager.create(serviceData, transaction) - - // Set tags if provided - logger.debug('Setting tags') - if (serviceData.tags && serviceData.tags.length > 0) { - await _setTags(service, serviceData.tags, transaction) - } - - // 7. Add TCP connector - logger.debug('Adding TCP connector') - await _addTcpConnector(serviceData, transaction) - - // 8. Add TCP listener - logger.debug('Adding TCP listener') - try { - await _addTcpListener(serviceData, transaction) - } catch (error) { - logger.error('Error adding TCP listener:' + error.message + ' ' + error.stack + ' ' + serviceData.name) - throw error - } - - // 9. 
Create K8s service if needed - if ((serviceData.type === 'microservice' || serviceData.type === 'agent' || serviceData.type === 'external') && isK8s) { - logger.debug('Creating K8s service') - try { - await _createK8sService(serviceData, transaction) - } catch (error) { - logger.error('Error creating K8s service:' + error.message + ' ' + error.stack + ' ' + serviceData.name) - throw error - } - } - - return service - } catch (error) { - logger.error('Error creating service:' + error.message + ' ' + error.stack + ' ' + serviceData.name + ' ' + serviceData.type + ' ' + error.validationStep) - - // If any error occurs after service creation, clean up - if (service) { - try { - // Delete K8s service if it was created - if ((serviceData.type === 'microservice' || serviceData.type === 'agent' || serviceData.type === 'external') && isK8s) { - await _deleteK8sService(serviceData.name) - } - // Delete TCP listener if it was added - await _deleteTcpListener(serviceData.name, transaction) - // Delete TCP connector if it was added - await _deleteTcpConnector(serviceData.name, transaction) - // Finally delete the service from database - await ServiceManager.delete({ id: service.id }, transaction) - } catch (cleanupError) { - logger.error('Error during service creation cleanup:', { - error: cleanupError.message, - stack: cleanupError.stack, - serviceName: serviceData.name - }) - } - } - - // Wrap the error in a proper error type if it's not already - if (!(error instanceof Errors.ValidationError) && - !(error instanceof Errors.NotFoundError) && - !(error instanceof Errors.TransactionError) && - !(error instanceof Errors.DuplicatePropertyError)) { - throw new Errors.ValidationError(`Failed to create service: ${error.message}`) - } - throw error - } -} - -// Update service endpoint -async function updateServiceEndpoint (serviceName, serviceData, transaction) { - // 1. 
Validate from schemas validator - await Validator.validate(serviceData, Validator.schemas.serviceUpdate) - await _validateServiceName(serviceData) - - // 2. Get existing service - const existingService = await ServiceManager.findOneWithTags({ name: serviceName }, transaction) - if (!existingService) { - throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) - } - - // 3. Check if service type is being changed - if (serviceData.type && serviceData.type !== existingService.type) { - throw new Errors.ValidationError('Changing service type is not allowed. Please delete the service and create a new one with the desired type.') - } - - // 4. Check K8s environment if type is k8s - const isK8s = await checkKubernetesEnvironment() - if (existingService.type === 'k8s' && !isK8s) { - throw new Errors.ValidationError('Kubernetes environment is required for k8s service type') - } - - if (serviceData.type !== 'k8s' && isK8s) { - logger.debug('Validating non k8s service type') - await validateNonK8sType(serviceData) - } - - // 5. Validate microservice type if needed - if (existingService.type === 'microservice') { - await validateMicroserviceType(serviceData, transaction) - } - - // 6. Validate agent type if needed - if (existingService.type === 'agent') { - await validateFogServiceType(serviceData, transaction) - } - - // 7. 
Validate default bridge if needed - if (serviceData.defaultBridge) { - await validateDefaultBridge(serviceData, transaction) - } - - serviceData.bridgePort = existingService.bridgePort - - let updatedService - try { - // Update service in database - updatedService = await ServiceManager.update( - { name: serviceName }, - serviceData, - transaction - ) - - // Update tags if provided - if (serviceData.tags) { - await _setTags(existingService, serviceData.tags, transaction) - } - - // Handle resource changes - if (serviceData.resource && - JSON.stringify(serviceData.resource) !== JSON.stringify(existingService.resource)) { - // If resource changed, delete and recreate connector - await _deleteTcpConnector(serviceName, transaction) - await _addTcpConnector(serviceData, transaction) - } else { - // If resource didn't change, just update connector and listener - await _updateTcpConnector(serviceData, transaction) - // await _updateTcpListener(serviceData, transaction) - } - - // Update K8s service if needed - if ((existingService.type === 'microservice' || existingService.type === 'agent' || existingService.type === 'external') && isK8s) { - await _updateK8sService(serviceData, transaction) - } - - return updatedService - } catch (error) { - logger.error('Error updating service:', { - error: error.message, - stack: error.stack, - serviceName: serviceName, - serviceType: existingService.type - }) - - // If any error occurs after service update, attempt to rollback - if (updatedService) { - try { - // Rollback K8s service if it was updated - if ((existingService.type === 'microservice' || existingService.type === 'agent' || existingService.type === 'external') && isK8s) { - await _updateK8sService(existingService, transaction) - } - // Rollback TCP connector and listener - if (serviceData.resource && - JSON.stringify(serviceData.resource) !== JSON.stringify(existingService.resource)) { - await _deleteTcpConnector(serviceName, transaction) - await 
_addTcpConnector(existingService, transaction) - } else { - await _updateTcpConnector(existingService, transaction) - await _updateTcpListener(existingService, transaction) - } - // Rollback service in database - await ServiceManager.update( - { name: serviceName }, - existingService, - transaction - ) - } catch (rollbackError) { - logger.error('Error during service update rollback:', { - error: rollbackError.message, - stack: rollbackError.stack, - serviceName: serviceName - }) - } - } - - // Wrap the error in a proper error type if it's not already - if (!(error instanceof Errors.ValidationError) && - !(error instanceof Errors.NotFoundError) && - !(error instanceof Errors.TransactionError) && - !(error instanceof Errors.DuplicatePropertyError)) { - throw new Errors.ValidationError(`Failed to update service: ${error.message}`) - } - throw error - } -} - -// Delete service endpoint -async function deleteServiceEndpoint (serviceName, transaction) { - // Get existing service - const existingService = await ServiceManager.findOne({ name: serviceName }, transaction) - if (!existingService) { - throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) - } - - const isK8s = await checkKubernetesEnvironment() - - try { - // Delete TCP connector - await _deleteTcpConnector(serviceName, transaction) - - // Delete TCP listener - await _deleteTcpListener(serviceName, transaction) - - // Delete K8s service if needed - if (isK8s && existingService.type !== 'k8s') { - await _deleteK8sService(serviceName) - } - - // Finally delete the service from database - await ServiceManager.delete({ name: serviceName }, transaction) - - return { message: `Service ${serviceName} deleted successfully` } - } catch (error) { - logger.error('Error deleting service:', { - error: error.message, - stack: error.stack, - serviceName: serviceName, - serviceType: existingService.type - }) - - // Wrap the error in a proper error type if it's not already - if (!(error instanceof 
Errors.ValidationError) && - !(error instanceof Errors.NotFoundError) && - !(error instanceof Errors.TransactionError) && - !(error instanceof Errors.DuplicatePropertyError)) { - throw new Errors.ValidationError(`Failed to delete service: ${error.message}`) - } - throw error - } -} - -// List services endpoint -async function getServicesListEndpoint (transaction) { - const queryFogData = {} - const services = await ServiceManager.findAllWithTags(queryFogData, transaction) - return services.map(service => ({ - name: service.name, - type: service.type, - resource: service.resource, - defaultBridge: service.defaultBridge, - bridgePort: service.bridgePort, - targetPort: service.targetPort, - servicePort: service.servicePort, - k8sType: service.k8sType, - serviceEndpoint: service.serviceEndpoint, - tags: _mapTags(service) - })) -} - -// Get service endpoint -async function getServiceEndpoint (serviceName, transaction) { - const queryFogData = { name: serviceName } - const service = await ServiceManager.findOneWithTags(queryFogData, transaction) - if (!service) { - throw new Errors.NotFoundError(`Service with name ${serviceName} not found`) - } - return { - name: service.name, - type: service.type, - resource: service.resource, - defaultBridge: service.defaultBridge, - bridgePort: service.bridgePort, - targetPort: service.targetPort, - servicePort: service.servicePort, - k8sType: service.k8sType, - serviceEndpoint: service.serviceEndpoint, - tags: _mapTags(service) - } -} - -async function moveMicroserviceTcpBridgeToNewFog (service, newFogUuid, oldFogUuid, transaction) { - const listenerName = `${service.name}-listener` - const connectorName = `${service.name}-connector` - - const oldRouterMicroservice = await _getRouterMicroservice(oldFogUuid, transaction) - const oldRouterConfig = JSON.parse(oldRouterMicroservice.config || '{}') - const newRouterMicroservice = await _getRouterMicroservice(newFogUuid, transaction) - const newRouterConfig = 
JSON.parse(newRouterMicroservice.config || '{}') - - const connector = oldRouterConfig.bridges.tcpConnectors[connectorName] - const listener = oldRouterConfig.bridges.tcpListeners[listenerName] - - if (oldRouterConfig.bridges.tcpConnectors[connectorName]) { - delete oldRouterConfig.bridges.tcpConnectors[connectorName] - } - if (oldRouterConfig.bridges.tcpListeners[listenerName]) { - delete oldRouterConfig.bridges.tcpListeners[listenerName] - } - - if (!newRouterConfig.bridges) { - newRouterConfig.bridges = {} - } - if (!newRouterConfig.bridges.tcpConnectors) { - newRouterConfig.bridges.tcpConnectors = {} - } - - newRouterConfig.bridges.tcpConnectors[connectorName] = connector - newRouterConfig.bridges.tcpListeners[listenerName] = listener - - await _updateRouterMicroserviceConfig(oldFogUuid, oldRouterConfig, transaction) - await _updateRouterMicroserviceConfig(newFogUuid, newRouterConfig, transaction) -} - -module.exports = { - checkKubernetesEnvironment, - validateMicroserviceType: TransactionDecorator.generateTransaction(validateMicroserviceType), - validateNonK8sType, - _validateServiceName, - validateFogServiceType: TransactionDecorator.generateTransaction(validateFogServiceType), - validateDefaultBridge: TransactionDecorator.generateTransaction(validateDefaultBridge), - defineBridgePort: TransactionDecorator.generateTransaction(defineBridgePort), - handleServiceDistribution: TransactionDecorator.generateTransaction(handleServiceDistribution), - _mapTags, - _setTags: TransactionDecorator.generateTransaction(_setTags), - _createK8sService, - _updateK8sService, - _deleteK8sService, - createServiceEndpoint: TransactionDecorator.generateTransaction(createServiceEndpoint), - updateServiceEndpoint: TransactionDecorator.generateTransaction(updateServiceEndpoint), - deleteServiceEndpoint: TransactionDecorator.generateTransaction(deleteServiceEndpoint), - getServicesListEndpoint: TransactionDecorator.generateTransaction(getServicesListEndpoint), - getServiceEndpoint: 
TransactionDecorator.generateTransaction(getServiceEndpoint), - moveMicroserviceTcpBridgeToNewFog: TransactionDecorator.generateTransaction(moveMicroserviceTcpBridgeToNewFog) -} From 2cd88ff7568cbf9344741df3540c5adc9cd238b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sat, 21 Jun 2025 16:59:16 +0300 Subject: [PATCH 11/25] fix --- src/websocket/server.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/websocket/server.js b/src/websocket/server.js index 3627ba44..112e3aa2 100644 --- a/src/websocket/server.js +++ b/src/websocket/server.js @@ -849,7 +849,7 @@ class WebSocketServer { } // Check if we're in development mode (mock Keycloak) - const isDevMode = config.get('server.devMode', true) + const isDevMode = process.env.SERVER_DEV_MODE || config.get('server.devMode', true) const hasAuthConfig = this.isAuthConfigured() if (!hasAuthConfig && isDevMode) { @@ -890,7 +890,7 @@ class WebSocketServer { // Check if user has required roles const hasRequiredRole = userRoles.some(role => ['SRE', 'Developer'].includes(role)) if (!hasRequiredRole) { - throw new Errors.AuthenticationError('Insufficient permissions. Required roles: SRE or Developer') + throw new Errors.AuthenticationError('Insufficient permissions. Required roles: SRE for Node Exec or Developer for Microservice Exec') } // 2. Only now check microservice, application, etc. 
From 468eb6c4a393a2521bf6a25403f63795f657d5fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Fri, 4 Jul 2025 01:43:50 +0300 Subject: [PATCH 12/25] beta release preparation, swagger yaml updated, db ssl config added, migration and seeder scripts fixed, bugfixes --- docs/swagger.yaml | 440 ++++++++++++------ package-lock.json | 12 +- package.json | 4 +- src/config/controller.yaml | 4 + src/config/env-mapping.js | 6 + src/data/managers/config-map-manager.js | 1 + src/data/managers/fog-used-token-manager.js | 44 +- .../mysql/db_migration_mysql_v1.0.2.sql | 104 ++--- .../postgres/db_migration_pg_v1.0.2.sql | 104 ++--- .../sqlite/db_migration_sqlite_v1.0.2.sql | 104 ++--- src/data/models/fog.js | 13 +- src/data/models/fogUsedToken.js | 6 +- src/data/models/microservice.js | 7 +- src/data/models/volumeMount.js | 2 +- src/data/providers/mysql.js | 25 + src/data/providers/postgres.js | 30 +- .../seeders/mysql/db_seeder_mysql_v1.0.2.sql | 21 +- .../seeders/postgres/db_seeder_pg_v1.0.2.sql | 21 +- .../sqlite/db_seeder_sqlite_v1.0.2.sql | 21 +- src/schemas/config-map.js | 1 + src/server.js | 4 + src/services/agent-service.js | 24 +- src/services/catalog-service.js | 16 +- src/services/iofog-key-service.js | 10 +- src/services/iofog-service.js | 57 ++- src/services/microservices-service.js | 42 +- src/services/router-service.js | 7 +- src/services/services-service.js | 51 +- src/utils/k8s-client.js | 11 +- 29 files changed, 729 insertions(+), 463 deletions(-) diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 622c9514..307efca3 100755 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -45,7 +45,7 @@ paths: summary: Returns list of ioFog nodes operationId: getIOFogNodes security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -76,7 +76,7 @@ paths: summary: Creates a new ioFog node operationId: createIOFogNode security: - - userToken: [] + - authToken: [] requestBody: $ref: 
"#/components/requestBodies/UpdateIOFogNodeRequestBody" responses: @@ -111,7 +111,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: $ref: "#/components/requestBodies/UpdateIOFogNodeRequestBody" responses: @@ -143,7 +143,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "202": description: Accepted @@ -171,7 +171,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -204,7 +204,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "201": description: Success @@ -246,7 +246,7 @@ paths: - upgrade - rollback security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -277,7 +277,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -304,7 +304,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -331,7 +331,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -364,7 +364,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -390,7 +390,7 @@ paths: summary: Lists all applications operationId: listApplication security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -416,7 +416,7 @@ paths: summary: Lists all system applications operationId: listSystemApplication security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -436,6 +436,38 @@ paths: "500": description: Internal Server Error /application/system/{name}: + get: + tags: + - Application + summary: Gets an application details + operationId: getApplication + parameters: + - in: path + name: name + description: Application name + required: true + schema: + type: string + security: + - 
authToken: [] + responses: + "200": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + $ref: "#/components/schemas/ApplicationGetResponse" + "400": + description: Bad Request + "404": + description: Not Found + "500": + description: Internal Server Error delete: tags: - Application @@ -449,7 +481,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -472,7 +504,7 @@ paths: summary: Creates an application using a YAML file operationId: createApplicationYAML security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -513,7 +545,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -554,7 +586,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -582,7 +614,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -615,7 +647,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -647,7 +679,7 @@ paths: summary: Lists all application templates operationId: listApplicationTemplates security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -673,7 +705,7 @@ paths: summary: Creates an application template using a YAML file operationId: createApplicationTemplateYAML security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -714,7 +746,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -748,7 +780,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -776,7 +808,7 @@ paths: schema: type: string security: - - userToken: [] 
+ - authToken: [] responses: "200": description: Success @@ -809,7 +841,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -902,13 +934,7 @@ paths: summary: Move Controller CA to Agent operationId: agentControllerCert security: - - agentToken: [] - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/AgentDeprovisioningRequest" - required: true + - authToken: [] responses: "204": description: Success @@ -930,7 +956,7 @@ paths: summary: Get an ioFog node configuration operationId: getIOFogNodeConfig security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -953,7 +979,7 @@ paths: summary: Updates an ioFog node configuration operationId: updateIOFogNodeConfig security: - - agentToken: [] + - authToken: [] requestBody: content: application/json: @@ -981,7 +1007,7 @@ paths: summary: Gets ioFog node changes operationId: getIOFogNodeChanges security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1006,7 +1032,7 @@ paths: summary: Resets ioFog node changes list operationId: resetIOFogNodeChanges security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1028,7 +1054,7 @@ paths: summary: Posts agent status to ioFog node operationId: postAgentStatus security: - - agentToken: [] + - authToken: [] requestBody: content: application/json: @@ -1056,7 +1082,7 @@ paths: summary: Gets microservices running on an ioFog node operationId: getAgentMicroservicesList security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1087,7 +1113,7 @@ paths: schema: type: string security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1113,7 +1139,7 @@ paths: summary: Gets list of Docker registries operationId: getRegistriesList security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1137,7 +1163,7 @@ paths: 
summary: Get an ioFog node tunnel configuration operationId: getIOFogNodeTunnelConfig security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1163,7 +1189,7 @@ paths: summary: Get an ioFog node strace info operationId: getIOFogNodeStraceInfo security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1188,7 +1214,7 @@ paths: summary: Posts agent strace to ioFog node operationId: postIOFogNodeStraceBuffer security: - - agentToken: [] + - authToken: [] requestBody: content: application/json: @@ -1218,7 +1244,7 @@ paths: summary: Get change version command operationId: getChangeVersion security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1244,7 +1270,7 @@ paths: summary: Updates HAL hardware info operationId: putHalHardwareInfo security: - - agentToken: [] + - authToken: [] requestBody: $ref: "#/components/requestBodies/HalInfo" responses: @@ -1268,7 +1294,7 @@ paths: summary: Retrieves HAL USB info operationId: getAgentHalUsbInfo security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -1293,7 +1319,7 @@ paths: summary: Updates HAL USB info operationId: putHalUsbInfo security: - - agentToken: [] + - authToken: [] requestBody: $ref: "#/components/requestBodies/HalInfo" responses: @@ -1317,7 +1343,7 @@ paths: summary: Deletes an ioFog node operationId: deleteAgentNode security: - - agentToken: [] + - authToken: [] responses: "204": description: No Content @@ -1337,7 +1363,7 @@ paths: summary: Get image snapshot info operationId: getImageSnapshot security: - - agentToken: [] + - authToken: [] responses: "200": description: Success @@ -1362,7 +1388,7 @@ paths: summary: Put image snapshot info on controller operationId: putImageSnapshot security: - - agentToken: [] + - authToken: [] requestBody: content: application/json: @@ -1390,7 +1416,7 @@ paths: summary: Post tracking info operationId: postTracking security: - - agentToken: [] + - 
authToken: [] requestBody: content: application/json: @@ -1411,7 +1437,7 @@ paths: summary: Gets microservices catalog operationId: getMicroservicesCatalog security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -1434,7 +1460,7 @@ paths: summary: Creates a new microservice catalog item operationId: createMicroserviceCatalogItem security: - - userToken: [] + - authToken: [] requestBody: $ref: "#/components/requestBodies/CreateUpdateCatalogItemRequestBody" responses: @@ -1474,7 +1500,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Catalog Item Info @@ -1506,7 +1532,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: $ref: "#/components/requestBodies/CreateUpdateCatalogItemRequestBody" responses: @@ -1540,7 +1566,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -1576,7 +1602,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -1600,7 +1626,7 @@ paths: summary: Creates a new microservice in an Application operationId: createMicroserviceYAML security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -1648,7 +1674,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -1680,7 +1706,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -1710,7 +1736,23 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] + responses: + "204": + description: Updated + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Invalid Registry Id + "500": + description: Internal Server Error 
/microservices/pub/{tag}: get: tags: @@ -1725,7 +1767,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -1756,7 +1798,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -1773,7 +1815,91 @@ paths: description: Not Authorized "500": description: Internal Server Error + /microservices/system: + get: + tags: + - Microservices + summary: Gets list of system microservices + operationId: getSystemMicroservicesList + security: + - authToken: [] + parameters: + - in: query + name: flowId + deprecated: true + description: Flow Id + required: false + schema: + type: integer + - in: query + name: application + description: Application name + required: false + schema: + type: string + responses: + '200': + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + type: object + properties: + microservices: + type: array + items: + type: object + properties: + uuid: + type: string + name: + type: string + config: + type: object + '401': + description: Not Authorized + '404': + description: Not Found + '500': + description: Internal Server Error /microservices/system/{uuid}: + get: + tags: + - Microservices + summary: Gets a system microservice info + operationId: getSystemMicroserviceInfo + parameters: + - in: path + name: uuid + description: Microservice Uuid + required: true + schema: + type: string + security: + - authToken: [] + responses: + "200": + description: Success + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + content: + application/json: + schema: + $ref: "#/components/schemas/MicroserviceInfoResponse" + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error patch: tags: - Microservices @@ -1787,7 +1913,23 @@ paths: 
schema: type: string security: - - userToken: [] + - authToken: [] + responses: + "204": + description: Updated + headers: + X-Timestamp: + description: FogController server timestamp + schema: + type: number + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Invalid Registry Id + "500": + description: Internal Server Error /microservices/yaml/{uuid}: patch: tags: @@ -1802,7 +1944,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -1845,7 +1987,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -1886,7 +2028,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Created @@ -1919,7 +2061,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -1967,7 +2109,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2002,7 +2144,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2031,7 +2173,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2075,7 +2217,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -2108,7 +2250,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2159,7 +2301,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2196,7 +2338,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2227,7 +2369,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] 
responses: "204": description: Success @@ -2256,7 +2398,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2285,7 +2427,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "201": description: Created @@ -2313,7 +2455,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2342,7 +2484,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "201": description: Created @@ -2370,7 +2512,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2399,7 +2541,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "201": description: Created @@ -2434,7 +2576,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -2468,7 +2610,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2516,7 +2658,7 @@ paths: - file - string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -2551,7 +2693,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2588,7 +2730,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2624,7 +2766,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -2650,7 +2792,7 @@ paths: summary: Creates new registry operationId: createRegistry security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2676,7 +2818,7 @@ paths: summary: Gets list of registries operationId: getRegistryList security: - - userToken: [] + - authToken: [] responses: "200": description: 
Success @@ -2707,7 +2849,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Deleted @@ -2735,7 +2877,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2821,7 +2963,7 @@ paths: summary: Logout operationId: logout security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -2836,7 +2978,7 @@ paths: summary: Get current user profile data operationId: getUserProfile security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -2860,7 +3002,7 @@ paths: summary: Get routes operationId: getRoutes security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -2883,7 +3025,7 @@ paths: summary: Creates a new route operationId: createRoute security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2928,7 +3070,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Route Info @@ -2960,7 +3102,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -2999,7 +3141,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -3021,7 +3163,7 @@ paths: summary: Get Edge Resources operationId: getEdgeResources security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -3058,7 +3200,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -3094,7 +3236,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -3137,7 +3279,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -3166,7 +3308,7 @@ paths: schema: type: string security: - - userToken: 
[] + - authToken: [] responses: "200": description: Success @@ -3190,7 +3332,7 @@ paths: summary: Create Specific Edge Resource operationId: postEdgeResource security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -3234,7 +3376,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -3275,7 +3417,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -3304,7 +3446,7 @@ paths: summary: Creates a new secret operationId: createSecret security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -3332,7 +3474,7 @@ paths: summary: Lists all secrets operationId: listSecrets security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -3358,7 +3500,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -3385,7 +3527,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -3420,7 +3562,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -3437,7 +3579,7 @@ paths: summary: Create a secret from YAML file operationId: createSecretFromYAML security: - - userToken: [] + - authToken: [] requestBody: content: multipart/form-data: @@ -3476,7 +3618,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: content: multipart/form-data: @@ -3513,7 +3655,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: '200': description: Success @@ -3540,7 +3682,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: '200': description: Success @@ -3558,7 +3700,7 @@ paths: summary: Create a new certificate operationId: createCertificate security: - - userToken: [] + - authToken: [] 
requestBody: content: application/json: @@ -3585,7 +3727,7 @@ paths: summary: List all certificates operationId: listCertificates security: - - userToken: [] + - authToken: [] responses: '200': description: Success @@ -3613,7 +3755,7 @@ paths: type: integer default: 30 security: - - userToken: [] + - authToken: [] responses: '200': description: Success @@ -3642,7 +3784,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: '200': description: Success @@ -3669,7 +3811,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: '200': description: Success @@ -3694,7 +3836,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: '200': description: Success @@ -3717,7 +3859,7 @@ paths: summary: Create a certificate or CA from YAML file operationId: createCertificateFromYAML security: - - userToken: [] + - authToken: [] requestBody: content: multipart/form-data: @@ -3751,7 +3893,7 @@ paths: summary: Gets list of services operationId: getServicesList security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -3776,7 +3918,7 @@ paths: summary: Creates a new service operationId: createService security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -3817,7 +3959,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -3849,7 +3991,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "204": description: Success @@ -3877,7 +4019,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -3911,7 +4053,7 @@ paths: summary: Creates a new service from YAML operationId: createServiceYAML security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -3977,7 +4119,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] 
requestBody: required: true content: @@ -4015,7 +4157,7 @@ paths: summary: Creates a new ConfigMap operationId: createConfigMap security: - - userToken: [] + - authToken: [] requestBody: content: application/json: @@ -4042,7 +4184,7 @@ paths: summary: Lists all ConfigMaps operationId: listConfigMaps security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -4061,7 +4203,7 @@ paths: summary: Creates a new ConfigMap from YAML operationId: createConfigMapFromYaml security: - - userToken: [] + - authToken: [] requestBody: content: multipart/form-data: @@ -4093,7 +4235,7 @@ paths: summary: Gets a ConfigMap by name operationId: getConfigMap security: - - userToken: [] + - authToken: [] parameters: - in: path name: name @@ -4119,7 +4261,7 @@ paths: summary: Updates a ConfigMap operationId: updateConfigMap security: - - userToken: [] + - authToken: [] parameters: - in: path name: name @@ -4152,7 +4294,7 @@ paths: summary: Deletes a ConfigMap operationId: deleteConfigMap security: - - userToken: [] + - authToken: [] parameters: - in: path name: name @@ -4175,7 +4317,7 @@ paths: summary: Updates a ConfigMap from YAML operationId: updateConfigMapFromYaml security: - - userToken: [] + - authToken: [] parameters: - in: path name: name @@ -4213,7 +4355,7 @@ paths: summary: Returns list of volume mounts operationId: listVolumeMounts security: - - userToken: [] + - authToken: [] responses: "200": description: List of volume mounts @@ -4233,7 +4375,7 @@ paths: summary: Creates a new volume mount operationId: createVolumeMount security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -4261,7 +4403,7 @@ paths: summary: Creates a new volume mount from YAML operationId: createVolumeMountYaml security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -4296,7 +4438,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "200": description: Success @@ -4323,7 +4465,7 @@ 
paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -4358,7 +4500,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] responses: "202": description: Accepted @@ -4383,7 +4525,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -4420,7 +4562,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -4455,7 +4597,7 @@ paths: schema: type: string security: - - userToken: [] + - authToken: [] requestBody: required: true content: @@ -4518,11 +4660,7 @@ servers: - url: http://localhost:51121/api/v3 components: securitySchemes: - userToken: - type: http - scheme: bearer - bearerFormat: JWT - agentToken: + authToken: type: http scheme: bearer bearerFormat: JWT diff --git a/package-lock.json b/package-lock.json index 49756055..1f7e89d1 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0", + "version": "3.5.0-beta", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0", + "version": "3.5.0-beta", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "0.5.4", + "@datasance/ecn-viewer": "1.0.0-alpha1", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-0.5.4.tgz", - "integrity": "sha512-Eu8BhBAhHyU6S3RdOPyiKpq3DhRUcEQQlU02BBWTdI5e6j5Iqv6Q72AFBw+AaE0NeO7PSNz8x7jQj77OX7jU5g==" + "version": "1.0.0-alpha1", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-alpha1.tgz", + "integrity": 
"sha512-gFOvOf9gZqSJyW32UxWAz/e5vLKak0uPx9+Lqp5CpP5pLETaMWmApiWQFaPKxVWhGVOwT38b7WoWo4W78SWzDQ==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index d5061e20..cfa58995 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0", + "version": "3.5.0-beta", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "0.5.4", + "@datasance/ecn-viewer": "1.0.0-alpha1", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", diff --git a/src/config/controller.yaml b/src/config/controller.yaml index 58f58c47..8f1b8756 100644 --- a/src/config/controller.yaml +++ b/src/config/controller.yaml @@ -60,12 +60,16 @@ database: # username: "" # MySQL username # password: "" # MySQL password # databaseName: "" # MySQL database name + # useSSL: false # Use SSL for MySQL connection + # sslCA: "" # MySQL SSL CA in base64 encoded string # postgres: # host: "" # PostgreSQL host # port: 5432 # PostgreSQL port # username: "" # PostgreSQL username # password: "" # PostgreSQL password # databaseName: "" # PostgreSQL database name + # useSSL: false # Use SSL for PostgreSQL connection + # sslCA: "" # PostgreSQL SSL CA in base64 encoded string sqlite: databaseName: dev_database.sqlite # SQLite database file name logging: false # Enable SQLite query logging diff --git a/src/config/env-mapping.js b/src/config/env-mapping.js index b125c323..18f1baea 100644 --- a/src/config/env-mapping.js +++ b/src/config/env-mapping.js @@ -57,6 +57,12 @@ module.exports = { 'DB_NAME': { path: (provider) => `database.${provider}.databaseName` }, + 'DB_USE_SSL': { + path: (provider) => `database.${provider}.useSSL` + }, + 
'DB_SSL_CA': { + path: (provider) => `database.${provider}.sslCA` + }, // Auth Configuration 'KC_REALM': 'auth.realm', diff --git a/src/data/managers/config-map-manager.js b/src/data/managers/config-map-manager.js index ee7b6d30..b8da144c 100644 --- a/src/data/managers/config-map-manager.js +++ b/src/data/managers/config-map-manager.js @@ -41,6 +41,7 @@ class ConfigMapManager extends BaseManager { return configMaps.map(configMap => ({ id: configMap.id, name: configMap.name, + immutable: configMap.immutable, created_at: configMap.created_at, updated_at: configMap.updated_at })) diff --git a/src/data/managers/fog-used-token-manager.js b/src/data/managers/fog-used-token-manager.js index a9274ded..0b9a3349 100644 --- a/src/data/managers/fog-used-token-manager.js +++ b/src/data/managers/fog-used-token-manager.js @@ -26,22 +26,40 @@ class FogUsedTokenManager { */ static async storeJti (jti, fogUuid, exp, transaction) { try { + // Input validation + if (!jti || typeof jti !== 'string') { + throw new Error('JTI must be a non-empty string') + } + if (!fogUuid || typeof fogUuid !== 'string') { + throw new Error('Fog UUID must be a non-empty string') + } + + // Ensure exp is a valid integer (Unix timestamp) + const expiryTime = parseInt(exp, 10) + if (isNaN(expiryTime) || expiryTime <= 0) { + throw new Error('Expiration timestamp must be a positive integer') + } + + // Prepare the data object + const tokenData = { + jti, + iofogUuid: fogUuid, + expiryTime: expiryTime + } + + // Create the record with or without transaction if (!transaction || transaction.fakeTransaction) { - // If no transaction or fake transaction, create a new one - await models.FogUsedToken.create({ - jti, - iofogUuid: fogUuid, - expiryTime: exp - }) + await models.FogUsedToken.create(tokenData) } else { - // Use the provided transaction - await models.FogUsedToken.create({ - jti, - iofogUuid: fogUuid, - expiryTime: exp - }, { transaction }) + await models.FogUsedToken.create(tokenData, { transaction }) } 
} catch (error) { + // Check if it's a duplicate JTI error + if (error.name === 'SequelizeUniqueConstraintError' && error.fields && error.fields.jti) { + logger.warn(`JTI already exists: ${jti}`) + throw new Error('JWT token already used') + } + logger.error(`Failed to store JTI: ${error.message}`) throw error } @@ -81,7 +99,7 @@ class FogUsedTokenManager { */ static async cleanupExpiredJtis () { try { - const now = new Date().getTime() / 1000 // Convert to Unix timestamp + const now = Math.floor(Date.now() / 1000) // Convert to Unix timestamp (seconds) const result = await models.FogUsedToken.destroy({ where: { expiryTime: { diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql index e87893b1..528e829c 100644 --- a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql +++ b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql @@ -60,7 +60,7 @@ CREATE INDEX idx_fog_type_bluetooth_catalog_item_id ON FogTypes (bluetooth_catal CREATE TABLE IF NOT EXISTS Fogs ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, name VARCHAR(255) DEFAULT 'Unnamed ioFog 1', location TEXT, gps_mode TEXT, @@ -68,7 +68,7 @@ CREATE TABLE IF NOT EXISTS Fogs ( longitude FLOAT, description TEXT, last_active BIGINT, - daemon_status VARCHAR(32) DEFAULT 'NOT_PROVISIONED', + daemon_status VARCHAR(36) DEFAULT 'NOT_PROVISIONED', daemon_operating_duration BIGINT DEFAULT 0, daemon_last_start BIGINT, memory_usage FLOAT DEFAULT 0.000, @@ -80,21 +80,21 @@ CREATE TABLE IF NOT EXISTS Fogs ( system_available_disk BIGINT, system_available_memory BIGINT, system_total_cpu FLOAT, - security_status VARCHAR(32) DEFAULT 'OK', - security_violation_info VARCHAR(32) DEFAULT 'No violation', + security_status VARCHAR(36) DEFAULT 'OK', + security_violation_info VARCHAR(36) DEFAULT 'No violation', catalog_item_status TEXT, repository_count BIGINT DEFAULT 0, repository_status TEXT, system_time BIGINT, 
last_status_time BIGINT, - ip_address VARCHAR(32) DEFAULT '0.0.0.0', - ip_address_external VARCHAR(32) DEFAULT '0.0.0.0', - host VARCHAR(32), + ip_address VARCHAR(36) DEFAULT '0.0.0.0', + ip_address_external VARCHAR(36) DEFAULT '0.0.0.0', + host VARCHAR(36), processed_messages BIGINT DEFAULT 0, catalog_item_message_counts TEXT, message_speed FLOAT DEFAULT 0.000, last_command_time BIGINT, - network_interface VARCHAR(32) DEFAULT 'dynamic', + network_interface VARCHAR(36) DEFAULT 'dynamic', docker_url VARCHAR(255) DEFAULT 'unix:///var/run/docker.sock', disk_limit FLOAT DEFAULT 50, disk_directory VARCHAR(255) DEFAULT '/var/lib/iofog/', @@ -118,7 +118,7 @@ CREATE TABLE IF NOT EXISTS Fogs ( log_level VARCHAR(10) DEFAULT 'INFO', is_system BOOLEAN DEFAULT FALSE, router_id INT DEFAULT 0, - time_zone VARCHAR(32) DEFAULT 'Etc/UTC', + time_zone VARCHAR(36) DEFAULT 'Etc/UTC', created_at DATETIME, updated_at DATETIME, fog_type_id INT DEFAULT 0, @@ -144,7 +144,7 @@ CREATE TABLE IF NOT EXISTS ChangeTrackings ( prune BOOLEAN DEFAULT false, linked_edge_resources BOOLEAN DEFAULT false, last_updated VARCHAR(255) DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -154,7 +154,7 @@ CREATE TABLE IF NOT EXISTS FogAccessTokens ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, expiration_time BIGINT, token TEXT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -164,7 +164,7 @@ CREATE TABLE IF NOT EXISTS FogProvisionKeys ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, provisioning_string VARCHAR(100), expiration_time BIGINT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -173,7 +173,7 @@ CREATE INDEX idx_fog_provision_keys_iofogUuid ON FogProvisionKeys (iofog_uuid); CREATE TABLE IF NOT EXISTS FogVersionCommands ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, 
version_command VARCHAR(100), - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -184,7 +184,7 @@ CREATE TABLE IF NOT EXISTS HWInfos ( info TEXT, created_at DATETIME, updated_at DATETIME, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -195,7 +195,7 @@ CREATE TABLE IF NOT EXISTS USBInfos ( info TEXT, created_at DATETIME, updated_at DATETIME, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -210,14 +210,14 @@ CREATE TABLE IF NOT EXISTS Tunnels ( local_port INT DEFAULT 22, rsa_key TEXT, closed BOOLEAN DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); CREATE INDEX idx_tunnels_iofogUuid ON Tunnels (iofog_uuid); CREATE TABLE IF NOT EXISTS Microservices ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, config TEXT, name VARCHAR(255) DEFAULT 'New Microservice', config_last_updated BIGINT, @@ -231,7 +231,7 @@ CREATE TABLE IF NOT EXISTS Microservices ( updated_at DATETIME, catalog_item_id INT, registry_id INT DEFAULT 1, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), application_id INT, FOREIGN KEY (catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE, FOREIGN KEY (registry_id) REFERENCES Registries (id) ON DELETE SET NULL, @@ -247,7 +247,7 @@ CREATE INDEX idx_microservices_applicationId ON Microservices (application_id); CREATE TABLE IF NOT EXISTS MicroserviceArgs ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, cmd TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -257,7 +257,7 @@ CREATE TABLE IF NOT EXISTS MicroserviceEnvs ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, `key` TEXT, `value` TEXT, - microservice_uuid 
VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -270,9 +270,9 @@ CREATE TABLE IF NOT EXISTS MicroserviceExtraHost ( public_port INT, template TEXT, `value` TEXT, - microservice_uuid VARCHAR(32), - target_microservice_uuid VARCHAR(32), - target_fog_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), + target_microservice_uuid VARCHAR(36), + target_fog_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (target_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (target_fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ -291,7 +291,7 @@ CREATE TABLE IF NOT EXISTS MicroservicePorts ( is_proxy BOOLEAN, created_at DATETIME, updated_at DATETIME, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -328,7 +328,7 @@ CREATE TABLE IF NOT EXISTS MicroserviceStatuses ( container_id VARCHAR(255) DEFAULT '', percentage FLOAT DEFAULT 0.00, error_message TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), created_at DATETIME, updated_at DATETIME, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE @@ -340,7 +340,7 @@ CREATE TABLE IF NOT EXISTS StraceDiagnostics ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, strace_run BOOLEAN, buffer VARCHAR(255) DEFAULT '', - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -352,7 +352,7 @@ CREATE TABLE IF NOT EXISTS VolumeMappings ( container_destination TEXT, access_mode TEXT, type TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -363,7 +363,7 @@ CREATE TABLE IF NOT EXISTS CatalogItemImages ( id INT 
AUTO_INCREMENT PRIMARY KEY, container_image TEXT, catalog_item_id INT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), fog_type_id INT, FOREIGN KEY (catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, @@ -398,8 +398,8 @@ CREATE INDEX idx_catalog_item_output_type_catalog_item_id ON CatalogItemOutputTy CREATE TABLE IF NOT EXISTS Routings ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, name TEXT NOT NULL, - source_microservice_uuid VARCHAR(32), - dest_microservice_uuid VARCHAR(32), + source_microservice_uuid VARCHAR(36), + dest_microservice_uuid VARCHAR(36), application_id INT, FOREIGN KEY (source_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (dest_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, @@ -418,7 +418,7 @@ CREATE TABLE IF NOT EXISTS Routers ( inter_router_port INT, host TEXT, is_default BOOLEAN DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), created_at DATETIME, updated_at DATETIME, FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ -461,7 +461,7 @@ CREATE TABLE IF NOT EXISTS Tags ( CREATE TABLE IF NOT EXISTS IofogTags ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, - fog_uuid VARCHAR(32), + fog_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE @@ -486,7 +486,7 @@ CREATE TABLE IF NOT EXISTS EdgeResources ( CREATE TABLE IF NOT EXISTS AgentEdgeResources ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, - fog_uuid VARCHAR(32), + fog_uuid VARCHAR(36), edge_resource_id INT, FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, FOREIGN KEY (edge_resource_id) REFERENCES EdgeResources (id) ON DELETE CASCADE @@ -557,7 +557,7 @@ CREATE INDEX idx_applicationtemplatevariables_application_template_id ON Applica CREATE TABLE IF NOT EXISTS MicroserviceCdiDevices 
( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, cdi_devices TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -571,7 +571,7 @@ ADD COLUMN runtime TEXT DEFAULT NULL; CREATE TABLE IF NOT EXISTS MicroservicePubTags ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE @@ -579,7 +579,7 @@ CREATE TABLE IF NOT EXISTS MicroservicePubTags ( CREATE TABLE IF NOT EXISTS MicroserviceSubTags ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE @@ -593,7 +593,7 @@ CREATE INDEX idx_microservicesubtags_tag_id ON MicroserviceSubTags (tag_id); CREATE TABLE IF NOT EXISTS MicroserviceCapAdd ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, cap_add TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -602,7 +602,7 @@ CREATE INDEX idx_microservice_capAdd_microserviceUuid ON MicroserviceCapAdd (mic CREATE TABLE IF NOT EXISTS MicroserviceCapDrop ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, cap_drop TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -614,7 +614,7 @@ ADD COLUMN annotations TEXT; CREATE TABLE IF NOT EXISTS FogPublicKeys ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, public_key TEXT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), created_at DATETIME, updated_at DATETIME, FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ 
-625,8 +625,8 @@ CREATE INDEX idx_fog_public_keys_iofogUuid ON FogPublicKeys (iofog_uuid); CREATE TABLE IF NOT EXISTS FogUsedTokens ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, jti VARCHAR(255) NOT NULL, - iofog_uuid VARCHAR(32), - expiry_time DATETIME NOT NULL, + iofog_uuid VARCHAR(36), + expiry_time BIGINT NOT NULL, created_at DATETIME, updated_at DATETIME, FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ -703,8 +703,8 @@ CREATE TABLE IF NOT EXISTS ServiceTags ( CREATE INDEX idx_service_tags_service_id ON ServiceTags (service_id); CREATE INDEX idx_service_tags_tag_id ON ServiceTags (tag_id); -ALTER TABLE Fogs ADD COLUMN container_engine VARCHAR(32); -ALTER TABLE Fogs ADD COLUMN deployment_type VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN container_engine VARCHAR(36); +ALTER TABLE Fogs ADD COLUMN deployment_type VARCHAR(36); ALTER TABLE MicroserviceExtraHost DROP COLUMN public_port; ALTER TABLE MicroservicePorts DROP COLUMN is_public; @@ -727,7 +727,7 @@ CREATE TABLE IF NOT EXISTS ConfigMaps ( CREATE INDEX idx_config_maps_name ON ConfigMaps (name); CREATE TABLE IF NOT EXISTS VolumeMounts ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, name VARCHAR(255) NOT NULL, config_map_name VARCHAR(255), secret_name VARCHAR(255), @@ -744,8 +744,8 @@ CREATE INDEX idx_volume_mounts_secret_name ON VolumeMounts (secret_name); CREATE TABLE IF NOT EXISTS FogVolumeMounts ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, - fog_uuid VARCHAR(32), - volume_mount_uuid VARCHAR(32), + fog_uuid VARCHAR(36), + volume_mount_uuid VARCHAR(36), FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, FOREIGN KEY (volume_mount_uuid) REFERENCES VolumeMounts (uuid) ON DELETE CASCADE ); @@ -759,16 +759,16 @@ ALTER TABLE Fogs ADD COLUMN volume_mount_last_update BIGINT DEFAULT 0; ALTER TABLE ChangeTrackings ADD COLUMN volume_mounts BOOLEAN DEFAULT false; ALTER TABLE ChangeTrackings ADD COLUMN exec_sessions BOOLEAN DEFAULT false; -ALTER TABLE 
Services ADD COLUMN provisioning_status VARCHAR(32) DEFAULT 'pending'; +ALTER TABLE Services ADD COLUMN provisioning_status VARCHAR(36) DEFAULT 'pending'; ALTER TABLE Services ADD COLUMN provisioning_error TEXT; ALTER TABLE Fogs ADD COLUMN warning_message TEXT; -ALTER TABLE Fogs ADD COLUMN gps_device VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN gps_device VARCHAR(36); ALTER TABLE Fogs ADD COLUMN gps_scan_frequency INT DEFAULT 60; ALTER TABLE Fogs ADD COLUMN edge_guard_frequency INT DEFAULT 0; -ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(32); -ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(32); +ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(36); +ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(36); ALTER TABLE Microservices ADD COLUMN exec_enabled BOOLEAN DEFAULT false; ALTER TABLE MicroserviceStatuses ADD COLUMN exec_session_ids TEXT; @@ -777,9 +777,9 @@ ALTER TABLE Microservices ADD COLUMN schedule INT DEFAULT 50; CREATE TABLE IF NOT EXISTS MicroserviceExecStatuses ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, - status VARCHAR(255) DEFAULT 'PENDING', + status VARCHAR(255) DEFAULT 'INACTIVE', exec_session_id VARCHAR(255), - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), created_at DATETIME, updated_at DATETIME, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql index 74cca3e7..d8367c48 100644 --- a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql +++ b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql @@ -58,7 +58,7 @@ CREATE INDEX idx_fog_type_bluetooth_catalog_item_id ON "FogTypes" (bluetooth_cat CREATE TABLE IF NOT EXISTS "Fogs" ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, name VARCHAR(255) DEFAULT 'Unnamed ioFog 1', location TEXT, gps_mode TEXT, @@ -66,7 +66,7 @@ CREATE TABLE IF NOT EXISTS "Fogs" ( longitude 
DOUBLE PRECISION, description TEXT, last_active BIGINT, - daemon_status VARCHAR(32) DEFAULT 'NOT_PROVISIONED', + daemon_status VARCHAR(36) DEFAULT 'NOT_PROVISIONED', daemon_operating_duration BIGINT DEFAULT 0, daemon_last_start BIGINT, memory_usage DOUBLE PRECISION DEFAULT 0.000, @@ -78,21 +78,21 @@ CREATE TABLE IF NOT EXISTS "Fogs" ( system_available_disk BIGINT, system_available_memory BIGINT, system_total_cpu DOUBLE PRECISION, - security_status VARCHAR(32) DEFAULT 'OK', - security_violation_info VARCHAR(32) DEFAULT 'No violation', + security_status VARCHAR(36) DEFAULT 'OK', + security_violation_info VARCHAR(36) DEFAULT 'No violation', catalog_item_status TEXT, repository_count BIGINT DEFAULT 0, repository_status TEXT, system_time BIGINT, last_status_time BIGINT, - ip_address VARCHAR(32) DEFAULT '0.0.0.0', - ip_address_external VARCHAR(32) DEFAULT '0.0.0.0', - host VARCHAR(32), + ip_address VARCHAR(36) DEFAULT '0.0.0.0', + ip_address_external VARCHAR(36) DEFAULT '0.0.0.0', + host VARCHAR(36), processed_messages BIGINT DEFAULT 0, catalog_item_message_counts TEXT, message_speed DOUBLE PRECISION DEFAULT 0.000, last_command_time BIGINT, - network_interface VARCHAR(32) DEFAULT 'dynamic', + network_interface VARCHAR(36) DEFAULT 'dynamic', docker_url VARCHAR(255) DEFAULT 'unix:///var/run/docker.sock', disk_limit DOUBLE PRECISION DEFAULT 50, disk_directory VARCHAR(255) DEFAULT '/var/lib/iofog/', @@ -116,7 +116,7 @@ CREATE TABLE IF NOT EXISTS "Fogs" ( log_level VARCHAR(10) DEFAULT 'INFO', is_system BOOLEAN DEFAULT FALSE, router_id INT DEFAULT 0, - time_zone VARCHAR(32) DEFAULT 'Etc/UTC', + time_zone VARCHAR(36) DEFAULT 'Etc/UTC', created_at TIMESTAMP(0), updated_at TIMESTAMP(0), fog_type_id INT DEFAULT 0, @@ -142,7 +142,7 @@ CREATE TABLE IF NOT EXISTS "ChangeTrackings" ( prune BOOLEAN DEFAULT false, linked_edge_resources BOOLEAN DEFAULT false, last_updated VARCHAR(255) DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES 
"Fogs" (uuid) ON DELETE CASCADE ); @@ -152,7 +152,7 @@ CREATE TABLE IF NOT EXISTS "FogAccessTokens" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, expiration_time BIGINT, token TEXT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE ); @@ -162,7 +162,7 @@ CREATE TABLE IF NOT EXISTS "FogProvisionKeys" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, provisioning_string VARCHAR(100), expiration_time BIGINT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE ); @@ -171,7 +171,7 @@ CREATE INDEX idx_fog_provision_keys_iofogUuid ON "FogProvisionKeys" (iofog_uuid) CREATE TABLE IF NOT EXISTS "FogVersionCommands" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, version_command VARCHAR(100), - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE ); @@ -182,7 +182,7 @@ CREATE TABLE IF NOT EXISTS "HWInfos" ( info TEXT, created_at TIMESTAMP(0), updated_at TIMESTAMP(0), - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE ); @@ -193,7 +193,7 @@ CREATE TABLE IF NOT EXISTS "USBInfos" ( info TEXT, created_at TIMESTAMP(0), updated_at TIMESTAMP(0), - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE ); @@ -208,14 +208,14 @@ CREATE TABLE IF NOT EXISTS "Tunnels" ( local_port INT DEFAULT 22, rsa_key TEXT, closed BOOLEAN DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE ); CREATE INDEX idx_tunnels_iofogUuid ON "Tunnels" (iofog_uuid); CREATE TABLE IF NOT EXISTS "Microservices" ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, config TEXT, name VARCHAR(255) DEFAULT 'New Microservice', config_last_updated 
BIGINT, @@ -229,7 +229,7 @@ CREATE TABLE IF NOT EXISTS "Microservices" ( updated_at TIMESTAMP(0), catalog_item_id INT, registry_id INT DEFAULT 1, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), application_id INT, FOREIGN KEY (catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE, FOREIGN KEY (registry_id) REFERENCES "Registries" (id) ON DELETE SET NULL, @@ -245,7 +245,7 @@ CREATE INDEX idx_microservices_applicationId ON "Microservices" (application_id) CREATE TABLE IF NOT EXISTS "MicroserviceArgs" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, cmd TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -255,7 +255,7 @@ CREATE TABLE IF NOT EXISTS "MicroserviceEnvs" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, key TEXT, value TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -268,9 +268,9 @@ CREATE TABLE IF NOT EXISTS "MicroserviceExtraHost" ( public_port INT, template TEXT, value TEXT, - microservice_uuid VARCHAR(32), - target_microservice_uuid VARCHAR(32), - target_fog_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), + target_microservice_uuid VARCHAR(36), + target_fog_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, FOREIGN KEY (target_microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, FOREIGN KEY (target_fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE @@ -289,7 +289,7 @@ CREATE TABLE IF NOT EXISTS "MicroservicePorts" ( is_proxy BOOLEAN, created_at TIMESTAMP(0), updated_at TIMESTAMP(0), - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -326,7 +326,7 @@ CREATE TABLE IF NOT EXISTS "MicroserviceStatuses" ( container_id 
VARCHAR(255) DEFAULT '', percentage DOUBLE PRECISION DEFAULT 0.00, error_message TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), created_at TIMESTAMP(0), updated_at TIMESTAMP(0), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE @@ -338,7 +338,7 @@ CREATE TABLE IF NOT EXISTS "StraceDiagnostics" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, strace_run BOOLEAN, buffer VARCHAR(255) DEFAULT '', - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -350,7 +350,7 @@ CREATE TABLE IF NOT EXISTS "VolumeMappings" ( container_destination TEXT, access_mode TEXT, type TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -361,7 +361,7 @@ CREATE TABLE IF NOT EXISTS "CatalogItemImages" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, container_image TEXT, catalog_item_id INT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), fog_type_id INT, FOREIGN KEY (catalog_item_id) REFERENCES "CatalogItems" (id) ON DELETE CASCADE, FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, @@ -396,8 +396,8 @@ CREATE INDEX idx_catalog_item_output_type_catalog_item_id ON "CatalogItemOutputT CREATE TABLE IF NOT EXISTS "Routings" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, name TEXT NOT NULL, - source_microservice_uuid VARCHAR(32), - dest_microservice_uuid VARCHAR(32), + source_microservice_uuid VARCHAR(36), + dest_microservice_uuid VARCHAR(36), application_id INT, FOREIGN KEY (source_microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, FOREIGN KEY (dest_microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, @@ -416,7 +416,7 @@ CREATE TABLE IF NOT EXISTS "Routers" ( inter_router_port INT, host TEXT, is_default BOOLEAN 
DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), created_at TIMESTAMP(0), updated_at TIMESTAMP(0), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE @@ -459,7 +459,7 @@ CREATE TABLE IF NOT EXISTS "Tags" ( CREATE TABLE IF NOT EXISTS "IofogTags" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, - fog_uuid VARCHAR(32), + fog_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE @@ -484,7 +484,7 @@ CREATE TABLE IF NOT EXISTS "EdgeResources" ( CREATE TABLE IF NOT EXISTS "AgentEdgeResources" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, - fog_uuid VARCHAR(32), + fog_uuid VARCHAR(36), edge_resource_id INT, FOREIGN KEY (fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE, FOREIGN KEY (edge_resource_id) REFERENCES "EdgeResources" (id) ON DELETE CASCADE @@ -555,7 +555,7 @@ CREATE INDEX idx_applicationtemplatevariables_application_template_id ON "Applic CREATE TABLE IF NOT EXISTS "MicroserviceCdiDevices" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, cdi_devices TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -569,7 +569,7 @@ ADD COLUMN runtime TEXT DEFAULT NULL; CREATE TABLE IF NOT EXISTS "MicroservicePubTags" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE @@ -577,7 +577,7 @@ CREATE TABLE IF NOT EXISTS "MicroservicePubTags" ( CREATE TABLE IF NOT EXISTS "MicroserviceSubTags" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (microservice_uuid) REFERENCES 
"Microservices" (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES "Tags" (id) ON DELETE CASCADE @@ -591,7 +591,7 @@ CREATE INDEX idx_microservicesubtags_tag_id ON "MicroserviceSubTags" (tag_id); CREATE TABLE IF NOT EXISTS "MicroserviceCapAdd" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, cap_add TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -600,7 +600,7 @@ CREATE INDEX idx_microservice_capAdd_microserviceUuid ON "MicroserviceCapAdd" (m CREATE TABLE IF NOT EXISTS "MicroserviceCapDrop" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, cap_drop TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE ); @@ -612,7 +612,7 @@ ADD COLUMN annotations TEXT; CREATE TABLE IF NOT EXISTS "FogPublicKeys" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, public_key TEXT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), created_at TIMESTAMP(0), updated_at TIMESTAMP(0), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE @@ -623,8 +623,8 @@ CREATE INDEX idx_fog_public_keys_iofogUuid ON "FogPublicKeys" (iofog_uuid); CREATE TABLE IF NOT EXISTS "FogUsedTokens" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, jti VARCHAR(255) NOT NULL, - iofog_uuid VARCHAR(32), - expiry_time TIMESTAMP(0) NOT NULL, + iofog_uuid VARCHAR(36), + expiry_time BIGINT NOT NULL, created_at TIMESTAMP(0), updated_at TIMESTAMP(0), FOREIGN KEY (iofog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE @@ -703,8 +703,8 @@ CREATE INDEX idx_service_tags_service_id ON "ServiceTags" (service_id); CREATE INDEX idx_service_tags_tag_id ON "ServiceTags" (tag_id); -ALTER TABLE "Fogs" ADD COLUMN container_engine VARCHAR(32); -ALTER TABLE "Fogs" ADD COLUMN deployment_type VARCHAR(32); +ALTER TABLE "Fogs" ADD COLUMN container_engine VARCHAR(36); 
+ALTER TABLE "Fogs" ADD COLUMN deployment_type VARCHAR(36); ALTER TABLE "MicroserviceExtraHost" DROP COLUMN IF EXISTS public_port; ALTER TABLE "MicroservicePorts" DROP COLUMN IF EXISTS is_public; @@ -727,7 +727,7 @@ CREATE TABLE IF NOT EXISTS "ConfigMaps" ( CREATE INDEX idx_config_maps_name ON "ConfigMaps" (name); CREATE TABLE IF NOT EXISTS "VolumeMounts" ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, name VARCHAR(255) NOT NULL, config_map_name VARCHAR(255), secret_name VARCHAR(255), @@ -744,8 +744,8 @@ CREATE INDEX idx_volume_mounts_secret_name ON "VolumeMounts" (secret_name); CREATE TABLE IF NOT EXISTS "FogVolumeMounts" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, - fog_uuid VARCHAR(32), - volume_mount_uuid VARCHAR(32), + fog_uuid VARCHAR(36), + volume_mount_uuid VARCHAR(36), FOREIGN KEY (fog_uuid) REFERENCES "Fogs" (uuid) ON DELETE CASCADE, FOREIGN KEY (volume_mount_uuid) REFERENCES "VolumeMounts" (uuid) ON DELETE CASCADE ); @@ -759,16 +759,16 @@ ALTER TABLE "Fogs" ADD COLUMN volume_mount_last_update BIGINT DEFAULT 0; ALTER TABLE "ChangeTrackings" ADD COLUMN volume_mounts BOOLEAN DEFAULT false; ALTER TABLE "ChangeTrackings" ADD COLUMN exec_sessions BOOLEAN DEFAULT false; -ALTER TABLE "Services" ADD COLUMN provisioning_status VARCHAR(32) DEFAULT 'pending'; +ALTER TABLE "Services" ADD COLUMN provisioning_status VARCHAR(36) DEFAULT 'pending'; ALTER TABLE "Services" ADD COLUMN provisioning_error TEXT; ALTER TABLE "Fogs" ADD COLUMN warning_message TEXT DEFAULT 'HEALTHY'; -ALTER TABLE "Fogs" ADD COLUMN gps_device VARCHAR(32); +ALTER TABLE "Fogs" ADD COLUMN gps_device VARCHAR(36); ALTER TABLE "Fogs" ADD COLUMN gps_scan_frequency INT DEFAULT 60; ALTER TABLE "Fogs" ADD COLUMN edge_guard_frequency INT DEFAULT 0; -ALTER TABLE "Microservices" ADD COLUMN pid_mode VARCHAR(32); -ALTER TABLE "Microservices" ADD COLUMN ipc_mode VARCHAR(32); +ALTER TABLE "Microservices" ADD COLUMN pid_mode VARCHAR(36); +ALTER TABLE 
"Microservices" ADD COLUMN ipc_mode VARCHAR(36); ALTER TABLE "Microservices" ADD COLUMN exec_enabled BOOLEAN DEFAULT false; ALTER TABLE "MicroserviceStatuses" ADD COLUMN exec_session_ids TEXT; @@ -777,9 +777,9 @@ ALTER TABLE "Microservices" ADD COLUMN schedule INT DEFAULT 50; CREATE TABLE IF NOT EXISTS "MicroserviceExecStatuses" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, - status VARCHAR(255) DEFAULT 'PENDING', + status VARCHAR(255) DEFAULT 'INACTIVE', exec_session_id VARCHAR(255), - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), created_at TIMESTAMP(0), updated_at TIMESTAMP(0), FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE diff --git a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql index 75b56bf1..f5d2aee4 100644 --- a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql @@ -58,7 +58,7 @@ CREATE INDEX idx_fog_type_bluetooth_catalog_item_id ON FogTypes (bluetooth_catal CREATE TABLE IF NOT EXISTS Fogs ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, name VARCHAR(255) DEFAULT 'Unnamed ioFog 1', location TEXT, gps_mode TEXT, @@ -66,7 +66,7 @@ CREATE TABLE IF NOT EXISTS Fogs ( longitude FLOAT, description TEXT, last_active BIGINT, - daemon_status VARCHAR(32) DEFAULT 'NOT_PROVISIONED', + daemon_status VARCHAR(36) DEFAULT 'NOT_PROVISIONED', daemon_operating_duration BIGINT DEFAULT 0, daemon_last_start BIGINT, memory_usage FLOAT DEFAULT 0.000, @@ -78,21 +78,21 @@ CREATE TABLE IF NOT EXISTS Fogs ( system_available_disk BIGINT, system_available_memory BIGINT, system_total_cpu FLOAT, - security_status VARCHAR(32) DEFAULT 'OK', - security_violation_info VARCHAR(32) DEFAULT 'No violation', + security_status VARCHAR(36) DEFAULT 'OK', + security_violation_info VARCHAR(36) DEFAULT 'No violation', catalog_item_status TEXT, 
repository_count BIGINT DEFAULT 0, repository_status TEXT, system_time BIGINT, last_status_time BIGINT, - ip_address VARCHAR(32) DEFAULT '0.0.0.0', - ip_address_external VARCHAR(32) DEFAULT '0.0.0.0', - host VARCHAR(32), + ip_address VARCHAR(36) DEFAULT '0.0.0.0', + ip_address_external VARCHAR(36) DEFAULT '0.0.0.0', + host VARCHAR(36), processed_messages BIGINT DEFAULT 0, catalog_item_message_counts TEXT, message_speed FLOAT DEFAULT 0.000, last_command_time BIGINT, - network_interface VARCHAR(32) DEFAULT 'dynamic', + network_interface VARCHAR(36) DEFAULT 'dynamic', docker_url VARCHAR(255) DEFAULT 'unix:///var/run/docker.sock', disk_limit FLOAT DEFAULT 50, disk_directory VARCHAR(255) DEFAULT '/var/lib/iofog/', @@ -116,7 +116,7 @@ CREATE TABLE IF NOT EXISTS Fogs ( log_level VARCHAR(10) DEFAULT 'INFO', is_system BOOLEAN DEFAULT FALSE, router_id INT DEFAULT 0, - time_zone VARCHAR(32) DEFAULT 'Etc/UTC', + time_zone VARCHAR(36) DEFAULT 'Etc/UTC', created_at DATETIME, updated_at DATETIME, fog_type_id INT DEFAULT 0, @@ -142,7 +142,7 @@ CREATE TABLE IF NOT EXISTS ChangeTrackings ( prune BOOLEAN DEFAULT false, linked_edge_resources BOOLEAN DEFAULT false, last_updated VARCHAR(255) DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -152,7 +152,7 @@ CREATE TABLE IF NOT EXISTS FogAccessTokens ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, expiration_time BIGINT, token TEXT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -162,7 +162,7 @@ CREATE TABLE IF NOT EXISTS FogProvisionKeys ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, provisioning_string VARCHAR(100), expiration_time BIGINT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -171,7 +171,7 @@ CREATE INDEX idx_fog_provision_keys_iofogUuid ON FogProvisionKeys (iofog_uuid); CREATE TABLE 
IF NOT EXISTS FogVersionCommands ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, version_command VARCHAR(100), - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -182,7 +182,7 @@ CREATE TABLE IF NOT EXISTS HWInfos ( info TEXT, created_at DATETIME, updated_at DATETIME, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -193,7 +193,7 @@ CREATE TABLE IF NOT EXISTS USBInfos ( info TEXT, created_at DATETIME, updated_at DATETIME, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); @@ -208,14 +208,14 @@ CREATE TABLE IF NOT EXISTS Tunnels ( local_port INT DEFAULT 22, rsa_key TEXT, closed BOOLEAN DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE ); CREATE INDEX idx_tunnels_iofogUuid ON Tunnels (iofog_uuid); CREATE TABLE IF NOT EXISTS Microservices ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, config TEXT, name VARCHAR(255) DEFAULT 'New Microservice', config_last_updated BIGINT, @@ -229,7 +229,7 @@ CREATE TABLE IF NOT EXISTS Microservices ( updated_at DATETIME, catalog_item_id INT, registry_id INT DEFAULT 1, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), application_id INT, run_as_user TEXT, platform TEXT, @@ -248,7 +248,7 @@ CREATE INDEX idx_microservices_applicationId ON Microservices (application_id); CREATE TABLE IF NOT EXISTS MicroserviceArgs ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, cmd TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -258,7 +258,7 @@ CREATE TABLE IF NOT EXISTS MicroserviceEnvs ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, `key` TEXT, `value` TEXT, - microservice_uuid VARCHAR(32), + 
microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -270,9 +270,9 @@ CREATE TABLE IF NOT EXISTS MicroserviceExtraHost ( name TEXT, template TEXT, `value` TEXT, - microservice_uuid VARCHAR(32), - target_microservice_uuid VARCHAR(32), - target_fog_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), + target_microservice_uuid VARCHAR(36), + target_fog_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (target_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (target_fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ -289,7 +289,7 @@ CREATE TABLE IF NOT EXISTS MicroservicePorts ( is_udp BOOLEAN, created_at DATETIME, updated_at DATETIME, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -326,7 +326,7 @@ CREATE TABLE IF NOT EXISTS MicroserviceStatuses ( container_id VARCHAR(255) DEFAULT '', percentage FLOAT DEFAULT 0.00, error_message TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), created_at DATETIME, updated_at DATETIME, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE @@ -338,7 +338,7 @@ CREATE TABLE IF NOT EXISTS StraceDiagnostics ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, strace_run BOOLEAN, buffer VARCHAR(255) DEFAULT '', - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -350,7 +350,7 @@ CREATE TABLE IF NOT EXISTS VolumeMappings ( container_destination TEXT, access_mode TEXT, type TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -361,7 +361,7 @@ CREATE TABLE IF NOT EXISTS CatalogItemImages ( id INTEGER PRIMARY KEY AUTOINCREMENT 
NOT NULL, container_image TEXT, catalog_item_id INT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), fog_type_id INT, FOREIGN KEY (catalog_item_id) REFERENCES CatalogItems (id) ON DELETE CASCADE, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, @@ -396,8 +396,8 @@ CREATE INDEX idx_catalog_item_output_type_catalog_item_id ON CatalogItemOutputTy CREATE TABLE IF NOT EXISTS Routings ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, name TEXT NOT NULL, - source_microservice_uuid VARCHAR(32), - dest_microservice_uuid VARCHAR(32), + source_microservice_uuid VARCHAR(36), + dest_microservice_uuid VARCHAR(36), application_id INT, FOREIGN KEY (source_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (dest_microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, @@ -416,7 +416,7 @@ CREATE TABLE IF NOT EXISTS Routers ( inter_router_port INT, host TEXT, is_default BOOLEAN DEFAULT false, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), created_at DATETIME, updated_at DATETIME, FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ -459,7 +459,7 @@ CREATE TABLE IF NOT EXISTS Tags ( CREATE TABLE IF NOT EXISTS IofogTags ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, - fog_uuid VARCHAR(32), + fog_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE @@ -484,7 +484,7 @@ CREATE TABLE IF NOT EXISTS EdgeResources ( CREATE TABLE IF NOT EXISTS AgentEdgeResources ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, - fog_uuid VARCHAR(32), + fog_uuid VARCHAR(36), edge_resource_id INT, FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, FOREIGN KEY (edge_resource_id) REFERENCES EdgeResources (id) ON DELETE CASCADE @@ -555,7 +555,7 @@ CREATE INDEX idx_applicationtemplatevariables_application_template_id ON Applica CREATE TABLE IF NOT EXISTS MicroserviceCdiDevices ( id 
INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, cdi_devices TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -563,7 +563,7 @@ CREATE INDEX idx_microservice_cdiDevices_microserviceUuid ON MicroserviceCdiDevi CREATE TABLE IF NOT EXISTS MicroservicePubTags ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE @@ -571,7 +571,7 @@ CREATE TABLE IF NOT EXISTS MicroservicePubTags ( CREATE TABLE IF NOT EXISTS MicroserviceSubTags ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), tag_id INT, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE, FOREIGN KEY (tag_id) REFERENCES Tags (id) ON DELETE CASCADE @@ -585,7 +585,7 @@ CREATE INDEX idx_microservicesubtags_tag_id ON MicroservicesubTags (tag_id); CREATE TABLE IF NOT EXISTS MicroserviceCapAdd ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, cap_add TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -594,7 +594,7 @@ CREATE INDEX idx_microservice_capAdd_microserviceUuid ON MicroserviceCapAdd (mic CREATE TABLE IF NOT EXISTS MicroserviceCapDrop ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, cap_drop TEXT, - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); @@ -605,7 +605,7 @@ ALTER TABLE Microservices ADD COLUMN annotations TEXT; CREATE TABLE IF NOT EXISTS FogPublicKeys ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, public_key TEXT, - iofog_uuid VARCHAR(32), + iofog_uuid VARCHAR(36), created_at DATETIME, 
updated_at DATETIME, FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ -616,8 +616,8 @@ CREATE INDEX idx_fog_public_keys_iofogUuid ON FogPublicKeys (iofog_uuid); CREATE TABLE IF NOT EXISTS FogUsedTokens ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, jti VARCHAR(255) NOT NULL, - iofog_uuid VARCHAR(32), - expiry_time DATETIME NOT NULL, + iofog_uuid VARCHAR(36), + expiry_time BIGINT NOT NULL, created_at DATETIME, updated_at DATETIME, FOREIGN KEY (iofog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE @@ -694,8 +694,8 @@ CREATE TABLE IF NOT EXISTS ServiceTags ( CREATE INDEX idx_service_tags_service_id ON ServiceTags (service_id); CREATE INDEX idx_service_tags_tag_id ON ServiceTags (tag_id); -ALTER TABLE Fogs ADD COLUMN container_engine VARCHAR(32); -ALTER TABLE Fogs ADD COLUMN deployment_type VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN container_engine VARCHAR(36); +ALTER TABLE Fogs ADD COLUMN deployment_type VARCHAR(36); DROP TABLE IF EXISTS MicroservicePublicPorts; @@ -714,7 +714,7 @@ CREATE TABLE IF NOT EXISTS ConfigMaps ( CREATE INDEX idx_config_maps_name ON ConfigMaps (name); CREATE TABLE IF NOT EXISTS VolumeMounts ( - uuid VARCHAR(32) PRIMARY KEY NOT NULL, + uuid VARCHAR(36) PRIMARY KEY NOT NULL, name VARCHAR(255) NOT NULL, config_map_name VARCHAR(255), secret_name VARCHAR(255), @@ -731,8 +731,8 @@ CREATE INDEX idx_volume_mounts_secret_name ON VolumeMounts (secret_name); CREATE TABLE IF NOT EXISTS FogVolumeMounts ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, - fog_uuid VARCHAR(32), - volume_mount_uuid VARCHAR(32), + fog_uuid VARCHAR(36), + volume_mount_uuid VARCHAR(36), FOREIGN KEY (fog_uuid) REFERENCES Fogs (uuid) ON DELETE CASCADE, FOREIGN KEY (volume_mount_uuid) REFERENCES VolumeMounts (uuid) ON DELETE CASCADE ); @@ -746,16 +746,16 @@ ALTER TABLE Fogs ADD COLUMN volume_mount_last_update BIGINT DEFAULT 0; ALTER TABLE ChangeTrackings ADD COLUMN volume_mounts BOOLEAN DEFAULT false; ALTER TABLE ChangeTrackings ADD COLUMN exec_sessions BOOLEAN 
DEFAULT false; -ALTER TABLE Services ADD COLUMN provisioning_status VARCHAR(32) DEFAULT 'pending'; +ALTER TABLE Services ADD COLUMN provisioning_status VARCHAR(36) DEFAULT 'pending'; ALTER TABLE Services ADD COLUMN provisioning_error TEXT; ALTER TABLE Fogs ADD COLUMN warning_message TEXT DEFAULT 'HEALTHY'; -ALTER TABLE Fogs ADD COLUMN gps_device VARCHAR(32); +ALTER TABLE Fogs ADD COLUMN gps_device VARCHAR(36); ALTER TABLE Fogs ADD COLUMN gps_scan_frequency INT DEFAULT 60; ALTER TABLE Fogs ADD COLUMN edge_guard_frequency INT DEFAULT 0; -ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(32); -ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(32); +ALTER TABLE Microservices ADD COLUMN pid_mode VARCHAR(36); +ALTER TABLE Microservices ADD COLUMN ipc_mode VARCHAR(36); ALTER TABLE Microservices ADD COLUMN exec_enabled BOOLEAN DEFAULT false; ALTER TABLE MicroserviceStatuses ADD COLUMN exec_session_ids TEXT; @@ -764,9 +764,9 @@ ALTER TABLE Microservices ADD COLUMN schedule INT DEFAULT 50; CREATE TABLE IF NOT EXISTS MicroserviceExecStatuses ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, - status VARCHAR(255) DEFAULT 'PENDING', + status VARCHAR(255) DEFAULT 'INACTIVE', exec_session_id VARCHAR(255), - microservice_uuid VARCHAR(32), + microservice_uuid VARCHAR(36), created_at DATETIME, updated_at DATETIME, FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE diff --git a/src/data/models/fog.js b/src/data/models/fog.js index 72d956e2..49c69961 100644 --- a/src/data/models/fog.js +++ b/src/data/models/fog.js @@ -5,7 +5,7 @@ const { convertToInt } = require('../../helpers/app-helper') module.exports = (sequelize, DataTypes) => { const Fog = sequelize.define('Fog', { uuid: { - type: DataTypes.STRING(32), + type: DataTypes.STRING(36), primaryKey: true, allowNull: false, field: 'uuid' @@ -17,7 +17,8 @@ module.exports = (sequelize, DataTypes) => { }, location: { type: DataTypes.TEXT, - field: 'location' + field: 'location', + defaultValue: '' }, 
gpsMode: { type: DataTypes.TEXT, @@ -42,7 +43,8 @@ module.exports = (sequelize, DataTypes) => { }, description: { type: DataTypes.TEXT, - field: 'description' + field: 'description', + defaultValue: '' }, lastActive: { type: DataTypes.BIGINT, @@ -370,11 +372,6 @@ module.exports = (sequelize, DataTypes) => { defaultValue: 0 }) - // Fog.hasOne(models.FogAccessToken, { - // foreignKey: 'iofog_uuid', - // as: 'accessToken' - // }) - Fog.hasOne(models.FogPublicKey, { foreignKey: 'iofog_uuid', as: 'publicKey' diff --git a/src/data/models/fogUsedToken.js b/src/data/models/fogUsedToken.js index a46323ff..6e0b9e68 100644 --- a/src/data/models/fogUsedToken.js +++ b/src/data/models/fogUsedToken.js @@ -19,7 +19,11 @@ module.exports = (sequelize, DataTypes) => { expiryTime: { type: DataTypes.BIGINT, get () { - return convertToInt(this.getDataValue('daemonLastStart'), 0) + return convertToInt(this.getDataValue('expiryTime'), 0) + }, + set (value) { + // Ensure the value is stored as a BIGINT (Unix timestamp) + this.setDataValue('expiryTime', parseInt(value, 10)) }, field: 'expiry_time' } diff --git a/src/data/models/microservice.js b/src/data/models/microservice.js index 7806b389..c90e9871 100644 --- a/src/data/models/microservice.js +++ b/src/data/models/microservice.js @@ -5,7 +5,7 @@ const { convertToInt } = require('../../helpers/app-helper') module.exports = (sequelize, DataTypes) => { const Microservice = sequelize.define('Microservice', { uuid: { - type: DataTypes.STRING(32), + type: DataTypes.STRING(36), primaryKey: true, allowNull: false, field: 'uuid' @@ -183,11 +183,6 @@ module.exports = (sequelize, DataTypes) => { as: 'env' }) - Microservice.hasMany(models.VolumeMount, { - foreignKey: 'microservice_uuid', - as: 'volumeMounts' - }) - Microservice.hasMany(models.MicroserviceArg, { foreignKey: 'microservice_uuid', as: 'cmd' diff --git a/src/data/models/volumeMount.js b/src/data/models/volumeMount.js index 0bdf2f2f..db27b4fa 100644 --- a/src/data/models/volumeMount.js 
+++ b/src/data/models/volumeMount.js @@ -3,7 +3,7 @@ module.exports = (sequelize, DataTypes) => { const VolumeMount = sequelize.define('VolumeMount', { uuid: { - type: DataTypes.STRING(32), + type: DataTypes.STRING(36), primaryKey: true, allowNull: false, field: 'uuid' diff --git a/src/data/providers/mysql.js b/src/data/providers/mysql.js index 8cf6e9cc..a899ea96 100644 --- a/src/data/providers/mysql.js +++ b/src/data/providers/mysql.js @@ -21,6 +21,20 @@ class MySqlDatabaseProvider extends DatabaseProvider { connectTimeout: 10000 } + // Configure SSL if enabled + const useSSL = process.env.DB_USE_SSL === 'true' || mysqlConfig.useSsl === true + if (useSSL) { + const caBase64 = process.env.DB_SSL_CA_B64 + const sslOptions = caBase64 + ? { + ca: Buffer.from(caBase64, 'base64').toString('utf-8'), + rejectUnauthorized: true + } + : { rejectUnauthorized: false } + + connectionOptions.ssl = sslOptions + } + // Sequelize configuration const sequelizeConfig = { dialect: 'mysql', @@ -35,6 +49,17 @@ class MySqlDatabaseProvider extends DatabaseProvider { logging: false } + // Add SSL configuration to Sequelize if enabled + if (useSSL) { + const caBase64 = process.env.DB_SSL_CA_B64 + sequelizeConfig.dialectOptions.ssl = caBase64 + ? { + ca: Buffer.from(caBase64, 'base64').toString('utf-8'), + rejectUnauthorized: true + } + : { rejectUnauthorized: false } + } + this.sequelize = new Sequelize(sequelizeConfig) this.connectionOptions = connectionOptions } diff --git a/src/data/providers/postgres.js b/src/data/providers/postgres.js index f1e44ade..bfbe071c 100644 --- a/src/data/providers/postgres.js +++ b/src/data/providers/postgres.js @@ -21,6 +21,20 @@ class PostgresDatabaseProvider extends DatabaseProvider { connectTimeout: 10000 } + // Configure SSL if enabled + const useSSL = process.env.DB_USE_SSL === 'true' || postgresConfig.useSsl === true + if (useSSL) { + const caBase64 = process.env.DB_SSL_CA_B64 + const sslOptions = caBase64 + ? 
{ + ca: Buffer.from(caBase64, 'base64').toString('utf-8'), + rejectUnauthorized: true + } + : { rejectUnauthorized: false } + + connectionOptions.ssl = sslOptions + } + // Sequelize configuration const sequelizeConfig = { dialect: 'postgres', @@ -34,6 +48,16 @@ class PostgresDatabaseProvider extends DatabaseProvider { }, logging: false } + // Add SSL configuration to Sequelize if enabled + if (useSSL) { + const caBase64 = process.env.DB_SSL_CA_B64 + sequelizeConfig.dialectOptions.ssl = caBase64 + ? { + ca: Buffer.from(caBase64, 'base64').toString('utf-8'), + rejectUnauthorized: true + } + : { rejectUnauthorized: false } + } this.sequelize = new Sequelize(sequelizeConfig) this.connectionOptions = connectionOptions @@ -50,7 +74,11 @@ class PostgresDatabaseProvider extends DatabaseProvider { // Database doesn't exist, try to create it logger.info('Database does not exist, attempting to create it...') const { database, ...connectionConfig } = this.connectionOptions - const pool = new Pool(connectionConfig) + // Connect to the default 'postgres' database to create the target database + const pool = new Pool({ + ...connectionConfig, + database: 'postgres' + }) try { await pool.query(`CREATE DATABASE "${database}"`) logger.info(`Database ${database} created successfully`) diff --git a/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql index 62c66bc5..5675a835 100644 --- a/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql +++ b/src/data/seeders/mysql/db_seeder_mysql_v1.0.2.sql @@ -7,12 +7,11 @@ VALUES INSERT INTO `CatalogItems` (name, description, category, publisher, disk_required, ram_required, picture, config_example, is_public, registry_id) VALUES - ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), ('RESTBlue', 'REST API for Bluetooth Low 
Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), - ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1); INSERT INTO `FogTypes` (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -26,18 +25,16 @@ WHERE fog_type_id IS NULL; INSERT INTO `CatalogItemImages` (catalog_item_id, fog_type_id, container_image) VALUES - (1, 1, 'ghcr.io/datasance/nats:latest'), - (1, 2, 'ghcr.io/datasance/nats:latest'), + (1, 1, 'ghcr.io/datasance/router:latest'), + (1, 2, 'ghcr.io/datasance/router:latest'), (2, 1, 'ghcr.io/datasance/restblue:latest'), (2, 2, 'ghcr.io/datasance/restblue:latest'), (3, 1, 'ghcr.io/datasance/hal:latest'), (3, 2, 'ghcr.io/datasance/hal:latest'), - (4, 1, 'ghcr.io/datasance/edge-guard:latest'), - (4, 2, 'ghcr.io/datasance/edge-guard:latest'), - (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'), - (6, 1, 'ghcr.io/datasance/node-debugger:latest'), - (6, 2, 'ghcr.io/datasance/node-debugger:latest'); + (4, 1, 'ghcr.io/datasance/node-debugger:latest'), + (4, 2, 'ghcr.io/datasance/node-debugger:latest'), + (5, 1, 'ghcr.io/datasance/nats:latest'), + (5, 2, 'ghcr.io/datasance/nats:latest'); COMMIT; \ No newline at end of file diff --git 
a/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql index 19a9ae04..37a361f4 100644 --- a/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql +++ b/src/data/seeders/postgres/db_seeder_pg_v1.0.2.sql @@ -7,12 +7,11 @@ VALUES INSERT INTO "CatalogItems" (name, description, category, publisher, disk_required, ram_required, picture, config_example, is_public, registry_id) VALUES - ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), - ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1); INSERT INTO "FogTypes" (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -26,17 +25,15 @@ WHERE fog_type_id IS NULL; INSERT INTO "CatalogItemImages" (catalog_item_id, fog_type_id, container_image) VALUES - (1, 1, 'ghcr.io/datasance/nats:latest'), - (1, 2, 'ghcr.io/datasance/nats:latest'), + (1, 1, 'ghcr.io/datasance/router:latest'), + (1, 2, 'ghcr.io/datasance/router:latest'), (2, 1, 
'ghcr.io/datasance/restblue:latest'), (2, 2, 'ghcr.io/datasance/restblue:latest'), (3, 1, 'ghcr.io/datasance/hal:latest'), (3, 2, 'ghcr.io/datasance/hal:latest'), - (4, 1, 'ghcr.io/datasance/edge-guard:latest'), - (4, 2, 'ghcr.io/datasance/edge-guard:latest'), - (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'), - (6, 1, 'ghcr.io/datasance/node-debugger:latest'), - (6, 2, 'ghcr.io/datasance/node-debugger:latest'); + (4, 1, 'ghcr.io/datasance/node-debugger:latest'), + (4, 2, 'ghcr.io/datasance/node-debugger:latest'), + (5, 1, 'ghcr.io/datasance/nats:latest'), + (5, 2, 'ghcr.io/datasance/nats:latest'); COMMIT; \ No newline at end of file diff --git a/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql b/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql index da57229f..d3e6e162 100644 --- a/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql +++ b/src/data/seeders/sqlite/db_seeder_sqlite_v1.0.2.sql @@ -5,12 +5,11 @@ VALUES INSERT INTO `CatalogItems` (name, description, category, publisher, disk_required, ram_required, picture, config_example, is_public, registry_id) VALUES - ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), + ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), ('RESTBlue', 'REST API for Bluetooth Low Energy layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), ('HAL', 'REST API for Hardware Abstraction layer.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('EdgeGuard', 'Security and monitoring component for edge devices running ioFog Agents.', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1), - ('Router', 'The built-in router for Datasance PoT.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), - ('Debug', 'The built-in debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1); + ('Debug', 'The built-in 
debugger for Datasance PoT IoFog Agent.', 'SYSTEM', 'Datasance', 0, 0, 'none.png', NULL, false, 1), + ('NATs', 'NATs server microservice for Datasance PoT', 'UTILITIES', 'Datasance', 0, 0, 'none.png', NULL, true, 1); INSERT INTO `FogTypes` (id, name, image, description, network_catalog_item_id, hal_catalog_item_id, bluetooth_catalog_item_id) VALUES @@ -24,15 +23,13 @@ WHERE fog_type_id IS NULL; INSERT INTO `CatalogItemImages` (catalog_item_id, fog_type_id, container_image) VALUES - (1, 1, 'ghcr.io/datasance/nats:latest'), - (1, 2, 'ghcr.io/datasance/nats:latest'), + (1, 1, 'ghcr.io/datasance/router:latest'), + (1, 2, 'ghcr.io/datasance/router:latest'), (2, 1, 'ghcr.io/datasance/restblue:latest'), (2, 2, 'ghcr.io/datasance/restblue:latest'), (3, 1, 'ghcr.io/datasance/hal:latest'), (3, 2, 'ghcr.io/datasance/hal:latest'), - (4, 1, 'ghcr.io/datasance/edge-guard:latest'), - (4, 2, 'ghcr.io/datasance/edge-guard:latest'), - (5, 1, 'ghcr.io/datasance/router:latest'), - (5, 2, 'ghcr.io/datasance/router:latest'), - (6, 1, 'ghcr.io/datasance/node-debugger:latest'), - (6, 2, 'ghcr.io/datasance/node-debugger:latest'); + (4, 1, 'ghcr.io/datasance/node-debugger:latest'), + (4, 2, 'ghcr.io/datasance/node-debugger:latest'), + (5, 1, 'ghcr.io/datasance/nats:latest'), + (5, 2, 'ghcr.io/datasance/nats:latest'); diff --git a/src/schemas/config-map.js b/src/schemas/config-map.js index bccfa961..2376be9e 100644 --- a/src/schemas/config-map.js +++ b/src/schemas/config-map.js @@ -14,6 +14,7 @@ const configMapUpdate = { id: '/configMapUpdate', type: 'object', properties: { + name: { type: 'string', minLength: 1, maxLength: 255 }, immutable: { type: 'boolean' }, data: { type: 'object' } }, diff --git a/src/server.js b/src/server.js index 30ad6b03..25a7e387 100755 --- a/src/server.js +++ b/src/server.js @@ -205,6 +205,7 @@ initialize().then(() => { const apiPort = process.env.API_PORT || config.get('server.port') const viewerPort = process.env.VIEWER_PORT || config.get('viewer.port') const 
viewerURL = process.env.VIEWER_URL || config.get('viewer.url') + const controlPlane = process.env.CONTROL_PLANE || config.get('app.ControlPlane') // File-based SSL configuration const sslKey = process.env.SSL_PATH_KEY || config.get('server.ssl.path.key') @@ -259,6 +260,9 @@ initialize().then(() => { if (viewerURL) { ecnViewerControllerConfig.url = viewerURL } + if (controlPlane) { + ecnViewerControllerConfig.controlPlane = controlPlane + } const ecnViewerConfigScript = ` window.controllerConfig = ${JSON.stringify(ecnViewerControllerConfig)} ` diff --git a/src/services/agent-service.js b/src/services/agent-service.js index fa0ff1d8..74111edb 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -18,6 +18,7 @@ const formidable = require('formidable') const Sequelize = require('sequelize') const moment = require('moment') const Op = Sequelize.Op +const logger = require('../logger') const TransactionDecorator = require('../decorators/transaction-decorator') const FogProvisionKeyManager = require('../data/managers/iofog-provision-key-manager') @@ -647,11 +648,20 @@ const getControllerCA = async function (fog, transaction) { if (hasFileBasedSSL) { try { if (intermedKey) { - const certData = fs.readFileSync(intermedKey) - return Buffer.from(certData).toString('base64') + // Check if intermediate certificate file exists before trying to read it + if (fs.existsSync(intermedKey)) { + const certData = fs.readFileSync(intermedKey, 'utf8') + return Buffer.from(certData).toString('base64') + } else { + // Intermediate certificate file doesn't exist, don't provide any CA cert + // Let the system's default trust store handle validation + logger.info(`Intermediate certificate file not found at path: ${intermedKey}, not providing CA certificate`) + return '' + } } else { - const certData = fs.readFileSync(sslCert) - return Buffer.from(certData).toString('base64') + // No intermediate certificate path provided, don't provide any CA cert + // Let the 
system's default trust store handle validation + return '' } } catch (error) { throw new Errors.ValidationError('Failed to read SSL certificate file') @@ -661,8 +671,10 @@ const getControllerCA = async function (fog, transaction) { if (hasBase64SSL) { if (intermedKeyBase64) { return intermedKeyBase64 - } else if (sslCertBase64) { - return sslCertBase64 + } else { + // No intermediate certificate base64 provided, don't provide any CA cert + // Let the system's default trust store handle validation + return '' } } diff --git a/src/services/catalog-service.js b/src/services/catalog-service.js index ae0be7db..8350c19c 100644 --- a/src/services/catalog-service.js +++ b/src/services/catalog-service.js @@ -136,7 +136,7 @@ const deleteCatalogItemEndPoint = async function (id, isCLI, transaction) { return affectedRows } -async function getNetworkCatalogItem (transaction) { +async function getNatsCatalogItem (transaction) { return CatalogItemManager.findOne({ name: 'NATs', category: 'UTILITIES', @@ -154,15 +154,6 @@ async function getRouterCatalogItem (transaction) { }, transaction) } -async function getProxyCatalogItem (transaction) { - return CatalogItemManager.findOne({ - name: DBConstants.PROXY_CATALOG_NAME, - category: 'SYSTEM', - publisher: 'Datasance', - registry_id: 1 - }, transaction) -} - async function getDebugCatalogItem (transaction) { return CatalogItemManager.findOne({ name: DBConstants.DEBUG_CATALOG_NAME, @@ -386,10 +377,9 @@ module.exports = { updateCatalogItemEndPoint: TransactionDecorator.generateTransaction(updateCatalogItemEndPoint), getCatalogItem: getCatalogItem, getSystemCatalogItem: getSystemCatalogItem, - getNetworkCatalogItem: getNetworkCatalogItem, + getNatsCatalogItem: getNatsCatalogItem, getBluetoothCatalogItem: getBluetoothCatalogItem, getHalCatalogItem: getHalCatalogItem, getRouterCatalogItem: getRouterCatalogItem, - getDebugCatalogItem: getDebugCatalogItem, - getProxyCatalogItem: getProxyCatalogItem + getDebugCatalogItem: getDebugCatalogItem 
} diff --git a/src/services/iofog-key-service.js b/src/services/iofog-key-service.js index 242c4597..d34927c9 100644 --- a/src/services/iofog-key-service.js +++ b/src/services/iofog-key-service.js @@ -12,9 +12,9 @@ */ const crypto = require('crypto') -const AppHelper = require('../helpers/app-helper') const FogPublicKeyManager = require('../data/managers/iofog-public-key-manager') const FogUsedTokenManager = require('../data/managers/fog-used-token-manager') +const SecretHelper = require('../helpers/secret-helper') const jose = require('jose') /** @@ -47,8 +47,8 @@ const generateKeyPair = async function (transaction) { * @returns {Promise} Promise resolving to the stored public key */ const storePublicKey = async function (fogUuid, publicKey, transaction) { - // Encrypt the public key using the fog UUID as salt - const encryptedPublicKey = AppHelper.encryptText(publicKey, fogUuid) + // Encrypt the public key using SecretHelper for better security and database compatibility + const encryptedPublicKey = await SecretHelper.encryptSecret(publicKey, fogUuid) // Store the encrypted public key return FogPublicKeyManager.updateOrCreate(fogUuid, encryptedPublicKey, transaction) @@ -68,8 +68,8 @@ const getPublicKey = async function (fogUuid, transaction) { return null } - // Decrypt the public key using the fog UUID as salt - return AppHelper.decryptText(fogPublicKey.publicKey, fogUuid) + // Decrypt the public key using SecretHelper for better security and database compatibility + return SecretHelper.decryptSecret(fogPublicKey.publicKey, fogUuid) } /** diff --git a/src/services/iofog-service.js b/src/services/iofog-service.js index 2c8853cd..ce9bac21 100644 --- a/src/services/iofog-service.js +++ b/src/services/iofog-service.js @@ -56,7 +56,7 @@ async function checkKubernetesEnvironment () { async function getLocalCertificateHosts (isKubernetes, namespace) { if (isKubernetes) { - return `router-local,router-local.${namespace},router-local.${namespace}.svc.cluster.local` + 
return `router-local,router-local.${namespace},router-local.${namespace}.svc.cluster.local,127.0.0.1,localhost,host.docker.internal,host.containers.internal` } return '127.0.0.1,localhost,host.docker.internal,host.containers.internal' } @@ -239,14 +239,13 @@ async function _handleRouterCertificates (fogData, uuid, isRouterModeChanged, tr async function createFogEndPoint (fogData, isCLI, transaction) { await Validator.validate(fogData, Validator.schemas.iofogCreate) - let createFogData = { uuid: AppHelper.generateUUID(), name: fogData.name, location: fogData.location, latitude: fogData.latitude, longitude: fogData.longitude, - gpsMode: fogData.latitude || fogData.longitude ? 'manual' : undefined, + // gpsMode: fogData.latitude || fogData.longitude ? 'manual' : undefined, description: fogData.description, networkInterface: fogData.networkInterface, dockerUrl: fogData.dockerUrl, @@ -275,6 +274,15 @@ async function createFogEndPoint (fogData, isCLI, transaction) { timeZone: fogData.timeZone } + if ((fogData.latitude || fogData.longitude) && fogData.gpsMode !== 'dynamic') { + createFogData.gpsMode = 'manual' + } else if (fogData.gpsMode === 'dynamic' && fogData.gpsDevice) { + createFogData.gpsMode = fogData.gpsMode + createFogData.gpsDevice = fogData.gpsDevice + } else { + createFogData.gpsMode = undefined + } + createFogData = AppHelper.deleteUndefinedFields(createFogData) // Default router is edge @@ -363,7 +371,7 @@ async function createFogEndPoint (fogData, isCLI, transaction) { // Set fog node as healthy await FogManager.update({ uuid: fog.uuid }, { warningMessage: 'HEALTHY' }, transaction) } catch (err) { - logger.error('Background orchestration failed in createFogEndPoint:', err) + logger.error('Background orchestration failed in createFogEndPoint: ' + err.message) // Set fog node as warning with error message await FogManager.update( { uuid: fog.uuid }, @@ -404,7 +412,7 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { location: 
fogData.location, latitude: fogData.latitude, longitude: fogData.longitude, - gpsMode: fogData.latitude || fogData.longitude ? 'manual' : undefined, + // gpsMode: fogData.latitude || fogData.longitude ? 'manual' : undefined, description: fogData.description, networkInterface: fogData.networkInterface, dockerUrl: fogData.dockerUrl, @@ -431,6 +439,15 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { availableDiskThreshold: fogData.availableDiskThreshold, timeZone: fogData.timeZone } + + if ((fogData.latitude || fogData.longitude) && fogData.gpsMode !== 'dynamic') { + updateFogData.gpsMode = 'manual' + } else if (fogData.gpsMode === 'dynamic' && fogData.gpsDevice) { + updateFogData.gpsMode = fogData.gpsMode + updateFogData.gpsDevice = fogData.gpsDevice + } else { + updateFogData.gpsMode = undefined + } updateFogData = AppHelper.deleteUndefinedFields(updateFogData) const oldFog = await FogManager.findOne(queryFogData, transaction) @@ -486,7 +503,8 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { (async () => { try { // --- Begin orchestration logic --- - await _handleRouterCertificates(fogData, fogData.uuid, isRouterModeChanged, transaction) + const fog = await FogManager.findOne({ uuid: fogData.uuid }, transaction) + await _handleRouterCertificates(fogData, fog.uuid, isRouterModeChanged, transaction) if (routerMode === 'none') { networkRouter = await RouterService.getNetworkRouter(fogData.networkRouter) @@ -597,7 +615,7 @@ async function updateFogEndPoint (fogData, isCLI, transaction) { // Set fog node as healthy await FogManager.update({ uuid: fogData.uuid }, { warningMessage: 'HEALTHY' }, transaction) } catch (err) { - logger.error('Background orchestration failed in updateFogEndPoint:', err) + logger.error('Background orchestration failed in updateFogEndPoint: ' + err.message) await FogManager.update( { uuid: fogData.uuid }, { @@ -884,11 +902,20 @@ async function generateProvisioningKeyEndPoint (fogData, isCLI, transaction) { if 
(hasFileBasedSSL) { try { if (intermedKey) { - const certData = fs.readFileSync(intermedKey) - caCert = Buffer.from(certData).toString('base64') + // Check if intermediate certificate file exists before trying to read it + if (fs.existsSync(intermedKey)) { + const certData = fs.readFileSync(intermedKey) + caCert = Buffer.from(certData).toString('base64') + } else { + // Intermediate certificate file doesn't exist, don't provide any CA cert + // Let the system's default trust store handle validation + logger.info(`Intermediate certificate file not found at path: ${intermedKey}, not providing CA certificate`) + caCert = '' + } } else { - const certData = fs.readFileSync(sslCert) - caCert = Buffer.from(certData).toString('base64') + // No intermediate certificate path provided, don't provide any CA cert + // Let the system's default trust store handle validation + caCert = '' } } catch (error) { throw new Errors.ValidationError('Failed to read SSL certificate file') @@ -897,8 +924,10 @@ async function generateProvisioningKeyEndPoint (fogData, isCLI, transaction) { if (hasBase64SSL) { if (intermedKeyBase64) { caCert = intermedKeyBase64 - } else if (sslCertBase64) { - caCert = sslCertBase64 + } else { + // No intermediate certificate base64 provided, don't provide any CA cert + // Let the system's default trust store handle validation + caCert = '' } } } @@ -1010,7 +1039,7 @@ async function _processDeleteCommand (fog, transaction) { for (const microservice of microservices) { await MicroserviceService.deleteMicroserviceWithRoutesAndPortMappings(microservice, transaction) } - + await ApplicationManager.delete({ name: `system-${fog.uuid.toLowerCase()}` }, transaction) await ChangeTrackingService.update(fog.uuid, ChangeTrackingService.events.deleteNode, transaction) await FogManager.delete({ uuid: fog.uuid }, transaction) } diff --git a/src/services/microservices-service.js b/src/services/microservices-service.js index 0e13993e..d7e9f1d8 100644 --- 
a/src/services/microservices-service.js +++ b/src/services/microservices-service.js @@ -1573,16 +1573,21 @@ async function _validateApplication (name, isCLI, transaction) { const application = await ApplicationManager.findOne(where, transaction) if (!application) { - // Try with id - const where = isCLI - ? { id: name, isSystem: false } - : { id: name, isSystem: false } - - const application = await ApplicationManager.findOne(where, transaction) - if (!application) { + // Try with id - but only if name is actually a valid integer + if (Number.isInteger(Number(name)) && !isNaN(name)) { + const where = isCLI + ? { id: parseInt(name), isSystem: false } + : { id: parseInt(name), isSystem: false } + + const application = await ApplicationManager.findOne(where, transaction) + if (!application) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_FLOW_ID, name)) + } + return application + } else { + // If name is not a valid integer, it's not a valid ID either throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_FLOW_ID, name)) } - return application } return application } @@ -1599,16 +1604,21 @@ async function _validateSystemApplication (name, isCLI, transaction) { const application = await ApplicationManager.findOne(where, transaction) if (!application) { - // Try with id - const where = isCLI - ? { id: name, isSystem: true } - : { id: name, isSystem: true } - - const application = await ApplicationManager.findOne(where, transaction) - if (!application) { + // Try with id - but only if name is actually a valid integer + if (Number.isInteger(Number(name)) && !isNaN(name)) { + const where = isCLI + ? 
{ id: parseInt(name), isSystem: true } + : { id: parseInt(name), isSystem: true } + + const application = await ApplicationManager.findOne(where, transaction) + if (!application) { + throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_FLOW_ID, name)) + } + return application + } else { + // If name is not a valid integer, it's not a valid ID either throw new Errors.NotFoundError(AppHelper.formatMessage(ErrorMessages.INVALID_FLOW_ID, name)) } - return application } return application } diff --git a/src/services/router-service.js b/src/services/router-service.js index 07daa12d..8286c138 100644 --- a/src/services/router-service.js +++ b/src/services/router-service.js @@ -330,9 +330,10 @@ async function _createRouterMicroservice (isEdge, uuid, microserviceConfig, tran const capAddValues = [ { capAdd: 'NET_RAW' } ] - - await ApplicationManager.create(routerApplicationData, transaction) - const application = await ApplicationManager.findOne({ name: routerApplicationData.name }, transaction) + let application = await ApplicationManager.findOne({ name: routerApplicationData.name }, transaction) + if (!application) { + application = await ApplicationManager.create(routerApplicationData, transaction) + } routerMicroserviceData.applicationId = application.id const routerMicroservice = await MicroserviceManager.create(routerMicroserviceData, transaction) await MicroserviceStatusManager.create({ microserviceUuid: routerMicroserviceData.uuid }, transaction) diff --git a/src/services/services-service.js b/src/services/services-service.js index 3518d3aa..8503fe4d 100644 --- a/src/services/services-service.js +++ b/src/services/services-service.js @@ -658,8 +658,15 @@ async function _deleteTcpConnector (serviceName, transaction) { if (service.type === 'microservice') { microserviceSource = await MicroserviceManager.findOne({ uuid: service.resource }, transaction) } + let fogSource = null + if (service.type === 'agent') { + fogSource = await 
FogManager.findOne({ uuid: service.resource }, transaction) + if (!fogSource) { + fogSource = await FogManager.findOne({ name: service.resource }, transaction) + } + } - if (isDefaultRouter && !microserviceSource) { + if (isDefaultRouter && (!microserviceSource || !fogSource)) { if (isK8s) { // Update K8s router config const configMap = await K8sClient.getConfigMap(K8S_ROUTER_CONFIG_MAP) @@ -690,22 +697,26 @@ async function _deleteTcpConnector (serviceName, transaction) { await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) } - } else { - let fogNodeUuid = null - if (microserviceSource) { - fogNodeUuid = microserviceSource.iofogUuid - } else { - fogNodeUuid = service.defaultBridge // This is the actual fogNodeUuid for non-default router - } - const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) - const currentConfig = JSON.parse(routerMicroservice.config || '{}') + } - if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { - delete currentConfig.bridges.tcpConnectors[connectorName] - } + let fogNodeUuid = null + if (!isDefaultRouter && (!microserviceSource || !fogSource)) { + fogNodeUuid = service.defaultBridge + } + if (microserviceSource) { + fogNodeUuid = microserviceSource.iofogUuid + } + if (fogSource) { + fogNodeUuid = fogSource.uuid + } + const routerMicroservice = await _getRouterMicroservice(fogNodeUuid, transaction) + const currentConfig = JSON.parse(routerMicroservice.config || '{}') - await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) + if (currentConfig.bridges && currentConfig.bridges.tcpConnectors) { + delete currentConfig.bridges.tcpConnectors[connectorName] } + + await _updateRouterMicroserviceConfig(fogNodeUuid, currentConfig, transaction) } // Helper function to delete tcpListener from router config @@ -798,20 +809,18 @@ async function _createK8sService (serviceConfig, transaction) { name: serviceConfig.name, annotations: normalizedTags.reduce((acc, 
tag) => { const [key, value] = tag.split(':') - acc[key] = value || '' + acc[key] = (value || '').trim() return acc }, {}) }, spec: { type: serviceConfig.k8sType, selector: { - application: 'interior-router', - name: 'router', - 'skupper.io/component': 'router' + 'datasance.com/component': 'router' }, ports: [{ - port: parseInt(serviceConfig.bridgePort), - targetPort: parseInt(serviceConfig.servicePort), + targetPort: parseInt(serviceConfig.bridgePort), + port: parseInt(serviceConfig.servicePort), protocol: 'TCP' }] } @@ -841,7 +850,7 @@ async function _updateK8sService (serviceConfig, transaction) { metadata: { annotations: normalizedTags.reduce((acc, tag) => { const [key, value] = tag.split(':') - acc[key] = value || '' + acc[key] = (value || '').trim() return acc }, {}) }, diff --git a/src/utils/k8s-client.js b/src/utils/k8s-client.js index 65896321..0aa8c9ae 100644 --- a/src/utils/k8s-client.js +++ b/src/utils/k8s-client.js @@ -209,10 +209,13 @@ async function watchLoadBalancerIP (serviceName, maxRetries = 10, retryInterval service.status.loadBalancer && service.status.loadBalancer.ingress && service.status.loadBalancer.ingress.length > 0) { - const ip = service.status.loadBalancer.ingress[0].ip - if (ip) { - logger.info(`Found LoadBalancer IP: ${ip} for service: ${serviceName}`) - return ip + const ingress = service.status.loadBalancer.ingress[0] + if (ingress.ip) { + logger.info(`Found LoadBalancer IP: ${ingress.ip} for service: ${serviceName}`) + return ingress.ip + } else if (ingress.hostname) { + logger.info(`Found LoadBalancer hostname: ${ingress.hostname} for service: ${serviceName}`) + return ingress.hostname + } } logger.info(`Service ${serviceName} is LoadBalancer type but IP not yet assigned (attempt ${attempt + 1}/${maxRetries})`) From 2a2455570a7a315203e3c5a2831d90172b9be505 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sat, 5 Jul 2025 16:38:12 +0300 Subject: [PATCH 13/25] viewer version updated, node's router secrets
deletion added to node delete method --- package-lock.json | 12 ++++++------ package.json | 4 ++-- src/services/iofog-service.js | 15 +++++++++++++++ 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/package-lock.json b/package-lock.json index 1f7e89d1..b2a6955e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta", + "version": "3.5.0-beta1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta", + "version": "3.5.0-beta1", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-alpha1", + "@datasance/ecn-viewer": "1.0.0-beta1", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-alpha1", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-alpha1.tgz", - "integrity": "sha512-gFOvOf9gZqSJyW32UxWAz/e5vLKak0uPx9+Lqp5CpP5pLETaMWmApiWQFaPKxVWhGVOwT38b7WoWo4W78SWzDQ==" + "version": "1.0.0-beta1", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta1.tgz", + "integrity": "sha512-ZCpiRgCwbdxvnMf83dIQn7AluitD00qeVyddYKgR/LSBrB+oXXTttVlr+CH7kpOPshKrNCAeASceLuTmKVuD8Q==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index cfa58995..a9699d77 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta", + "version": "3.5.0-beta1", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-alpha1", + 
"@datasance/ecn-viewer": "1.0.0-beta1", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", diff --git a/src/services/iofog-service.js b/src/services/iofog-service.js index ce9bac21..b088420a 100644 --- a/src/services/iofog-service.js +++ b/src/services/iofog-service.js @@ -44,6 +44,7 @@ const CertificateService = require('./certificate-service') const logger = require('../logger') const ServiceManager = require('../data/managers/service-manager') const FogStates = require('../enums/fog-state') +const SecretManager = require('../data/managers/secret-manager') const SITE_CA_CERT = 'pot-site-ca' const DEFAULT_ROUTER_LOCAL_CA = 'default-router-local-ca' @@ -1041,6 +1042,20 @@ async function _processDeleteCommand (fog, transaction) { } await ApplicationManager.delete({ name: `system-${fog.uuid.toLowerCase()}` }, transaction) await ChangeTrackingService.update(fog.uuid, ChangeTrackingService.events.deleteNode, transaction) + // Delete router-related secrets if they exist + const secretNames = [ + `${fog.uuid}-site-server`, + `${fog.uuid}-local-ca`, + `${fog.uuid}-local-server`, + `${fog.uuid}-local-agent` + ] + + for (const secretName of secretNames) { + const secret = await SecretManager.findOne({ name: secretName }, transaction) + if (secret) { + await SecretManager.delete({ name: secretName }, transaction) + } + } await FogManager.delete({ uuid: fog.uuid }, transaction) } From e184a589485e4c652970d628c2c14ecceb509b21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sun, 6 Jul 2025 23:16:18 +0300 Subject: [PATCH 14/25] viewer version updated and gpsstatus added to fog data model --- package-lock.json | 12 ++++++------ package.json | 4 ++-- .../migrations/mysql/db_migration_mysql_v1.0.2.sql | 2 ++ .../migrations/postgres/db_migration_pg_v1.0.2.sql | 2 ++ .../migrations/sqlite/db_migration_sqlite_v1.0.2.sql | 4 +++- src/data/models/fog.js | 4 ++++ src/schemas/agent.js | 3 ++- 
src/services/agent-service.js | 3 ++- 8 files changed, 23 insertions(+), 11 deletions(-) diff --git a/package-lock.json b/package-lock.json index b2a6955e..ec57a510 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta1", + "version": "3.5.0-beta2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta1", + "version": "3.5.0-beta2", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta1", + "@datasance/ecn-viewer": "1.0.0-beta2", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta1", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta1.tgz", - "integrity": "sha512-ZCpiRgCwbdxvnMf83dIQn7AluitD00qeVyddYKgR/LSBrB+oXXTttVlr+CH7kpOPshKrNCAeASceLuTmKVuD8Q==" + "version": "1.0.0-beta2", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta2.tgz", + "integrity": "sha512-SCwolkINC1ClpKn84DHVFxIDf4m4lj4+ePzJVHJwtcG1ZqbiDsaIs8Wu27kohXMm4a234gdj78Hkc8UmC+vOPA==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index a9699d77..4503db1a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta1", + "version": "3.5.0-beta2", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta1", + "@datasance/ecn-viewer": "1.0.0-beta2", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": 
"^1.9.0", diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql index 528e829c..99c2c961 100644 --- a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql +++ b/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql @@ -787,4 +787,6 @@ CREATE TABLE IF NOT EXISTS MicroserviceExecStatuses ( CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecStatuses (microservice_uuid); +ALTER TABLE Fogs ADD COLUMN gps_status VARCHAR(32); + COMMIT; \ No newline at end of file diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql index d8367c48..bec76608 100644 --- a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql +++ b/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql @@ -786,3 +786,5 @@ CREATE TABLE IF NOT EXISTS "MicroserviceExecStatuses" ( ); CREATE INDEX idx_microservice_exec_status_microservice_uuid ON "MicroserviceExecStatuses" (microservice_uuid); + +ALTER TABLE "Fogs" ADD COLUMN gps_status VARCHAR(32); \ No newline at end of file diff --git a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql index f5d2aee4..a8e45ccc 100644 --- a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql @@ -772,4 +772,6 @@ CREATE TABLE IF NOT EXISTS MicroserviceExecStatuses ( FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE ); -CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecStatuses (microservice_uuid); \ No newline at end of file +CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecStatuses (microservice_uuid); + +ALTER TABLE Fogs ADD COLUMN gps_status VARCHAR(32); \ No newline at end of file diff --git a/src/data/models/fog.js b/src/data/models/fog.js index 49c69961..7bd62e3f 100644 
--- a/src/data/models/fog.js +++ b/src/data/models/fog.js @@ -356,6 +356,10 @@ module.exports = (sequelize, DataTypes) => { type: DataTypes.TEXT, field: 'warning_message', defaultValue: 'HEALTHY' + }, + gpsStatus: { + type: DataTypes.TEXT, + field: 'gps_status' } }, { tableName: 'Fogs', diff --git a/src/schemas/agent.js b/src/schemas/agent.js index cf4e6c9c..87c4e123 100644 --- a/src/schemas/agent.js +++ b/src/schemas/agent.js @@ -103,7 +103,8 @@ const updateAgentStatus = { 'tunnelStatus': { 'type': 'string' }, 'version': { 'type': 'string' }, 'isReadyToUpgrade': { 'type': 'boolean' }, - 'isReadyToRollback': { 'type': 'boolean' } + 'isReadyToRollback': { 'type': 'boolean' }, + 'gpsStatus': { 'type': 'string' } }, 'additionalProperties': true } diff --git a/src/services/agent-service.js b/src/services/agent-service.js index 74111edb..a1217b79 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -255,7 +255,8 @@ const updateAgentStatus = async function (agentStatus, fog, transaction) { isReadyToUpgrade: agentStatus.isReadyToUpgrade, isReadyToRollback: agentStatus.isReadyToRollback, activeVolumeMounts: agentStatus.activeVolumeMounts, - volumeMountLastUpdate: agentStatus.volumeMountLastUpdate + volumeMountLastUpdate: agentStatus.volumeMountLastUpdate, + gpsStatus: agentStatus.gpsStatus } fogStatus = AppHelper.deleteUndefinedFields(fogStatus) From bf18ab6b09a81b9649a1a97b3134232340fe3a00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 9 Jul 2025 03:46:30 +0300 Subject: [PATCH 15/25] microservice yaml parser fixed, secret update schema fixed --- package-lock.json | 12 ++--- package.json | 4 +- src/schemas/secret.js | 1 + src/services/yaml-parser-service.js | 70 ++++++++++++++++++++++++++++- 4 files changed, 78 insertions(+), 9 deletions(-) diff --git a/package-lock.json b/package-lock.json index ec57a510..19295bc0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": 
"@datasance/iofogcontroller", - "version": "3.5.0-beta2", + "version": "3.5.0-beta3", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta2", + "version": "3.5.0-beta3", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta2", + "@datasance/ecn-viewer": "1.0.0-beta3", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta2", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta2.tgz", - "integrity": "sha512-SCwolkINC1ClpKn84DHVFxIDf4m4lj4+ePzJVHJwtcG1ZqbiDsaIs8Wu27kohXMm4a234gdj78Hkc8UmC+vOPA==" + "version": "1.0.0-beta3", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta3.tgz", + "integrity": "sha512-adYlBAgICw2MluooMhnv669vYXGX9nuliT00RkG45XJZ5TuC3Z+/ZrGUwAGz9Z4m74YS9xNB/qkw/1FSdf1B3w==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index 4503db1a..38c28474 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta2", + "version": "3.5.0-beta3", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta2", + "@datasance/ecn-viewer": "1.0.0-beta3", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", diff --git a/src/schemas/secret.js b/src/schemas/secret.js index af2ac259..68251328 100644 --- a/src/schemas/secret.js +++ b/src/schemas/secret.js @@ -14,6 +14,7 @@ const secretUpdate = { id: '/secretUpdate', type: 'object', properties: { + 
name: { type: 'string', minLength: 1, maxLength: 255 }, data: { type: 'object' } }, required: ['data'], diff --git a/src/services/yaml-parser-service.js b/src/services/yaml-parser-service.js index 93953fb6..de3b205c 100644 --- a/src/services/yaml-parser-service.js +++ b/src/services/yaml-parser-service.js @@ -258,6 +258,74 @@ const parseMicroserviceImages = async (fileImages) => { const parseMicroserviceYAML = async (microservice) => { const { registryId, catalogItemId, images } = await parseMicroserviceImages(microservice.images) const container = microservice.container || {} + + // Parse environment variables with support for value, valueFromSecret, and valueFromConfigMap + const parseEnvVariables = (envArray) => { + if (!envArray || !Array.isArray(envArray)) { + return [] + } + + return envArray.map(env => { + if (!env || typeof env !== 'object') { + throw new Errors.ValidationError('Invalid environment variable format') + } + + if (!env.key) { + throw new Errors.ValidationError('Environment variable must have a key') + } + + const envVar = { + key: env.key.toString() + } + + // Check that exactly one of value, valueFromSecret, or valueFromConfigMap is provided + const hasValue = env.hasOwnProperty('value') + const hasValueFromSecret = env.hasOwnProperty('valueFromSecret') + const hasValueFromConfigMap = env.hasOwnProperty('valueFromConfigMap') + + const valueCount = [hasValue, hasValueFromSecret, hasValueFromConfigMap].filter(Boolean).length + + if (valueCount === 0) { + throw new Errors.ValidationError(`Environment variable '${env.key}' must have either value, valueFromSecret, or valueFromConfigMap`) + } + + if (valueCount > 1) { + throw new Errors.ValidationError(`Environment variable '${env.key}' can only have one of: value, valueFromSecret, or valueFromConfigMap`) + } + + // Handle simple value + if (hasValue) { + envVar.value = env.value.toString() + } + + // Handle valueFromSecret + if (hasValueFromSecret) { + if (typeof env.valueFromSecret !== 'string') 
{ + throw new Errors.ValidationError(`valueFromSecret for environment variable '${env.key}' must be a string`) + } + const parts = env.valueFromSecret.split('/') + if (parts.length !== 2 || !parts[0] || !parts[1]) { + throw new Errors.ValidationError(`valueFromSecret for environment variable '${env.key}' must be in format 'secret-name/key'`) + } + envVar.valueFromSecret = env.valueFromSecret + } + + // Handle valueFromConfigMap + if (hasValueFromConfigMap) { + if (typeof env.valueFromConfigMap !== 'string') { + throw new Errors.ValidationError(`valueFromConfigMap for environment variable '${env.key}' must be a string`) + } + const parts = env.valueFromConfigMap.split('/') + if (parts.length !== 2 || !parts[0] || !parts[1]) { + throw new Errors.ValidationError(`valueFromConfigMap for environment variable '${env.key}' must be in format 'configmap-name/key'`) + } + envVar.valueFromConfigMap = env.valueFromConfigMap + } + + return envVar + }) + } + const microserviceData = { config: microservice.config != null ? 
JSON.stringify(microservice.config) : undefined, name: microservice.name, @@ -274,7 +342,7 @@ const parseMicroserviceYAML = async (microservice) => { ports: (lget(microservice, 'container.ports', [])), volumeMappings: lget(microservice, 'container.volumes', []), cmd: lget(microservice, 'container.commands', []), - env: (lget(microservice, 'container.env', [])).map(e => ({ key: e.key.toString(), value: e.value.toString() })), + env: parseEnvVariables(lget(microservice, 'container.env', [])), images, extraHosts: lget(microservice, 'container.extraHosts', []), ...microservice.msRoutes, From 3822caa3bf332ef86314d1e0ce3355b0f58a0c2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sat, 12 Jul 2025 17:23:01 +0300 Subject: [PATCH 16/25] microservice healchecki cpuSetCpus, momoryLimit configuration and healthstatus resposnse added --- package-lock.json | 4 +- package.json | 2 +- .../microservice-healthcheck-manager.js | 37 ++++++ src/data/managers/microservice-manager.js | 19 +++ ....0.2.sql => db_migration_mysql_v1.0.3.sql} | 21 +++ ..._v1.0.2.sql => db_migration_pg_v1.0.3.sql} | 23 +++- ...0.2.sql => db_migration_sqlite_v1.0.3.sql} | 23 +++- src/data/models/microservice.js | 17 +++ src/data/models/microserviceHealthCheck.js | 52 ++++++++ src/data/models/microservicestatus.js | 5 + src/data/providers/database-provider.js | 12 +- src/schemas/agent.js | 1 + src/schemas/microservice.js | 34 ++++- src/services/agent-service.js | 38 ++++++ src/services/certificate-service.js | 2 +- src/services/microservices-service.js | 120 +++++++++++++++++- src/services/yaml-parser-service.js | 3 + 17 files changed, 397 insertions(+), 16 deletions(-) create mode 100644 src/data/managers/microservice-healthcheck-manager.js rename src/data/migrations/mysql/{db_migration_mysql_v1.0.2.sql => db_migration_mysql_v1.0.3.sql} (97%) rename src/data/migrations/postgres/{db_migration_pg_v1.0.2.sql => db_migration_pg_v1.0.3.sql} (97%) rename 
src/data/migrations/sqlite/{db_migration_sqlite_v1.0.2.sql => db_migration_sqlite_v1.0.3.sql} (97%) create mode 100644 src/data/models/microserviceHealthCheck.js diff --git a/package-lock.json b/package-lock.json index 19295bc0..0b67c0b4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta3", + "version": "3.5.0-beta4", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta3", + "version": "3.5.0-beta4", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { diff --git a/package.json b/package.json index 38c28474..0726c49d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta3", + "version": "3.5.0-beta4", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", diff --git a/src/data/managers/microservice-healthcheck-manager.js b/src/data/managers/microservice-healthcheck-manager.js new file mode 100644 index 00000000..9d905bcd --- /dev/null +++ b/src/data/managers/microservice-healthcheck-manager.js @@ -0,0 +1,37 @@ +/* + * ******************************************************************************* + * * Copyright (c) 2023 Datasance Teknoloji A.S. + * * + * * This program and the accompanying materials are made available under the + * * terms of the Eclipse Public License v. 
2.0 which is available at + * * http://www.eclipse.org/legal/epl-2.0 + * * + * * SPDX-License-Identifier: EPL-2.0 + * ******************************************************************************* + * + */ + +const BaseManager = require('./base-manager') +const models = require('../models') +const MicroserviceHealthCheck = models.MicroserviceHealthCheck + +const microserviceHealthCheckExcludedFields = [ + 'id', + 'microservice_uuid', + 'microserviceUuid', + 'created_at', + 'updated_at' +] + +class MicroserviceHealthCheckManager extends BaseManager { + getEntity () { + return MicroserviceHealthCheck + } + + findAllExcludeFields (where, transaction) { + return this.findAllWithAttributes(where, { exclude: microserviceHealthCheckExcludedFields }, transaction) + } +} + +const instance = new MicroserviceHealthCheckManager() +module.exports = instance diff --git a/src/data/managers/microservice-manager.js b/src/data/managers/microservice-manager.js index 6d361ef6..573aa7d2 100644 --- a/src/data/managers/microservice-manager.js +++ b/src/data/managers/microservice-manager.js @@ -31,6 +31,7 @@ const Application = models.Application const Routing = models.Routing const Registry = models.Registry const MicroserviceStatus = models.MicroserviceStatus +const MicroserviceHealthCheck = models.MicroserviceHealthCheck const Op = require('sequelize').Op const microserviceExcludedFields = [ @@ -146,6 +147,12 @@ class MicroserviceManager extends BaseManager { }], attributes: { exclude: ['id', 'source_microservice_uuid', 'sourceMicroserviceUuid', 'destMicroserviceUuid'] } + }, + { + model: MicroserviceHealthCheck, + as: 'healthCheck', + required: false, + attributes: ['test', 'interval', 'timeout', 'startPeriod', 'startInterval', 'retries'] } ], where: where, @@ -252,6 +259,12 @@ class MicroserviceManager extends BaseManager { as: 'subTags', attributes: ['value'], through: { attributes: [] } + }, + { + model: MicroserviceHealthCheck, + as: 'healthCheck', + required: false, + 
attributes: ['test', 'interval', 'timeout', 'startPeriod', 'startInterval', 'retries'] } ], where: { @@ -367,6 +380,12 @@ class MicroserviceManager extends BaseManager { }], attributes: { exclude: ['id', 'sourceMicroserviceUuid', 'destMicroserviceUuid'] } + }, + { + model: MicroserviceHealthCheck, + as: 'healthCheck', + required: false, + attributes: ['test', 'interval', 'timeout', 'startPeriod', 'startInterval', 'retries'] } ], where: where, diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql similarity index 97% rename from src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql rename to src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql index 99c2c961..ad7b3699 100644 --- a/src/data/migrations/mysql/db_migration_mysql_v1.0.2.sql +++ b/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql @@ -789,4 +789,25 @@ CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecS ALTER TABLE Fogs ADD COLUMN gps_status VARCHAR(32); +ALTER TABLE Microservices ADD COLUMN cpu_set_cpus TEXT; +ALTER TABLE Microservices ADD COLUMN memory_limit BIGINT; + +CREATE TABLE IF NOT EXISTS MicroserviceHealthChecks ( + id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, + test TEXT, + interval BIGINT, + timeout BIGINT, + start_period BIGINT, + start_interval BIGINT, + retries INT, + microservice_uuid VARCHAR(36), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_health_check_microservice_uuid ON MicroserviceHealthChecks (microservice_uuid); + +ALTER TABLE MicroserviceStatuses ADD COLUMN health_status TEXT; + COMMIT; \ No newline at end of file diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql similarity index 97% rename from src/data/migrations/postgres/db_migration_pg_v1.0.2.sql rename to 
src/data/migrations/postgres/db_migration_pg_v1.0.3.sql index bec76608..660fc03f 100644 --- a/src/data/migrations/postgres/db_migration_pg_v1.0.2.sql +++ b/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql @@ -787,4 +787,25 @@ CREATE TABLE IF NOT EXISTS "MicroserviceExecStatuses" ( CREATE INDEX idx_microservice_exec_status_microservice_uuid ON "MicroserviceExecStatuses" (microservice_uuid); -ALTER TABLE "Fogs" ADD COLUMN gps_status VARCHAR(32); \ No newline at end of file +ALTER TABLE "Fogs" ADD COLUMN gps_status VARCHAR(32); + +ALTER TABLE "Microservices" ADD COLUMN cpu_set_cpus TEXT; +ALTER TABLE "Microservices" ADD COLUMN memory_limit BIGINT; + +CREATE TABLE IF NOT EXISTS "MicroserviceHealthChecks" ( + id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, + test TEXT, + interval BIGINT, + timeout BIGINT, + start_period BIGINT, + start_interval BIGINT, + retries INT, + microservice_uuid VARCHAR(36), + created_at TIMESTAMP(0), + updated_at TIMESTAMP(0), + FOREIGN KEY (microservice_uuid) REFERENCES "Microservices" (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_health_check_microservice_uuid ON "MicroserviceHealthChecks" (microservice_uuid); + +ALTER TABLE "MicroserviceStatuses" ADD COLUMN health_status TEXT; \ No newline at end of file diff --git a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql similarity index 97% rename from src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql rename to src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql index a8e45ccc..0240c0ea 100644 --- a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.2.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql @@ -774,4 +774,25 @@ CREATE TABLE IF NOT EXISTS MicroserviceExecStatuses ( CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecStatuses (microservice_uuid); -ALTER TABLE Fogs ADD COLUMN gps_status VARCHAR(32); \ No newline at end of file 
+ALTER TABLE Fogs ADD COLUMN gps_status VARCHAR(32); + +ALTER TABLE Microservices ADD COLUMN cpu_set_cpus TEXT; +ALTER TABLE Microservices ADD COLUMN memory_limit BIGINT; + +CREATE TABLE IF NOT EXISTS MicroserviceHealthChecks ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + test TEXT, + interval BIGINT, + timeout BIGINT, + start_period BIGINT, + start_interval BIGINT, + retries INT, + microservice_uuid VARCHAR(36), + created_at DATETIME, + updated_at DATETIME, + FOREIGN KEY (microservice_uuid) REFERENCES Microservices (uuid) ON DELETE CASCADE +); + +CREATE INDEX idx_microservice_health_check_microservice_uuid ON MicroserviceHealthChecks (microservice_uuid); + +ALTER TABLE MicroserviceStatuses ADD COLUMN health_status TEXT; \ No newline at end of file diff --git a/src/data/models/microservice.js b/src/data/models/microservice.js index c90e9871..4f090329 100644 --- a/src/data/models/microservice.js +++ b/src/data/models/microservice.js @@ -80,6 +80,18 @@ module.exports = (sequelize, DataTypes) => { field: 'schedule', defaultValue: 50 }, + cpuSetCpus: { + type: DataTypes.TEXT, + field: 'cpu_set_cpus', + defaultValue: '' + }, + memoryLimit: { + type: DataTypes.BIGINT, + get () { + return convertToInt(this.getDataValue('memoryLimit')) + }, + field: 'memory_limit' + }, imageSnapshot: { type: DataTypes.TEXT, field: 'image_snapshot', @@ -178,6 +190,11 @@ module.exports = (sequelize, DataTypes) => { as: 'microserviceExecStatus' }) + Microservice.hasOne(models.MicroserviceHealthCheck, { + foreignKey: 'microservice_uuid', + as: 'healthCheck' + }) + Microservice.hasMany(models.MicroserviceEnv, { foreignKey: 'microservice_uuid', as: 'env' diff --git a/src/data/models/microserviceHealthCheck.js b/src/data/models/microserviceHealthCheck.js new file mode 100644 index 00000000..6e319f51 --- /dev/null +++ b/src/data/models/microserviceHealthCheck.js @@ -0,0 +1,52 @@ +'use strict' +module.exports = (sequelize, DataTypes) => { + const MicroserviceHealthCheck = 
sequelize.define('MicroserviceHealthCheck', { + id: { + type: DataTypes.INTEGER, + primaryKey: true, + autoIncrement: true, + allowNull: false, + field: 'id' + }, + test: { + type: DataTypes.TEXT, + field: 'test' + }, + interval: { + type: DataTypes.BIGINT, + field: 'interval' + }, + timeout: { + type: DataTypes.BIGINT, + field: 'timeout' + }, + startPeriod: { + type: DataTypes.BIGINT, + field: 'start_period' + }, + startInterval: { + type: DataTypes.BIGINT, + field: 'start_interval' + }, + retries: { + type: DataTypes.INTEGER, + field: 'retries' + } + }, { + tableName: 'MicroserviceHealthChecks', + // add the timestamp attributes (updatedAt, createdAt) + timestamps: true, + underscored: true + }) + MicroserviceHealthCheck.associate = function (models) { + MicroserviceHealthCheck.belongsTo(models.Microservice, { + foreignKey: { + name: 'microserviceUuid', + field: 'microservice_uuid' + }, + as: 'microservice', + onDelete: 'cascade' + }) + } + return MicroserviceHealthCheck +} diff --git a/src/data/models/microservicestatus.js b/src/data/models/microservicestatus.js index 61a71b37..b9f7a3cb 100644 --- a/src/data/models/microservicestatus.js +++ b/src/data/models/microservicestatus.js @@ -66,6 +66,11 @@ module.exports = (sequelize, DataTypes) => { defaultValue: '', field: 'ip_address' }, + healthStatus: { + type: DataTypes.TEXT, + defaultValue: '', + field: 'health_status' + }, execSessionIds: { type: DataTypes.TEXT, defaultValue: '[]', diff --git a/src/data/providers/database-provider.js b/src/data/providers/database-provider.js index 2035eb1d..a3206db1 100644 --- a/src/data/providers/database-provider.js +++ b/src/data/providers/database-provider.js @@ -251,8 +251,8 @@ class DatabaseProvider { // SQLite migration async runMigrationSQLite (dbName) { - const migrationSqlPath = path.resolve(__dirname, '../migrations/sqlite/db_migration_sqlite_v1.0.2.sql') - const migrationVersion = '1.0.2' + const migrationSqlPath = path.resolve(__dirname, 
'../migrations/sqlite/db_migration_sqlite_v1.0.3.sql') + const migrationVersion = '1.0.3' if (!fs.existsSync(migrationSqlPath)) { logger.error(`Migration file not found: ${migrationSqlPath}`) @@ -324,8 +324,8 @@ class DatabaseProvider { // MySQL migration async runMigrationMySQL (db) { - const migrationSqlPath = path.resolve(__dirname, '../migrations/mysql/db_migration_mysql_v1.0.2.sql') - const migrationVersion = '1.0.2' + const migrationSqlPath = path.resolve(__dirname, '../migrations/mysql/db_migration_mysql_v1.0.3.sql') + const migrationVersion = '1.0.3' if (!fs.existsSync(migrationSqlPath)) { logger.error(`Migration file not found: ${migrationSqlPath}`) @@ -385,8 +385,8 @@ class DatabaseProvider { // PostgreSQL migration async runMigrationPostgres (db) { - const migrationSqlPath = path.resolve(__dirname, '../migrations/postgres/db_migration_pg_v1.0.2.sql') - const migrationVersion = '1.0.2' + const migrationSqlPath = path.resolve(__dirname, '../migrations/postgres/db_migration_pg_v1.0.3.sql') + const migrationVersion = '1.0.3' if (!fs.existsSync(migrationSqlPath)) { logger.error(`Migration file not found: ${migrationSqlPath}`) diff --git a/src/schemas/agent.js b/src/schemas/agent.js index 87c4e123..c41bde55 100644 --- a/src/schemas/agent.js +++ b/src/schemas/agent.js @@ -140,6 +140,7 @@ const microserviceStatus = { 'id': { 'type': 'string' }, 'containerId': { 'type': 'string' }, 'status': { 'type': 'string' }, + 'healthStatus': { 'type': 'string' }, 'startTime': { 'type': 'integer' }, 'operatingDuration': { 'type': 'integer' }, 'cpuUsage': { 'type': 'number' }, diff --git a/src/schemas/microservice.js b/src/schemas/microservice.js index 44aacc3d..4ec80352 100644 --- a/src/schemas/microservice.js +++ b/src/schemas/microservice.js @@ -68,6 +68,8 @@ const microserviceCreate = { 'runAsUser': { 'type': 'string' }, 'platform': { 'type': 'string' }, 'runtime': { 'type': 'string' }, + 'cpuSetCpus': { 'type': 'string' }, + 'memoryLimit': { 'type': 'integer' }, 
'pubTags': { 'type': 'array', 'items': { 'type': 'string' } @@ -75,6 +77,10 @@ const microserviceCreate = { 'subTags': { 'type': 'array', 'items': { 'type': 'string' } + }, + 'healthCheck': { + 'type': 'object', + 'properties': { '$ref': '/microserviceHealthCheck' } } }, 'required': ['name'], @@ -135,6 +141,8 @@ const microserviceUpdate = { 'runAsUser': { 'type': 'string' }, 'platform': { 'type': 'string' }, 'runtime': { 'type': 'string' }, + 'cpuSetCpus': { 'type': 'string' }, + 'memoryLimit': { 'type': 'integer' }, 'pubTags': { 'type': 'array', 'items': { 'type': 'string' } @@ -142,6 +150,10 @@ const microserviceUpdate = { 'subTags': { 'type': 'array', 'items': { 'type': 'string' } + }, + 'healthCheck': { + 'type': 'object', + 'properties': { '$ref': '/microserviceHealthCheck' } } }, 'additionalProperties': true @@ -230,7 +242,25 @@ const volumeMappings = { 'additionalProperties': true } +const microserviceHealthCheck = { + + 'id': '/microserviceHealthCheck', + 'type': 'object', + 'properties': { + 'test': { + 'type': 'array', + 'items': { 'type': 'string' } + }, + 'interval': { 'type': 'integer' }, + 'timeout': { 'type': 'integer' }, + 'startPeriod': { 'type': 'integer' }, + 'startInterval': { 'type': 'integer' }, + 'retries': { 'type': 'integer' } + }, + 'required': ['test'] +} + module.exports = { - mainSchemas: [microserviceCreate, microserviceUpdate, env, ports, extraHosts, portsCreate, microserviceDelete, volumeMappings], - innerSchemas: [volumeMappings, ports, env, extraHosts, microserviceCreate] + mainSchemas: [microserviceCreate, microserviceUpdate, env, ports, extraHosts, portsCreate, microserviceDelete, volumeMappings, microserviceHealthCheck], + innerSchemas: [volumeMappings, ports, env, extraHosts, microserviceCreate, microserviceHealthCheck] } diff --git a/src/services/agent-service.js b/src/services/agent-service.js index a1217b79..3c2e2927 100644 --- a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -292,6 +292,7 @@ const 
_updateMicroserviceStatuses = async function (microserviceStatus, fog, tra let microserviceStatus = { containerId: status.containerId, status: status.status, + healthStatus: status.healthStatus, startTime: status.startTime, operatingDuration: status.operatingDuration, cpuUsage: status.cpuUsage, @@ -349,6 +350,40 @@ const getAgentMicroservices = async function (fog, transaction) { const extraHosts = microservice.extraHosts ? microservice.extraHosts.map(_mapExtraHost) : [] + // Process health check data - handle both old and new formats + let healthCheck = null + + if (microservice.healthCheck) { + // Handle the test field - it could be already an array or a JSON string + let testData = microservice.healthCheck.test + if (testData && testData !== null && testData !== undefined && testData.length > 0) { + if (typeof testData === 'string') { + // It's a JSON string, try to parse it + try { + testData = JSON.parse(testData) + } catch (e) { + // If not valid JSON, treat as a single string command + testData = [testData] + } + } else if (!Array.isArray(testData)) { + // If it's not an array, convert to array + testData = [testData] + } + // If it's already an array, leave as is + } + + healthCheck = { + test: testData, + interval: microservice.healthCheck.interval, + timeout: microservice.healthCheck.timeout, + startPeriod: microservice.healthCheck.startPeriod, + startInterval: microservice.healthCheck.startInterval, + retries: microservice.healthCheck.retries + } + } else { + healthCheck = {} + } + const responseMicroservice = { uuid: microservice.uuid, imageId: imageId, @@ -356,6 +391,9 @@ const getAgentMicroservices = async function (fog, transaction) { annotations: microservice.annotations, rebuild: microservice.rebuild, rootHostAccess: microservice.rootHostAccess, + cpuSetCpus: microservice.cpuSetCpus, + memoryLimit: microservice.memoryLimit, + healthCheck: healthCheck, pidMode: microservice.pidMode, ipcMode: microservice.ipcMode, runAsUser: microservice.runAsUser, 
diff --git a/src/services/certificate-service.js b/src/services/certificate-service.js index 97bdc578..316062bc 100644 --- a/src/services/certificate-service.js +++ b/src/services/certificate-service.js @@ -446,7 +446,7 @@ async function renewCertificateEndpoint (name, transaction) { const secret = await SecretManager.findOne({ name, type: 'tls' }, transaction) if (secret) { isNewRecord = true - console.log(`Certificate record not found for ${name}, but secret exists. Will create new record.`) + // console.log(`Certificate record not found for ${name}, but secret exists. Will create new record.`) } else { throw new Errors.NotFoundError(`Certificate with name ${name} not found`) } diff --git a/src/services/microservices-service.js b/src/services/microservices-service.js index d7e9f1d8..173d7bb2 100644 --- a/src/services/microservices-service.js +++ b/src/services/microservices-service.js @@ -21,6 +21,7 @@ const MicroserviceCapAddManager = require('../data/managers/microservice-cap-add const MicroserviceCapDropManager = require('../data/managers/microservice-cap-drop-manager') const MicroserviceEnvManager = require('../data/managers/microservice-env-manager') const MicroservicePortService = require('../services/microservice-ports/microservice-port') +const MicroserviceHealthCheckManager = require('../data/managers/microservice-healthcheck-manager') const CatalogItemImageManager = require('../data/managers/catalog-item-image-manager') const RegistryManager = require('../data/managers/registry-manager') // const RouterManager = require('../data/managers/router-manager') @@ -377,6 +378,13 @@ async function createMicroserviceEndPoint (microserviceData, isCLI, transaction) await _createCdiDevices(microservice, cdiDevices, transaction) } } + if (microserviceData.healthCheck) { + const healthCheckData = { + microserviceUuid: microservice.uuid, + ..._processHealthCheckForDB(microserviceData.healthCheck) + } + await MicroserviceHealthCheckManager.create(healthCheckData, 
transaction) + } if (microserviceData.capAdd) { for (const capAdd of microserviceData.capAdd) { await _createCapAdd(microservice, capAdd, transaction) @@ -512,6 +520,8 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, + cpuSetCpus: microserviceData.cpuSetCpus, + memoryLimit: microserviceData.memoryLimit, schedule: microserviceData.schedule, pidMode: microserviceData.pidMode, ipcMode: microserviceData.ipcMode, @@ -526,7 +536,8 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD volumeMappings: microserviceData.volumeMappings, env: microserviceData.env, cmd: microserviceData.cmd, - ports: microserviceData.ports + ports: microserviceData.ports, + healthCheck: microserviceData.healthCheck } const microserviceDataUpdate = AppHelper.deleteUndefinedFields(microserviceToUpdate) @@ -632,6 +643,8 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD (microserviceDataUpdate.rootHostAccess !== undefined && microservice.rootHostAccess !== microserviceDataUpdate.rootHostAccess) || microserviceDataUpdate.pidMode || microserviceDataUpdate.ipcMode || + microserviceDataUpdate.cpuSetCpus || + microserviceDataUpdate.memoryLimit || microserviceDataUpdate.env || microserviceDataUpdate.cmd || microserviceDataUpdate.cdiDevices || @@ -675,6 +688,19 @@ async function updateSystemMicroserviceEndPoint (microserviceUuid, microserviceD await _updateCapAdd(microserviceDataUpdate.capAdd, microserviceUuid, transaction) } + if (microserviceDataUpdate.healthCheck) { + await MicroserviceHealthCheckManager.delete({ + microserviceUuid: microservice.uuid + }, transaction) + const healthCheckData = { + microserviceUuid: microservice.uuid, + ..._processHealthCheckForDB(microserviceDataUpdate.healthCheck) + } + if (healthCheckData.test && healthCheckData.test.length > 0) { + await 
MicroserviceHealthCheckManager.create(healthCheckData, transaction) + } + } + if (microserviceDataUpdate.capDrop) { await _updateCapDrop(microserviceDataUpdate.capDrop, microserviceUuid, transaction) } @@ -737,6 +763,8 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i rebuild: microserviceData.rebuild, iofogUuid: newFog.uuid, rootHostAccess: microserviceData.rootHostAccess, + cpuSetCpus: microserviceData.cpuSetCpus, + memoryLimit: microserviceData.memoryLimit, schedule: microserviceData.schedule, pidMode: microserviceData.pidMode, ipcMode: microserviceData.ipcMode, @@ -751,7 +779,8 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i volumeMappings: microserviceData.volumeMappings, env: microserviceData.env, cmd: microserviceData.cmd, - ports: microserviceData.ports + ports: microserviceData.ports, + healthCheck: microserviceData.healthCheck } const microserviceDataUpdate = AppHelper.deleteUndefinedFields(microserviceToUpdate) @@ -861,6 +890,8 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i (microserviceDataUpdate.rootHostAccess !== undefined && microservice.rootHostAccess !== microserviceDataUpdate.rootHostAccess) || microserviceDataUpdate.pidMode || microserviceDataUpdate.ipcMode || + microserviceDataUpdate.cpuSetCpus || + microserviceDataUpdate.memoryLimit || microserviceDataUpdate.env || microserviceDataUpdate.cmd || microserviceDataUpdate.cdiDevices || @@ -904,6 +935,19 @@ async function updateMicroserviceEndPoint (microserviceUuid, microserviceData, i await _updateCapAdd(microserviceDataUpdate.capAdd, microserviceUuid, transaction) } + if (microserviceDataUpdate.healthCheck) { + await MicroserviceHealthCheckManager.delete({ + microserviceUuid: microservice.uuid + }, transaction) + const healthCheckData = { + microserviceUuid: microservice.uuid, + ..._processHealthCheckForDB(microserviceDataUpdate.healthCheck) + } + if (healthCheckData.test && 
healthCheckData.test.length > 0) { + await MicroserviceHealthCheckManager.create(healthCheckData, transaction) + } + } + if (microserviceDataUpdate.capDrop) { await _updateCapDrop(microserviceDataUpdate.capDrop, microserviceUuid, transaction) } @@ -1510,6 +1554,28 @@ function _validateMicroserviceAnnotations (annotations) { return result } +function _validateMicroserviceHealthCheck (healthCheck) { + let result + if (healthCheck) { + // Convert the health check object to a JSON string for database storage + result = JSON.stringify(healthCheck) + } + return result +} + +function _processHealthCheckForDB (healthCheckData) { + if (!healthCheckData) return null + + return { + test: _validateMicroserviceHealthCheck(healthCheckData.test), + interval: healthCheckData.interval, + timeout: healthCheckData.timeout, + startPeriod: healthCheckData.startPeriod, + startInterval: healthCheckData.startInterval, + retries: healthCheckData.retries + } +} + async function _createMicroservice (microserviceData, isCLI, transaction) { const config = _validateMicroserviceConfig(microserviceData.config) const annotations = _validateMicroserviceAnnotations(microserviceData.annotations) @@ -1522,6 +1588,8 @@ async function _createMicroservice (microserviceData, isCLI, transaction) { catalogItemId: microserviceData.catalogItemId, iofogUuid: microserviceData.iofogUuid, rootHostAccess: microserviceData.rootHostAccess, + cpuSetCpus: microserviceData.cpuSetCpus, + memoryLimit: microserviceData.memoryLimit, pidMode: microserviceData.pidMode, ipcMode: microserviceData.ipcMode, cdiDevices: microserviceData.cdiDevices, @@ -1935,6 +2003,7 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { const subTags = microservice.subTags ? 
microservice.subTags.map(t => t.value) : [] const status = await MicroserviceStatusManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) const execStatus = await MicroserviceExecStatusManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) + const healthCheck = await MicroserviceHealthCheckManager.findAllExcludeFields({ microserviceUuid: microserviceUuid }, transaction) // build microservice response const res = Object.assign({}, microservice) res.ports = [] @@ -1958,6 +2027,53 @@ async function _buildGetMicroserviceResponse (microservice, transaction) { if (execStatus && execStatus.length) { res.execStatus = execStatus[0] } + if (healthCheck && healthCheck.length) { + const healthCheckData = healthCheck[0] + // Create a copy of the health check data to avoid modifying the Sequelize object + const healthCheckResponse = { + test: healthCheckData.test + } + + // Only add fields if they are not null + if (healthCheckData.interval !== null) { + healthCheckResponse.interval = healthCheckData.interval + } + if (healthCheckData.timeout !== null) { + healthCheckResponse.timeout = healthCheckData.timeout + } + if (healthCheckData.startPeriod !== null) { + healthCheckResponse.startPeriod = healthCheckData.startPeriod + } + if (healthCheckData.startInterval !== null) { + healthCheckResponse.startInterval = healthCheckData.startInterval + } + if (healthCheckData.retries !== null) { + healthCheckResponse.retries = healthCheckData.retries + } + + // Handle the test field - ensure it's always an array + if (healthCheckResponse.test) { + if (typeof healthCheckResponse.test === 'string') { + // It's a JSON string, try to parse it + try { + healthCheckResponse.test = JSON.parse(healthCheckResponse.test) + } catch (e) { + // If not valid JSON, treat as a single string command + healthCheckResponse.test = [healthCheckResponse.test] + } + } else if (!Array.isArray(healthCheckResponse.test)) { + // If it's not an array, convert to 
array + healthCheckResponse.test = [healthCheckResponse.test] + } + // If it's already an array, leave as is + } + + if (healthCheckResponse.test && healthCheckResponse.test.length > 0) { + res.healthCheck = healthCheckResponse + } else { + res.healthCheck = {} + } + } res.pubTags = pubTags res.subTags = subTags diff --git a/src/services/yaml-parser-service.js b/src/services/yaml-parser-service.js index de3b205c..68efeeb7 100644 --- a/src/services/yaml-parser-service.js +++ b/src/services/yaml-parser-service.js @@ -336,6 +336,9 @@ const parseMicroserviceYAML = async (microservice) => { rootHostAccess: lget(microservice, 'container.rootHostAccess', false), pidMode: lget(microservice, 'container.pidMode', ''), ipcMode: lget(microservice, 'container.ipcMode', ''), + cpuSetCpus: lget(microservice, 'container.cpuSetCpus', ''), + memoryLimit: lget(microservice, 'container.memoryLimit', null), + healthCheck: lget(microservice, 'container.healthCheck', null), annotations: container.annotations != null ? 
JSON.stringify(container.annotations) : undefined, capAdd: lget(microservice, 'container.capAdd', []), capDrop: lget(microservice, 'container.capDrop', []), From e0b320717e5626c73ad42202c6fe34a6480138d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sat, 12 Jul 2025 19:13:54 +0300 Subject: [PATCH 17/25] viewer version updated --- package-lock.json | 8 ++++---- package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/package-lock.json b/package-lock.json index 0b67c0b4..b9dfda37 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,7 @@ "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta3", + "@datasance/ecn-viewer": "1.0.0-beta4", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta3", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta3.tgz", - "integrity": "sha512-adYlBAgICw2MluooMhnv669vYXGX9nuliT00RkG45XJZ5TuC3Z+/ZrGUwAGz9Z4m74YS9xNB/qkw/1FSdf1B3w==" + "version": "1.0.0-beta4", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta4.tgz", + "integrity": "sha512-9qb8caxwZJkZhCSYnOjLLgA8KZnbEXW37dQtLGzaEBCIzEzc3G2GiZTNuoW17pEkaOw8bxaMaEVAKDQdFQMBWA==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index 0726c49d..c1d03501 100644 --- a/package.json +++ b/package.json @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta3", + "@datasance/ecn-viewer": "1.0.0-beta4", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", From 0a8b86689204e9a303a2f500a601a84768a16069 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sun, 13 Jul 2025 21:07:13 +0300 Subject: [PATCH 
18/25] healthcheck datatypes fixed --- package-lock.json | 12 ++++++------ package.json | 4 ++-- .../migrations/mysql/db_migration_mysql_v1.0.3.sql | 2 +- .../migrations/postgres/db_migration_pg_v1.0.3.sql | 10 +++++----- .../migrations/sqlite/db_migration_sqlite_v1.0.3.sql | 10 +++++----- src/data/models/microservice.js | 5 +---- src/data/models/microserviceHealthCheck.js | 8 ++++---- src/services/yaml-parser-service.js | 4 ++-- 8 files changed, 26 insertions(+), 29 deletions(-) diff --git a/package-lock.json b/package-lock.json index b9dfda37..ff653862 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta4", + "version": "3.5.0-beta5", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta4", + "version": "3.5.0-beta5", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta4", + "@datasance/ecn-viewer": "1.0.0-beta5", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta4", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta4.tgz", - "integrity": "sha512-9qb8caxwZJkZhCSYnOjLLgA8KZnbEXW37dQtLGzaEBCIzEzc3G2GiZTNuoW17pEkaOw8bxaMaEVAKDQdFQMBWA==" + "version": "1.0.0-beta5", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta5.tgz", + "integrity": "sha512-sffoQbT0mOh2qsmLrq7MudftEA0VoWTtOLY5ETLMIqL0StNpgCnJ3bwilXj1UrLU0tFTeD6i3Fa5mCVYWvTQ/g==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index c1d03501..6cbf1a23 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta4", + "version": "3.5.0-beta5", "description": "ioFog Controller 
project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta4", + "@datasance/ecn-viewer": "1.0.0-beta5", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql index ad7b3699..60fd7a29 100644 --- a/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql +++ b/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql @@ -790,7 +790,7 @@ CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecS ALTER TABLE Fogs ADD COLUMN gps_status VARCHAR(32); ALTER TABLE Microservices ADD COLUMN cpu_set_cpus TEXT; -ALTER TABLE Microservices ADD COLUMN memory_limit BIGINT; +ALTER TABLE Microservices ADD COLUMN memory_limit FLOAT; CREATE TABLE IF NOT EXISTS MicroserviceHealthChecks ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql index 660fc03f..5303bda5 100644 --- a/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql +++ b/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql @@ -790,15 +790,15 @@ CREATE INDEX idx_microservice_exec_status_microservice_uuid ON "MicroserviceExec ALTER TABLE "Fogs" ADD COLUMN gps_status VARCHAR(32); ALTER TABLE "Microservices" ADD COLUMN cpu_set_cpus TEXT; -ALTER TABLE "Microservices" ADD COLUMN memory_limit BIGINT; +ALTER TABLE "Microservices" ADD COLUMN memory_limit DOUBLE PRECISION; CREATE TABLE IF NOT EXISTS "MicroserviceHealthChecks" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, test TEXT, - interval BIGINT, - timeout BIGINT, - start_period BIGINT, - start_interval BIGINT, + interval DOUBLE PRECISION, + timeout DOUBLE 
PRECISION, + start_period DOUBLE PRECISION, + start_interval DOUBLE PRECISION, retries INT, microservice_uuid VARCHAR(36), created_at TIMESTAMP(0), diff --git a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql index 0240c0ea..d8715585 100644 --- a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql @@ -777,15 +777,15 @@ CREATE INDEX idx_microservice_exec_status_microservice_uuid ON MicroserviceExecS ALTER TABLE Fogs ADD COLUMN gps_status VARCHAR(32); ALTER TABLE Microservices ADD COLUMN cpu_set_cpus TEXT; -ALTER TABLE Microservices ADD COLUMN memory_limit BIGINT; +ALTER TABLE Microservices ADD COLUMN memory_limit FLOAT; CREATE TABLE IF NOT EXISTS MicroserviceHealthChecks ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, test TEXT, - interval BIGINT, - timeout BIGINT, - start_period BIGINT, - start_interval BIGINT, + interval FLOAT, + timeout FLOAT, + start_period FLOAT, + start_interval FLOAT, retries INT, microservice_uuid VARCHAR(36), created_at DATETIME, diff --git a/src/data/models/microservice.js b/src/data/models/microservice.js index 4f090329..261ffdab 100644 --- a/src/data/models/microservice.js +++ b/src/data/models/microservice.js @@ -86,10 +86,7 @@ module.exports = (sequelize, DataTypes) => { defaultValue: '' }, memoryLimit: { - type: DataTypes.BIGINT, - get () { - return convertToInt(this.getDataValue('memoryLimit')) - }, + type: DataTypes.FLOAT, field: 'memory_limit' }, imageSnapshot: { diff --git a/src/data/models/microserviceHealthCheck.js b/src/data/models/microserviceHealthCheck.js index 6e319f51..d6f32e69 100644 --- a/src/data/models/microserviceHealthCheck.js +++ b/src/data/models/microserviceHealthCheck.js @@ -13,19 +13,19 @@ module.exports = (sequelize, DataTypes) => { field: 'test' }, interval: { - type: DataTypes.BIGINT, + type: DataTypes.FLOAT, field: 'interval' }, timeout: { - type: DataTypes.BIGINT, + 
type: DataTypes.FLOAT, field: 'timeout' }, startPeriod: { - type: DataTypes.BIGINT, + type: DataTypes.FLOAT, field: 'start_period' }, startInterval: { - type: DataTypes.BIGINT, + type: DataTypes.FLOAT, field: 'start_interval' }, retries: { diff --git a/src/services/yaml-parser-service.js b/src/services/yaml-parser-service.js index 68efeeb7..adc8d94c 100644 --- a/src/services/yaml-parser-service.js +++ b/src/services/yaml-parser-service.js @@ -337,8 +337,8 @@ const parseMicroserviceYAML = async (microservice) => { pidMode: lget(microservice, 'container.pidMode', ''), ipcMode: lget(microservice, 'container.ipcMode', ''), cpuSetCpus: lget(microservice, 'container.cpuSetCpus', ''), - memoryLimit: lget(microservice, 'container.memoryLimit', null), - healthCheck: lget(microservice, 'container.healthCheck', null), + memoryLimit: lget(microservice, 'container.memoryLimit', undefined), + healthCheck: lget(microservice, 'container.healthCheck', {}), annotations: container.annotations != null ? JSON.stringify(container.annotations) : undefined, capAdd: lget(microservice, 'container.capAdd', []), capDrop: lget(microservice, 'container.capDrop', []), From f258223c4c7b345f8b777d351809e30b0f2f1bec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 16 Jul 2025 16:27:36 +0300 Subject: [PATCH 19/25] k8s create/update/delete servis logic updated, ecn-veiwer version upgraded --- package-lock.json | 12 ++--- package.json | 4 +- src/services/services-service.js | 91 +++++++++++++++++++------------- src/utils/k8s-client.js | 14 +++++ 4 files changed, 77 insertions(+), 44 deletions(-) diff --git a/package-lock.json b/package-lock.json index ff653862..2e753a95 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta5", + "version": "3.5.0-beta6", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta5", + "version": 
"3.5.0-beta6", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta5", + "@datasance/ecn-viewer": "1.0.0-beta7", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta5", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta5.tgz", - "integrity": "sha512-sffoQbT0mOh2qsmLrq7MudftEA0VoWTtOLY5ETLMIqL0StNpgCnJ3bwilXj1UrLU0tFTeD6i3Fa5mCVYWvTQ/g==" + "version": "1.0.0-beta7", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta7.tgz", + "integrity": "sha512-XGihsQHigPv/s+0NhQZMicXkRzQiEWEmcWD2/K4l4zG5RrX2o2bMBPGJr5eB6KRmr1QGemx9/B/jrUQEM75Tmw==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index 6cbf1a23..89ddb2b9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta5", + "version": "3.5.0-beta6", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta5", + "@datasance/ecn-viewer": "1.0.0-beta7", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", diff --git a/src/services/services-service.js b/src/services/services-service.js index 8503fe4d..24b34d5a 100644 --- a/src/services/services-service.js +++ b/src/services/services-service.js @@ -819,6 +819,7 @@ async function _createK8sService (serviceConfig, transaction) { 'datasance.com/component': 'router' }, ports: [{ + name: 'pot-service', targetPort: parseInt(serviceConfig.bridgePort), port: parseInt(serviceConfig.servicePort), protocol: 'TCP' @@ -845,51 
+846,69 @@ async function _createK8sService (serviceConfig, transaction) { // Helper function to update Kubernetes service async function _updateK8sService (serviceConfig, transaction) { - const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? tag : `${tag}:`) - const patchData = { - metadata: { - annotations: normalizedTags.reduce((acc, tag) => { - const [key, value] = tag.split(':') - acc[key] = (value || '').trim() - return acc - }, {}) - }, - spec: { - type: serviceConfig.k8sType, - selector: { - application: 'interior-router', - name: 'router', - 'skupper.io/component': 'router' + const existingService = await K8sClient.getService(serviceConfig.name) + if (!existingService) { + logger.debug(`Service not found: ${serviceConfig.name}, creating new service`) + const service = await _createK8sService(serviceConfig, transaction) + return service + } else { + const normalizedTags = serviceConfig.tags.map(tag => tag.includes(':') ? tag : `${tag}:`) + const patchData = { + metadata: { + annotations: normalizedTags.reduce((acc, tag) => { + const [key, value] = tag.split(':') + acc[key] = (value || '').trim() + return acc + }, {}) }, - ports: [{ - port: parseInt(serviceConfig.bridgePort), - targetPort: parseInt(serviceConfig.servicePort), - protocol: 'TCP' - }] + spec: { + type: serviceConfig.k8sType, + selector: { + application: 'interior-router', + name: 'router', + 'skupper.io/component': 'router' + }, + ports: [{ + name: 'pot-service', + port: parseInt(serviceConfig.bridgePort), + targetPort: parseInt(serviceConfig.servicePort), + protocol: 'TCP' + }] + } } - } - logger.debug(`Updating service: ${serviceConfig.name}`) - const service = await K8sClient.updateService(serviceConfig.name, patchData) - - // If LoadBalancer type, wait for and set the external IP - if (serviceConfig.k8sType === 'LoadBalancer') { - const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) - if (loadBalancerIP) { - await ServiceManager.update( - { name: 
serviceConfig.name }, - { serviceEndpoint: loadBalancerIP }, - transaction - ) + logger.debug(`Updating service: ${serviceConfig.name}`) + const updatedService = await K8sClient.updateService(serviceConfig.name, patchData) + + // If LoadBalancer type, wait for and set the external IP + if (serviceConfig.k8sType === 'LoadBalancer') { + const loadBalancerIP = await K8sClient.watchLoadBalancerIP(serviceConfig.name) + if (loadBalancerIP) { + await ServiceManager.update( + { name: serviceConfig.name }, + { serviceEndpoint: loadBalancerIP }, + transaction + ) + } } + return updatedService } - - return service } // Helper function to delete Kubernetes service async function _deleteK8sService (serviceName) { - await K8sClient.deleteService(serviceName) + try { + await K8sClient.deleteService(serviceName) + } catch (error) { + // If it's a 404 (Not Found), log a warning and continue + if (error.response && error.response.status === 404) { + logger.warn(`K8s service ${serviceName} not found during delete. 
It may have already been deleted.`) + } else { + // For other errors, you may want to log and rethrow, or just log as warning + logger.warn(`Failed to delete K8s service ${serviceName}: ${error.message}`) + } + // Do not throw, so the flow continues + } } // Create service endpoint diff --git a/src/utils/k8s-client.js b/src/utils/k8s-client.js index 0aa8c9ae..ad6bbc5a 100644 --- a/src/utils/k8s-client.js +++ b/src/utils/k8s-client.js @@ -49,6 +49,19 @@ async function getSecret (secretName) { } } +async function getService (serviceName) { + logger.debug(`Getting service: ${serviceName} in namespace: ${CONTROLLER_NAMESPACE}`) + try { + const api = await initializeK8sClient() + const response = await api.readNamespacedService(serviceName, CONTROLLER_NAMESPACE) + logger.info(`Successfully retrieved service: ${serviceName}`) + return response.body + } catch (error) { + logger.error(`Failed to get service ${serviceName}: ${error.message}`) + throw error + } +} + // ConfigMap methods async function getConfigMap (configMapName) { logger.debug(`Getting ConfigMap: ${configMapName} in namespace: ${CONTROLLER_NAMESPACE}`) @@ -240,6 +253,7 @@ async function watchLoadBalancerIP (serviceName, maxRetries = 10, retryInterval module.exports = { getSecret, + getService, getConfigMap, patchConfigMap, getNamespacedServices, From d256540834f83365377b345977b8ed916e9e9157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 16 Jul 2025 18:01:44 +0300 Subject: [PATCH 20/25] viewer version upgraded --- package-lock.json | 12 ++++++------ package.json | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2e753a95..5481e619 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta6", + "version": "3.5.0-beta7", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": 
"3.5.0-beta6", + "version": "3.5.0-beta7", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta7", + "@datasance/ecn-viewer": "1.0.0-beta8", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta7", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta7.tgz", - "integrity": "sha512-XGihsQHigPv/s+0NhQZMicXkRzQiEWEmcWD2/K4l4zG5RrX2o2bMBPGJr5eB6KRmr1QGemx9/B/jrUQEM75Tmw==" + "version": "1.0.0-beta8", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta8.tgz", + "integrity": "sha512-1zxIzsd5WxjhQbH2Hc7tvtkDQm/VgVJO6Whhccf308u6nhgwF1jZyx/iGigSw9hl7sI6nbgdQZtUyIeIyc+UCw==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index 89ddb2b9..b8de3d19 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta6", + "version": "3.5.0-beta7", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta7", + "@datasance/ecn-viewer": "1.0.0-beta8", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", From 19e969a645bfd1ad1cb9848f92a813006578ceb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 16 Jul 2025 19:00:45 +0300 Subject: [PATCH 21/25] k8s update service logic fixed --- package-lock.json | 4 ++-- package.json | 2 +- src/services/services-service.js | 8 +++----- src/utils/k8s-client.js | 2 +- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/package-lock.json b/package-lock.json index 
5481e619..ccadc800 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta7", + "version": "3.5.0-beta8", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta7", + "version": "3.5.0-beta8", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { diff --git a/package.json b/package.json index b8de3d19..eba0196c 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta7", + "version": "3.5.0-beta8", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", diff --git a/src/services/services-service.js b/src/services/services-service.js index 24b34d5a..be2982ab 100644 --- a/src/services/services-service.js +++ b/src/services/services-service.js @@ -864,14 +864,12 @@ async function _updateK8sService (serviceConfig, transaction) { spec: { type: serviceConfig.k8sType, selector: { - application: 'interior-router', - name: 'router', - 'skupper.io/component': 'router' + 'datasance.com/component': 'router' }, ports: [{ name: 'pot-service', - port: parseInt(serviceConfig.bridgePort), - targetPort: parseInt(serviceConfig.servicePort), + port: parseInt(serviceConfig.servicePort), + targetPort: parseInt(serviceConfig.bridgePort), protocol: 'TCP' }] } diff --git a/src/utils/k8s-client.js b/src/utils/k8s-client.js index ad6bbc5a..51ea4f12 100644 --- a/src/utils/k8s-client.js +++ b/src/utils/k8s-client.js @@ -185,7 +185,7 @@ async function updateService (serviceName, patchData) { undefined, undefined, undefined, - { headers: { 'Content-Type': 'application/strategic-merge-patch+json' } } + { headers: { 'Content-Type': 'application/json-patch+json' } } ) logger.info(`Successfully updated service: ${serviceName} in namespace: 
${CONTROLLER_NAMESPACE}`) return response.body From 5834cb28f2787265fc9365b3b0cd52de705c60fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 16 Jul 2025 19:22:56 +0300 Subject: [PATCH 22/25] viewer version updated --- package-lock.json | 8 ++++---- package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/package-lock.json b/package-lock.json index ccadc800..55e02df2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,7 @@ "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta8", + "@datasance/ecn-viewer": "1.0.0-beta9", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -427,9 +427,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta8", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta8.tgz", - "integrity": "sha512-1zxIzsd5WxjhQbH2Hc7tvtkDQm/VgVJO6Whhccf308u6nhgwF1jZyx/iGigSw9hl7sI6nbgdQZtUyIeIyc+UCw==" + "version": "1.0.0-beta9", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta9.tgz", + "integrity": "sha512-Cgq2fRTZdWZjdYPutb07cK//BHHo+xxDRHxnBpIiYTa0FpLXZu99n2N82t1sdEPw/wrnMgxNhINjPYSYn1sNSA==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index eba0196c..634f7fd7 100644 --- a/package.json +++ b/package.json @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta8", + "@datasance/ecn-viewer": "1.0.0-beta9", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", From 01fe4be71fd8bcd91098c9f3c473be5722d46696 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Wed, 16 Jul 2025 22:12:46 +0300 Subject: [PATCH 23/25] k8s service update fixed --- package-lock.json | 4 ++-- package.json | 2 +- src/utils/k8s-client.js | 19 
+++++++++++++++---- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/package-lock.json b/package-lock.json index 55e02df2..def5bb78 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta8", + "version": "3.5.0-beta9", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta8", + "version": "3.5.0-beta9", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { diff --git a/package.json b/package.json index 634f7fd7..b7947d55 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta8", + "version": "3.5.0-beta9", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", diff --git a/src/utils/k8s-client.js b/src/utils/k8s-client.js index 51ea4f12..7cbdabe3 100644 --- a/src/utils/k8s-client.js +++ b/src/utils/k8s-client.js @@ -170,12 +170,23 @@ async function updateService (serviceName, patchData) { try { const api = await initializeK8sClient() - // For strategic merge patch, we send the data as a map - const patch = { - spec: patchData.spec, - metadata: patchData.metadata + const patch = [] + + // Update spec fields + if (patchData.spec.type) { + patch.push({ op: 'replace', path: '/spec/type', value: patchData.spec.type }) + } + if (patchData.spec.selector) { + patch.push({ op: 'replace', path: '/spec/selector', value: patchData.spec.selector }) + } + if (patchData.spec.ports) { + patch.push({ op: 'replace', path: '/spec/ports', value: patchData.spec.ports }) } + // Update annotations + if (patchData.metadata && patchData.metadata.annotations) { + patch.push({ op: 'replace', path: '/metadata/annotations', value: patchData.metadata.annotations }) + } const response = await api.patchNamespacedService( serviceName, 
CONTROLLER_NAMESPACE, From e54f9e0672687353babc04e267c7d04221863d39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Sat, 19 Jul 2025 11:30:37 +0300 Subject: [PATCH 24/25] new volume mount linked nodes endpoint added, secret types fixed, viewer upgraded --- docs/swagger.yaml | 40 ++++++++- package-lock.json | 81 +++++-------------- package.json | 7 +- src/controllers/volume-mount-controller.js | 5 ++ .../mysql/db_migration_mysql_v1.0.3.sql | 2 +- .../postgres/db_migration_pg_v1.0.3.sql | 2 +- .../sqlite/db_migration_sqlite_v1.0.3.sql | 2 +- src/data/models/secret.js | 2 +- src/routes/volumeMount.js | 31 +++++++ src/schemas/secret.js | 6 +- src/services/agent-service.js | 2 +- src/services/volume-mount-service.js | 10 ++- 12 files changed, 112 insertions(+), 78 deletions(-) diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 307efca3..bc5704d0 100755 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -4549,6 +4549,40 @@ paths: description: Internal Server Error "/volumeMounts/{name}/link": + get: + tags: + - VolumeMounts + summary: Gets volume mount link info + operationId: getVolumeMountLink + parameters: + - in: path + name: name + description: Volume mount name + required: true + schema: + type: string + security: + - authToken: [] + responses: + "200": + description: Success + content: + application/json: + schema: + type: object + properties: + fogUuids: + type: array + items: + type: string + "400": + description: Bad Request + "401": + description: Not Authorized + "404": + description: Not Found + "500": + description: Internal Server Error post: tags: - VolumeMounts @@ -6428,7 +6462,7 @@ components: maxLength: 255 type: type: string - enum: [opaque, tls] + enum: [Opaque, tls] data: type: object SecretUpdate: @@ -6454,7 +6488,7 @@ components: type: string type: type: string - enum: [opaque, tls] + enum: [Opaque, tls] data: type: object created_at: @@ -6485,7 +6519,7 @@ components: type: string type: type: string - enum: [opaque, 
tls] + enum: [Opaque, tls] created_at: type: string format: date-time diff --git a/package-lock.json b/package-lock.json index def5bb78..2e0f2b6c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta9", + "version": "3.5.0-beta10", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta9", + "version": "3.5.0-beta10", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta9", + "@datasance/ecn-viewer": "1.0.0-beta10", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -32,7 +32,7 @@ "dotenv": "^16.5.0", "ejs": "3.1.10", "express": "4.21.2", - "express-session": "1.18.1", + "express-session": "1.18.2", "formidable": "3.5.4", "ftp": "0.3.10", "globally": "^0.0.0", @@ -46,7 +46,6 @@ "minimatch": "10.0.1", "moment": "2.30.1", "moment-timezone": "0.5.45", - "morgan": "1.10.0", "multer": "1.4.5-lts.1", "mysql2": "3.10.1", "nconf": "0.12.1", @@ -427,9 +426,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta9", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta9.tgz", - "integrity": "sha512-Cgq2fRTZdWZjdYPutb07cK//BHHo+xxDRHxnBpIiYTa0FpLXZu99n2N82t1sdEPw/wrnMgxNhINjPYSYn1sNSA==" + "version": "1.0.0-beta10", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta10.tgz", + "integrity": "sha512-prH0v9eFvkLdXCLeu7oK3E2Jx1/hfIOqbjsjLe0myieK3mcHFlE11FydDgo15d/DlHIvXrDFcGRHHDN+BtmStQ==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", @@ -3009,22 +3008,6 @@ } ] }, - "node_modules/basic-auth": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz", - "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==", - 
"dependencies": { - "safe-buffer": "5.1.2" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/basic-auth/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, "node_modules/basic-ftp": { "version": "5.0.5", "resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.0.5.tgz", @@ -5649,15 +5632,15 @@ } }, "node_modules/express-session": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/express-session/-/express-session-1.18.1.tgz", - "integrity": "sha512-a5mtTqEaZvBCL9A9aqkrtfz+3SMDhOVUnjafjo+s7A9Txkq+SVX2DLvSp1Zrv4uCXa3lMSK3viWnh9Gg07PBUA==", + "version": "1.18.2", + "resolved": "https://registry.npmjs.org/express-session/-/express-session-1.18.2.tgz", + "integrity": "sha512-SZjssGQC7TzTs9rpPDuUrR23GNZ9+2+IkA/+IJWmvQilTr5OSliEHGF+D9scbIpdC6yGtTI0/VhaHoVes2AN/A==", "dependencies": { "cookie": "0.7.2", "cookie-signature": "1.0.7", "debug": "2.6.9", "depd": "~2.0.0", - "on-headers": "~1.0.2", + "on-headers": "~1.1.0", "parseurl": "~1.3.3", "safe-buffer": "5.2.1", "uid-safe": "~2.1.5" @@ -5671,6 +5654,14 @@ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz", "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==" }, + "node_modules/express-session/node_modules/on-headers": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz", + "integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/express/node_modules/cookie": { "version": "0.7.1", "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", @@ -9232,32 +9223,6 @@ "node": "*" } }, - "node_modules/morgan": { - "version": "1.10.0", - 
"resolved": "https://registry.npmjs.org/morgan/-/morgan-1.10.0.tgz", - "integrity": "sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==", - "dependencies": { - "basic-auth": "~2.0.1", - "debug": "2.6.9", - "depd": "~2.0.0", - "on-finished": "~2.3.0", - "on-headers": "~1.0.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/morgan/node_modules/on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha512-ikqdkGAAyf/X/gPhXGvfgAytDZtDbr+bkNUJ0N9h5MI/dmdgCs3l6hoHrcUv41sRKew3jIwrp4qQDXiK99Utww==", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, "node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", @@ -10117,14 +10082,6 @@ "node": ">= 0.8" } }, - "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "engines": { - "node": ">= 0.8" - } - }, "node_modules/once": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/once/-/once-1.3.3.tgz", diff --git a/package.json b/package.json index b7947d55..4e9d45d0 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta9", + "version": "3.5.0-beta10", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta9", + "@datasance/ecn-viewer": "1.0.0-beta10", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -77,7 +77,7 @@ "dotenv": "^16.5.0", "ejs": "3.1.10", "express": 
"4.21.2", - "express-session": "1.18.1", + "express-session": "1.18.2", "formidable": "3.5.4", "ftp": "0.3.10", "globally": "^0.0.0", @@ -91,7 +91,6 @@ "minimatch": "10.0.1", "moment": "2.30.1", "moment-timezone": "0.5.45", - "morgan": "1.10.0", "multer": "1.4.5-lts.1", "mysql2": "3.10.1", "nconf": "0.12.1", diff --git a/src/controllers/volume-mount-controller.js b/src/controllers/volume-mount-controller.js index 5a3ce6be..9628a8ba 100644 --- a/src/controllers/volume-mount-controller.js +++ b/src/controllers/volume-mount-controller.js @@ -50,6 +50,10 @@ const updateVolumeMountYamlEndpoint = async (req) => { return VolumeMountService.updateVolumeMountEndpoint(name, volumeMountData) } +const getVolumeMountLinkEndpoint = async (req) => { + return VolumeMountService.getVolumeMountLinkEndpoint(req.params.name) +} + const linkVolumeMountEndpoint = async (req) => { return VolumeMountService.linkVolumeMountEndpoint(req.params.name, req.body.fogUuids) } @@ -66,6 +70,7 @@ module.exports = { deleteVolumeMountEndpoint, createVolumeMountYamlEndpoint, updateVolumeMountYamlEndpoint, + getVolumeMountLinkEndpoint, linkVolumeMountEndpoint, unlinkVolumeMountEndpoint } diff --git a/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql b/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql index 60fd7a29..4126b2ae 100644 --- a/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql +++ b/src/data/migrations/mysql/db_migration_mysql_v1.0.3.sql @@ -641,7 +641,7 @@ ALTER TABLE MicroserviceStatuses ADD COLUMN ip_address TEXT; CREATE TABLE IF NOT EXISTS Secrets ( id INT AUTO_INCREMENT PRIMARY KEY NOT NULL, name VARCHAR(255) UNIQUE NOT NULL, - type VARCHAR(50) NOT NULL CHECK (type IN ('opaque', 'tls')), + type VARCHAR(50) NOT NULL CHECK (type IN ('Opaque', 'tls')), data TEXT NOT NULL, created_at DATETIME, updated_at DATETIME diff --git a/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql b/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql index 5303bda5..9aeb5f6c 100644 --- 
a/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql +++ b/src/data/migrations/postgres/db_migration_pg_v1.0.3.sql @@ -640,7 +640,7 @@ DROP TABLE IF EXISTS "FogAccessTokens"; CREATE TABLE IF NOT EXISTS "Secrets" ( id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL, name VARCHAR(255) UNIQUE NOT NULL, - type VARCHAR(50) NOT NULL CHECK (type IN ('opaque', 'tls')), + type VARCHAR(50) NOT NULL CHECK (type IN ('Opaque', 'tls')), data TEXT NOT NULL, created_at TIMESTAMP(0), updated_at TIMESTAMP(0) diff --git a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql index d8715585..0781f38d 100644 --- a/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql +++ b/src/data/migrations/sqlite/db_migration_sqlite_v1.0.3.sql @@ -632,7 +632,7 @@ ALTER TABLE MicroserviceStatuses ADD COLUMN ip_address TEXT; CREATE TABLE IF NOT EXISTS Secrets ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, name VARCHAR(255) UNIQUE NOT NULL, - type VARCHAR(50) NOT NULL CHECK (type IN ('opaque', 'tls')), + type VARCHAR(50) NOT NULL CHECK (type IN ('Opaque', 'tls')), data TEXT NOT NULL, created_at DATETIME, updated_at DATETIME diff --git a/src/data/models/secret.js b/src/data/models/secret.js index 3b19442a..9b1447ae 100644 --- a/src/data/models/secret.js +++ b/src/data/models/secret.js @@ -22,7 +22,7 @@ module.exports = (sequelize, DataTypes) => { allowNull: false, field: 'type', validate: { - isIn: [['opaque', 'tls']] + isIn: [['Opaque', 'tls']] } }, data: { diff --git a/src/routes/volumeMount.js b/src/routes/volumeMount.js index 05c20b50..99dee9a5 100644 --- a/src/routes/volumeMount.js +++ b/src/routes/volumeMount.js @@ -247,6 +247,37 @@ module.exports = [ }) } }, + { + method: 'get', + path: '/api/v3/volumeMounts/:name/link', + middleware: async (req, res) => { + logger.apiReq(req) + + const successCode = constants.HTTP_CODE_SUCCESS + const errorCodes = [ + { + code: constants.HTTP_CODE_UNAUTHORIZED, + errors: 
[Errors.AuthenticationError] + }, + { + code: constants.HTTP_CODE_BAD_REQUEST, + errors: [Errors.ValidationError] + } + ] + + // Add keycloak.protect() middleware to protect the route for SRE role + await keycloak.protect(['SRE', 'Developer'])(req, res, async () => { + const getVolumeMountLinkEndpoint = ResponseDecorator.handleErrors(VolumeMountController.getVolumeMountLinkEndpoint, successCode, errorCodes) + const responseObject = await getVolumeMountLinkEndpoint(req) + const user = req.kauth.grant.access_token.content.preferred_username + res + .status(responseObject.code) + .send(responseObject.body) + + logger.apiRes({ req: req, user: user, res: res, responseObject: responseObject }) + }) + } + }, { method: 'post', path: '/api/v3/volumeMounts/:name/link', diff --git a/src/schemas/secret.js b/src/schemas/secret.js index 68251328..6b52e5eb 100644 --- a/src/schemas/secret.js +++ b/src/schemas/secret.js @@ -3,7 +3,7 @@ const secretCreate = { type: 'object', properties: { name: { type: 'string', minLength: 1, maxLength: 255 }, - type: { type: 'string', enum: ['opaque', 'tls'] }, + type: { type: 'string', enum: ['Opaque', 'tls'] }, data: { type: 'object' } }, required: ['name', 'type', 'data'], @@ -27,7 +27,7 @@ const secretResponse = { properties: { id: { type: 'integer' }, name: { type: 'string' }, - type: { type: 'string', enum: ['opaque', 'tls'] }, + type: { type: 'string', enum: ['Opaque', 'tls'] }, data: { type: 'object' }, created_at: { type: 'string', format: 'date-time' }, updated_at: { type: 'string', format: 'date-time' } @@ -47,7 +47,7 @@ const secretListResponse = { properties: { id: { type: 'integer' }, name: { type: 'string' }, - type: { type: 'string', enum: ['opaque', 'tls'] }, + type: { type: 'string', enum: ['Opaque', 'tls'] }, created_at: { type: 'string', format: 'date-time' }, updated_at: { type: 'string', format: 'date-time' } }, diff --git a/src/services/agent-service.js b/src/services/agent-service.js index 3c2e2927..24fb0080 100644 --- 
a/src/services/agent-service.js +++ b/src/services/agent-service.js @@ -752,7 +752,7 @@ const getAgentLinkedVolumeMounts = async function (fog, transaction) { // For TLS secrets, values are already base64 encoded data = secret.data } else { - // For opaque secrets, we need to base64 encode all values + // For Opaque secrets, we need to base64 encode all values data = Object.entries(secret.data).reduce((acc, [key, value]) => { acc[key] = Buffer.from(value).toString('base64') return acc diff --git a/src/services/volume-mount-service.js b/src/services/volume-mount-service.js index 403afb1a..21193e23 100644 --- a/src/services/volume-mount-service.js +++ b/src/services/volume-mount-service.js @@ -184,6 +184,13 @@ async function unlinkVolumeMountEndpoint (name, fogUuids, transaction) { return {} } +async function getVolumeMountLinkEndpoint (name, transaction) { + const linkedFogUuids = await findVolumeMountedFogNodes(name, transaction) + return { + fogUuids: linkedFogUuids + } +} + module.exports = { listVolumeMountsEndpoint: TransactionDecorator.generateTransaction(listVolumeMountsEndpoint), getVolumeMountEndpoint: TransactionDecorator.generateTransaction(getVolumeMountEndpoint), @@ -192,5 +199,6 @@ module.exports = { deleteVolumeMountEndpoint: TransactionDecorator.generateTransaction(deleteVolumeMountEndpoint), linkVolumeMountEndpoint: TransactionDecorator.generateTransaction(linkVolumeMountEndpoint), unlinkVolumeMountEndpoint: TransactionDecorator.generateTransaction(unlinkVolumeMountEndpoint), - findVolumeMountedFogNodes: TransactionDecorator.generateTransaction(findVolumeMountedFogNodes) + findVolumeMountedFogNodes: TransactionDecorator.generateTransaction(findVolumeMountedFogNodes), + getVolumeMountLinkEndpoint: TransactionDecorator.generateTransaction(getVolumeMountLinkEndpoint) } From dc6cba7524fa8ffeb4a8a3ee768412e8b4183c4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emirhan=20Durmu=C5=9F?= Date: Mon, 21 Jul 2025 13:30:40 +0300 Subject: [PATCH 25/25] release prep & 
viewer version upgraded --- docs/swagger.json | 2 +- docs/swagger.yaml | 2 +- package-lock.json | 12 ++++++------ package.json | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/swagger.json b/docs/swagger.json index 98745d60..16aac50d 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -359,7 +359,7 @@ "/iofog/{uuid}/prune" : { "post" : { "tags" : [ "ioFog" ], - "summary" : "prune reboot fog agent", + "summary" : "prune remote fog agent", "operationId" : "setPruneCommand", "parameters" : [ { "name" : "uuid", diff --git a/docs/swagger.yaml b/docs/swagger.yaml index bc5704d0..c3da65dc 100755 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -294,7 +294,7 @@ paths: post: tags: - ioFog - summary: prune reboot fog agent + summary: prune remote fog agent operationId: setPruneCommand parameters: - in: path diff --git a/package-lock.json b/package-lock.json index 2e0f2b6c..e2141bf4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,16 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta10", + "version": "3.5.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta10", + "version": "3.5.0", "hasInstallScript": true, "license": "EPL-2.0", "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta10", + "@datasance/ecn-viewer": "1.0.0", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0", @@ -426,9 +426,9 @@ } }, "node_modules/@datasance/ecn-viewer": { - "version": "1.0.0-beta10", - "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0-beta10.tgz", - "integrity": "sha512-prH0v9eFvkLdXCLeu7oK3E2Jx1/hfIOqbjsjLe0myieK3mcHFlE11FydDgo15d/DlHIvXrDFcGRHHDN+BtmStQ==" + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@datasance/ecn-viewer/-/ecn-viewer-1.0.0.tgz", + "integrity": 
"sha512-dgKXX2wmWQQl3UKt1cUmDXz721Rvl8jEDHJIGi99wPmk98aeF47XWO5oev83FFmHCQiBaIDtO2QXdtWuHaRSxg==" }, "node_modules/@eslint-community/eslint-utils": { "version": "4.7.0", diff --git a/package.json b/package.json index 4e9d45d0..29032157 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@datasance/iofogcontroller", - "version": "3.5.0-beta10", + "version": "3.5.0", "description": "ioFog Controller project for Datasance PoT @ datasance.com \\nCopyright (c) 2023 Datasance Teknoloji A.S.", "main": "./src/main.js", "author": "Emirhan Durmus", @@ -55,7 +55,7 @@ "iofog-controller": "src/main.js" }, "dependencies": { - "@datasance/ecn-viewer": "1.0.0-beta10", + "@datasance/ecn-viewer": "1.0.0", "@kubernetes/client-node": "^0.22.3", "@msgpack/msgpack": "^3.1.2", "@opentelemetry/api": "^1.9.0",